// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 * Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_issue_read - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct p9_fid *fid = rreq->netfs_priv;
	struct iov_iter to;
	loff_t pos = subreq->start + subreq->transferred;
	size_t len = subreq->len - subreq->transferred;
	int total, err;

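	/* Build a destination iterator over the pagecache pages covering
	 * this subrequest; p9_client_read() fills them directly.
	 */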
	iov_iter_xarray(&to, ITER_DEST, &rreq->mapping->i_pages, pos, len);

	total = p9_client_read(fid, pos, &to, &err);

	/* if we just extended the file size, any portion not in
	 * cache won't be on server and is zeroes */
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

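	/* Report the number of bytes transferred, or the error if the read
	 * failed outright.
	 */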
	netfs_subreq_terminated(subreq, err ?: total, false);
}

/**
 * v9fs_init_request - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct p9_fid *fid = file->private_data;

	BUG_ON(!fid);

	/* we might need to read from a fid that was opened write-only
	 * for read-modify-write of page cache, use the writeback fid
	 * for that */
	WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE &&
		!(fid->mode & P9_ORDWR));

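	/* Pin the fid for the lifetime of the request; the reference is
	 * dropped again in v9fs_free_request().
	 */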
	p9_fid_get(fid);
	rreq->netfs_priv = fid;
	return 0;
}

/**
 * v9fs_free_request - Cleanup request initialized by v9fs_init_request
 * @rreq: The I/O request to clean up
 */
static void v9fs_free_request(struct netfs_io_request *rreq)
{
	struct p9_fid *fid = rreq->netfs_priv;

	p9_fid_put(fid);
}

/**
 * v9fs_begin_cache_operation - Begin a cache operation for a read
 * @rreq: The read request
 */
static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
{
#ifdef CONFIG_9P_FSCACHE
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

	return fscache_begin_read_operation(&rreq->cache_resources, cookie);
#else
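	/* Tell netfs that no local cache is available for this read. */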
	return -ENOBUFS;
#endif
}

const struct netfs_request_ops v9fs_req_ops = {
	.init_request = v9fs_init_request,
	.free_request = v9fs_free_request,
	.begin_cache_operation = v9fs_begin_cache_operation,
	.issue_read = v9fs_issue_read,
};

/**
 * v9fs_release_folio - release the private state associated with a folio
 * @folio: The folio to be released
 * @gfp: The caller's allocation restrictions
 *
 * Returns true if the page can be released, false otherwise.
 */

static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
{
	if (folio_test_private(folio))
		return false;
#ifdef CONFIG_9P_FSCACHE
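	/* If the folio is still being written to the cache, refuse to
	 * release it rather than sleep in reclaim context; only wait when
	 * the allocation context permits fs recursion.
	 */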
	if (folio_test_fscache(folio)) {
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_fscache(folio);
	}
	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(folio_inode(folio))));
#endif
	return true;
}

static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
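	/* Wait for any write to the local cache to complete before the
	 * folio contents are invalidated.
	 */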
	folio_wait_fscache(folio);
}

#ifdef CONFIG_9P_FSCACHE
static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				     bool was_async)
{
	struct v9fs_inode *v9inode = priv;
	__le32 version;

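	/* If the write to the cache failed (other than there simply being
	 * no cache space), invalidate the cached data so stale contents
	 * aren't read back later.
	 */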
	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS) {
		version = cpu_to_le32(v9inode->qid.version);
		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
				   i_size_read(&v9inode->netfs.inode), 0);
	}
}
#endif

static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	loff_t start = folio_pos(folio);
	loff_t i_size = i_size_read(inode);
	struct iov_iter from;
	size_t len = folio_size(folio);
	struct p9_fid *writeback_fid;
	int err;
	struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode);
	struct fscache_cookie __maybe_unused *cookie = v9fs_inode_cookie(v9inode);

	if (start >= i_size)
		return 0; /* Simultaneous truncation occurred */

	len = min_t(loff_t, i_size - start, len);

	iov_iter_xarray(&from, ITER_SOURCE, &folio_mapping(folio)->i_pages, start, len);

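	/* Look up a fid on this inode that is open for writing; any uid
	 * will do since we only need write access to the file.
	 */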
	writeback_fid = v9fs_fid_find_inode(inode, true, INVALID_UID, true);
	if (!writeback_fid) {
		WARN_ONCE(1, "folio expected an open fid inode->i_private=%p\n",
			  inode->i_private);
		return -EINVAL;
	}

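	/* Wait for any in-flight write to the local cache, then flag the
	 * folio as under writeback before sending it to the server.
	 */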
	folio_wait_fscache(folio);
	folio_start_writeback(folio);

	p9_client_write(writeback_fid, start, &from, &err);

#ifdef CONFIG_9P_FSCACHE
	if (err == 0 &&
	    fscache_cookie_enabled(cookie) &&
	    test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
		folio_start_fscache(folio);
		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
				       folio_mapping(folio), start, len, i_size,
				       v9fs_write_to_cache_done, v9inode,
				       true);
	}
#endif

	folio_end_writeback(folio);
	p9_fid_put(writeback_fid);

	return err;
}

static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int retval;

	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

	retval = v9fs_vfs_write_folio_locked(folio);
	if (retval < 0) {
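		/* A transient failure is retried by redirtying the folio;
		 * anything else is recorded against the mapping.
		 */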
		if (retval == -EAGAIN) {
			folio_redirty_for_writepage(wbc, folio);
			retval = 0;
		} else {
			mapping_set_error(folio_mapping(folio), retval);
		}
	} else
		retval = 0;

	folio_unlock(folio);
	return retval;
}

static int v9fs_launder_folio(struct folio *folio)
{
	int retval;

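	/* Synchronously write the folio back to the server if it's dirty,
	 * then wait for any outstanding write to the local cache.
	 */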
	if (folio_clear_dirty_for_io(folio)) {
		retval = v9fs_vfs_write_folio_locked(folio);
		if (retval)
			return retval;
	}
	folio_wait_fscache(folio);
	return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: The data/buffer to use
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() with O_DIRECT, which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not 'yet' supported in the cached mode. Hence when
 * this routine is called through generic_file_aio_read(), the read/write fails
 * with an error.
 *
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t n;
	int err = 0;

	if (iov_iter_rw(iter) == WRITE) {
		n = p9_client_write(file->private_data, pos, iter, &err);
		if (n) {
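			/* If the write extended the file, account for the
			 * bytes added beyond the old size.
			 */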
			struct inode *inode = file_inode(file);
			loff_t i_size = i_size_read(inode);

			if (pos + n > i_size)
				inode_add_bytes(inode, pos + n - i_size);
		}
	} else {
		n = p9_client_read(file->private_data, pos, iter, &err);
	}
	return n ? n : err;
}

static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned int len,
			    struct page **subpagep, void **fsdata)
{
	int retval;
	struct folio *folio;
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	/* Prefetch area to be written into the cache if we're caching this
	 * file. We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len, &folio, fsdata);
	if (retval < 0)
		return retval;

	*subpagep = &folio->page;
	return retval;
}

static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *subpage, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct folio *folio = page_folio(subpage);
	struct inode *inode = mapping->host;

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	if (!folio_test_uptodate(folio)) {
		if (unlikely(copied < len)) {
			copied = 0;
			goto out;
		}

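		/* The folio was fully overwritten, so it is now uptodate
		 * even though it was never read from the server.
		 */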
		folio_mark_uptodate(folio);
	}

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
#ifdef CONFIG_9P_FSCACHE
		fscache_update_cookie(v9fs_inode_cookie(V9FS_I(inode)), NULL,
				      &last_pos);
#endif
	}
	folio_mark_dirty(folio);
out:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}

#ifdef CONFIG_9P_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback. We also
 * need to pin the cache object to write back to.
 */
static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode));
}
#else
#define v9fs_dirty_folio filemap_dirty_folio
#endif

const struct address_space_operations v9fs_addr_operations = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.dirty_folio = v9fs_dirty_folio,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.release_folio = v9fs_release_folio,
	.invalidate_folio = v9fs_invalidate_folio,
	.launder_folio = v9fs_launder_folio,
	.direct_IO = v9fs_direct_IO,
};