// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 * Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_issue_read - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
{
        struct netfs_io_request *rreq = subreq->rreq;
        struct p9_fid *fid = rreq->netfs_priv;
        struct iov_iter to;
        loff_t pos = subreq->start + subreq->transferred;
        size_t len = subreq->len - subreq->transferred;
        int total, err;

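        /*
         * Build the iterator over the inode's pagecache xarray so that
         * p9_client_read() copies straight into the folios backing this
         * subrequest's window.
         */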
        iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);

        total = p9_client_read(fid, pos, &to, &err);

        /*
         * If we just extended the file size, any portion not in the cache
         * won't be on the server and is zeroes.
         */
        __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

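        /* Hand the byte count (or the error) back to the netfs core. */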
        netfs_subreq_terminated(subreq, err ?: total, false);
}

/**
 * v9fs_init_request - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
        struct inode *inode = file_inode(file);
        struct v9fs_inode *v9inode = V9FS_I(inode);
        struct p9_fid *fid = file->private_data;

        BUG_ON(!fid);

        /*
         * We may need to read from a fid that was opened write-only for a
         * read-modify-write of the page cache; use the writeback fid for
         * that.
         */
        if (rreq->origin == NETFS_READ_FOR_WRITE &&
            (fid->mode & O_ACCMODE) == O_WRONLY) {
                fid = v9inode->writeback_fid;
                BUG_ON(!fid);
        }

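        /* Pin the fid for the life of the request; v9fs_free_request() drops it. */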
        p9_fid_get(fid);
        rreq->netfs_priv = fid;
        return 0;
}

/**
 * v9fs_free_request - Clean up a request initialised by v9fs_init_request
 * @rreq: The I/O request to clean up
 */
static void v9fs_free_request(struct netfs_io_request *rreq)
{
        struct p9_fid *fid = rreq->netfs_priv;

        p9_fid_put(fid);
}

/**
 * v9fs_begin_cache_operation - Begin a cache operation for a read
 * @rreq: The read request
 */
static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
{
#ifdef CONFIG_9P_FSCACHE
        struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

        return fscache_begin_read_operation(&rreq->cache_resources, cookie);
#else
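        /* Tell the netfs core that no cache is available. */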
        return -ENOBUFS;
#endif
}

const struct netfs_request_ops v9fs_req_ops = {
        .init_request           = v9fs_init_request,
        .free_request           = v9fs_free_request,
        .begin_cache_operation  = v9fs_begin_cache_operation,
        .issue_read             = v9fs_issue_read,
};
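
/*
 * These ops are handed to the netfs library when the inode's netfs context
 * is set up; a minimal sketch of that hookup (the real call lives in the
 * inode initialisation code, not in this file):
 *
 *      netfs_inode_init(&v9inode->netfs, &v9fs_req_ops);
 */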

/**
 * v9fs_release_folio - release the private state associated with a folio
 * @folio: The folio to be released
 * @gfp: The caller's allocation restrictions
 *
 * Returns true if the folio can be released, false otherwise.
 */
static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
{
        struct inode *inode = folio_inode(folio);

        if (folio_test_private(folio))
                return false;
#ifdef CONFIG_9P_FSCACHE
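        /*
         * If the folio is still being written to the cache, it can only be
         * released once that write has completed, and we may only sleep
         * waiting for it when reclaim allows (not from kswapd, and only if
         * the caller permits FS recursion).
         */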
        if (folio_test_fscache(folio)) {
                if (current_is_kswapd() || !(gfp & __GFP_FS))
                        return false;
                folio_wait_fscache(folio);
        }
#endif
        fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
        return true;
}

static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
                                  size_t length)
{
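        /* Wait for any pending write to the cache before the folio is invalidated. */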
        folio_wait_fscache(folio);
}

static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
                                     bool was_async)
{
        struct v9fs_inode *v9inode = priv;
        __le32 version;

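        /*
         * If the write to the cache failed (for any reason other than a
         * lack of cache space), invalidate the cached range so that stale
         * data can't be read back later.
         */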
        if (IS_ERR_VALUE(transferred_or_error) &&
            transferred_or_error != -ENOBUFS) {
                version = cpu_to_le32(v9inode->qid.version);
                fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
                                   i_size_read(&v9inode->netfs.inode), 0);
        }
}

static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
        struct inode *inode = folio_inode(folio);
        struct v9fs_inode *v9inode = V9FS_I(inode);
        struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
        loff_t start = folio_pos(folio);
        loff_t i_size = i_size_read(inode);
        struct iov_iter from;
        size_t len = folio_size(folio);
        int err;

        if (start >= i_size)
                return 0; /* Simultaneous truncation occurred */

        len = min_t(loff_t, i_size - start, len);

        iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);

        /* We should always have the writeback fid set here */
        BUG_ON(!v9inode->writeback_fid);

        folio_wait_fscache(folio);
        folio_start_writeback(folio);

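        /* Write the folio contents to the server; errors come back via &err. */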
        p9_client_write(v9inode->writeback_fid, start, &from, &err);

        if (err == 0 &&
            fscache_cookie_enabled(cookie) &&
            test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
                folio_start_fscache(folio);
                fscache_write_to_cache(v9fs_inode_cookie(v9inode),
                                       folio_mapping(folio), start, len, i_size,
                                       v9fs_write_to_cache_done, v9inode,
                                       true);
        }

        folio_end_writeback(folio);
        return err;
}

static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct folio *folio = page_folio(page);
        int retval;

        p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

        retval = v9fs_vfs_write_folio_locked(folio);
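        /* -EAGAIN is transient: redirty the folio and let writeback retry it. */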
        if (retval < 0) {
                if (retval == -EAGAIN) {
                        folio_redirty_for_writepage(wbc, folio);
                        retval = 0;
                } else {
                        mapping_set_error(folio_mapping(folio), retval);
                }
        } else
                retval = 0;

        folio_unlock(folio);
        return retval;
}

static int v9fs_launder_folio(struct folio *folio)
{
        int retval;

        if (folio_clear_dirty_for_io(folio)) {
                retval = v9fs_vfs_write_folio_locked(folio);
                if (retval)
                        return retval;
        }
        folio_wait_fscache(folio);
        return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: The data/buffer to use
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() with the O_DIRECT flag, which would otherwise fail.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode. Hence, when this routine
 * is called through generic_file_aio_read(), the read/write fails with an
 * error.
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        loff_t pos = iocb->ki_pos;
        ssize_t n;
        int err = 0;

        if (iov_iter_rw(iter) == WRITE) {
                n = p9_client_write(file->private_data, pos, iter, &err);
                if (n) {
                        struct inode *inode = file_inode(file);
                        loff_t i_size = i_size_read(inode);

                        if (pos + n > i_size)
                                inode_add_bytes(inode, pos + n - i_size);
                }
        } else {
                n = p9_client_read(file->private_data, pos, iter, &err);
        }
        return n ? n : err;
}

static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
                            loff_t pos, unsigned int len,
                            struct page **subpagep, void **fsdata)
{
        int retval;
        struct folio *folio;
        struct v9fs_inode *v9inode = V9FS_I(mapping->host);

        p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

        BUG_ON(!v9inode->writeback_fid);

        /*
         * Prefetch the area to be written into the cache if we're caching
         * this file. We need to do this before we get a lock on the folio
         * in case there's more than one writer competing for the same
         * cache block.
         */
        retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len, &folio, fsdata);
        if (retval < 0)
                return retval;

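        /* netfs_write_begin() returns the folio locked and with a reference held. */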
        *subpagep = &folio->page;
        return retval;
}

static int v9fs_write_end(struct file *filp, struct address_space *mapping,
                          loff_t pos, unsigned int len, unsigned int copied,
                          struct page *subpage, void *fsdata)
{
        loff_t last_pos = pos + copied;
        struct folio *folio = page_folio(subpage);
        struct inode *inode = mapping->host;
        struct v9fs_inode *v9inode = V9FS_I(inode);

        p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

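        /*
         * If the folio wasn't fully up to date and the copy was short, we
         * can't mark it uptodate; report nothing copied so the caller
         * retries the write.
         */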
        if (!folio_test_uptodate(folio)) {
                if (unlikely(copied < len)) {
                        copied = 0;
                        goto out;
                }

                folio_mark_uptodate(folio);
        }

        /*
         * No need to use i_size_read() here, the i_size cannot change
         * under us because we hold the i_mutex.
         */
        if (last_pos > inode->i_size) {
                inode_add_bytes(inode, last_pos - inode->i_size);
                i_size_write(inode, last_pos);
                fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL, &last_pos);
        }
        folio_mark_dirty(folio);
out:
        folio_unlock(folio);
        folio_put(folio);

        return copied;
}

#ifdef CONFIG_9P_FSCACHE
/*
 * Mark a folio as having been made dirty and thus needing writeback. We also
 * need to pin the cache object to write back to.
 */
static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
        struct v9fs_inode *v9inode = V9FS_I(mapping->host);

        return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode));
}
#else
#define v9fs_dirty_folio filemap_dirty_folio
#endif

const struct address_space_operations v9fs_addr_operations = {
        .read_folio       = netfs_read_folio,
        .readahead        = netfs_readahead,
        .dirty_folio      = v9fs_dirty_folio,
        .writepage        = v9fs_vfs_writepage,
        .write_begin      = v9fs_write_begin,
        .write_end        = v9fs_write_end,
        .release_folio    = v9fs_release_folio,
        .invalidate_folio = v9fs_invalidate_folio,
        .launder_folio    = v9fs_launder_folio,
        .direct_IO        = v9fs_direct_IO,
};
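
/*
 * For reference, a minimal sketch of how these ops get attached to a regular
 * file's mapping during inode initialisation (the real assignment lives in
 * the inode setup code, not in this file):
 *
 *      inode->i_mapping->a_ops = &v9fs_addr_operations;
 */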