// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
#include "metric.h"

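/*
 * Translate the kernel's open(2) flags into their on-wire CEPH_O_*
 * equivalents for the MDS.  The access mode and the handful of creation
 * flags the MDS cares about are mapped; anything left over is only
 * reported via dout().
 */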
static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64

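/*
 * Pin pages from @iter with iov_iter_get_pages2() and describe them in
 * the caller-provided bio_vec array, batching at most
 * ITER_GET_BVECS_PAGES pages per call.  Returns the number of bytes
 * covered, or an error if nothing could be pinned.
 */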
static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
					    ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}

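/*
 * Drop the page references taken by iter_get_bvecs_alloc(), marking the
 * pages dirty first when they were the destination of a read, then free
 * the bio_vec array itself.
 */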
static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

static int ceph_init_file_info(struct inode *inode, struct file *file,
			       int fmode, bool isdir)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mount_options *opt =
		ceph_inode_to_client(&ci->netfs.inode)->mount_options;
	struct ceph_file_info *fi;
	int ret;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
	     inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi)
			return -ENOMEM;

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi)
			return -ENOMEM;

		if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
			fi->flags |= CEPH_F_SYNC;

		file->private_data = fi;
	}

	ceph_get_fmode(ci, fmode, 1);
	fi->fmode = fmode;

	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);
	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

	if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
		ret = ceph_uninline_data(file);
		if (ret < 0)
			goto error;
	}

	return 0;

error:
	ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
	ceph_put_fmode(ci, fi->fmode, 1);
	kmem_cache_free(ceph_file_cachep, fi);
	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return ret;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE);
		fallthrough;
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
					  S_ISDIR(inode->i_mode));
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode, int fmode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	__ceph_touch_fmode(ci, mdsc, fmode);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}

/* Clone the layout from a synchronous create, if the dir now has Dc caps */
static void
cache_file_layout(struct inode *dst, struct inode *src)
{
	struct ceph_inode_info *cdst = ceph_inode(dst);
	struct ceph_inode_info *csrc = ceph_inode(src);

	spin_lock(&cdst->i_ceph_lock);
	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
		       sizeof(cdst->i_cached_layout));
		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
				   ceph_try_get_string(csrc->i_layout.pool_ns));
	}
	spin_unlock(&cdst->i_ceph_lock);
}

/*
 * Try to set up an async create. We need caps, a file layout, and inode number,
 * and either a lease on the dentry or complete dir info. If any of those
 * criteria are not satisfied, then return false and the caller can go
 * synchronous.
 */
static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
				 struct ceph_file_layout *lo, u64 *pino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
	u64 ino;

	spin_lock(&ci->i_ceph_lock);
	/* No auth cap means no chance for Dc caps */
	if (!ci->i_auth_cap)
		goto no_async;

	/* Any delegated inos? */
	if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
		goto no_async;

	if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
		goto no_async;

	if ((__ceph_caps_issued(ci, NULL) & want) != want)
		goto no_async;

	if (d_in_lookup(dentry)) {
		if (!__ceph_dir_is_complete(ci))
			goto no_async;
		spin_lock(&dentry->d_lock);
		di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
		spin_unlock(&dentry->d_lock);
	} else if (atomic_read(&ci->i_shared_gen) !=
		   READ_ONCE(di->lease_shared_gen)) {
		goto no_async;
	}

	ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
	if (!ino)
		goto no_async;

	*pino = ino;
	ceph_take_cap_refs(ci, want, false);
	memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
	rcu_assign_pointer(lo->pool_ns,
			   ceph_try_get_string(ci->i_cached_layout.pool_ns));
	got = want;
no_async:
	spin_unlock(&ci->i_ceph_lock);
	return got;
}

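/*
 * Hand an unused delegated inode number back to the MDS session it came
 * from, e.g. when an async create attempt has to fall back to the
 * synchronous path.
 */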
static void restore_deleg_ino(struct inode *dir, u64 ino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_session *s = NULL;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		s = ceph_get_mds_session(ci->i_auth_cap->session);
	spin_unlock(&ci->i_ceph_lock);
	if (s) {
		int err = ceph_restore_deleg_ino(s, ino);
		if (err)
			pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
				ino, err);
		ceph_put_mds_session(s);
	}
}

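/*
 * Clear CEPH_I_ASYNC_CREATE and wake anyone waiting on the async create
 * to be acknowledged, then kick any cap flushes that were blocked on it.
 */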
static void wake_async_create_waiters(struct inode *inode,
				      struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
		ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
		wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
	}
	ceph_kick_flushing_inode_caps(session, ci);
	spin_unlock(&ci->i_ceph_lock);
}

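/*
 * Completion callback for an async create request.  On failure the
 * locally-instantiated inode is shut down and its waiters are woken; a
 * -EJUKEBOX result means the MDS changed and the caller must resubmit.
 */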
static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	struct dentry *dentry = req->r_dentry;
	struct inode *dinode = d_inode(dentry);
	struct inode *tinode = req->r_target_inode;
	int result = req->r_err ? req->r_err :
			le32_to_cpu(req->r_reply_info.head->result);

	WARN_ON_ONCE(dinode && tinode && dinode != tinode);

	/* MDS changed -- caller must resubmit */
	if (result == -EJUKEBOX)
		goto out;

	mapping_set_error(req->r_parent->i_mapping, result);

	if (result) {
		int pathlen = 0;
		u64 base = 0;
		char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
						  &base, 0);

		pr_warn("async create failure path=(%llx)%s result=%d!\n",
			base, IS_ERR(path) ? "<<bad>>" : path, result);
		ceph_mdsc_free_path(path, pathlen);

		ceph_dir_clear_complete(req->r_parent);
		if (!d_unhashed(dentry))
			d_drop(dentry);

		if (dinode) {
			mapping_set_error(dinode->i_mapping, result);
			ceph_inode_shutdown(dinode);
			wake_async_create_waiters(dinode, req->r_session);
		}
	}

	if (tinode) {
		u64 ino = ceph_vino(tinode).ino;

		if (req->r_deleg_ino != ino)
			pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
				__func__, req->r_err, req->r_deleg_ino, ino);

		mapping_set_error(tinode->i_mapping, result);
		wake_async_create_waiters(tinode, req->r_session);
	} else if (!result) {
		pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
			req->r_deleg_ino);
	}
out:
	ceph_mdsc_release_dir_caps(req);
}

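/*
 * Finish an async create locally: instantiate an in-core inode with the
 * attributes the MDS is expected to assign (using the delegated inode
 * number and cached layout), splice it into the dcache, and complete the
 * open without waiting for the MDS reply.
 */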
static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
				    struct file *file, umode_t mode,
				    struct ceph_mds_request *req,
				    struct ceph_acl_sec_ctx *as_ctx,
				    struct ceph_file_layout *lo)
{
	int ret;
	char xattr_buf[4];
	struct ceph_mds_reply_inode in = { };
	struct ceph_mds_reply_info_in iinfo = { .in = &in };
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	struct inode *inode;
	struct timespec64 now;
	struct ceph_string *pool_ns;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
	struct ceph_vino vino = { .ino = req->r_deleg_ino,
				  .snap = CEPH_NOSNAP };

	ktime_get_real_ts64(&now);

	inode = ceph_get_inode(dentry->d_sb, vino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	iinfo.inline_version = CEPH_INLINE_NONE;
	iinfo.change_attr = 1;
	ceph_encode_timespec64(&iinfo.btime, &now);

	if (req->r_pagelist) {
		iinfo.xattr_len = req->r_pagelist->length;
		iinfo.xattr_data = req->r_pagelist->mapped_tail;
	} else {
		/* fake it */
		iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
		iinfo.xattr_data = xattr_buf;
		memset(iinfo.xattr_data, 0, iinfo.xattr_len);
	}

	in.ino = cpu_to_le64(vino.ino);
	in.snapid = cpu_to_le64(CEPH_NOSNAP);
	in.version = cpu_to_le64(1);	// ???
	in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
	in.cap.cap_id = cpu_to_le64(1);
	in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
	in.cap.flags = CEPH_CAP_FLAG_AUTH;
	in.ctime = in.mtime = in.atime = iinfo.btime;
	in.truncate_seq = cpu_to_le32(1);
	in.truncate_size = cpu_to_le64(-1ULL);
	in.xattr_version = cpu_to_le64(1);
	in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
	if (dir->i_mode & S_ISGID) {
		in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));

		/* Directories always inherit the setgid bit. */
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
	}
	in.mode = cpu_to_le32((u32)mode);

	in.nlink = cpu_to_le32(1);
	in.max_size = cpu_to_le64(lo->stripe_unit);

	ceph_file_layout_to_legacy(lo, &in.layout);
	/* lo is private, so pool_ns can't change */
	pool_ns = rcu_dereference_raw(lo->pool_ns);
	if (pool_ns) {
		iinfo.pool_ns_len = pool_ns->len;
		iinfo.pool_ns_data = pool_ns->str;
	}

	down_read(&mdsc->snap_rwsem);
	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
			      req->r_fmode, NULL);
	up_read(&mdsc->snap_rwsem);
	if (ret) {
		dout("%s failed to fill inode: %d\n", __func__, ret);
		ceph_dir_clear_complete(dir);
		if (!d_unhashed(dentry))
			d_drop(dentry);
		if (inode->i_state & I_NEW)
			discard_new_inode(inode);
	} else {
		struct dentry *dn;

		dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
		     vino.ino, ceph_ino(dir), dentry->d_name.name);
		ceph_dir_clear_ordered(dir);
		ceph_init_inode_acls(inode, as_ctx);
		if (inode->i_state & I_NEW) {
			/*
			 * If it's not I_NEW, then someone created this before
			 * we got here. Assume the server is aware of it at
			 * that point and don't worry about setting
			 * CEPH_I_ASYNC_CREATE.
			 */
			ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
			unlock_new_inode(inode);
		}
		if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
			if (!d_unhashed(dentry))
				d_drop(dentry);
			dn = d_splice_alias(inode, dentry);
			WARN_ON_ONCE(dn && dn != dentry);
		}
		file->f_mode |= FMODE_CREATED;
		ret = finish_open(file, dentry, ceph_open);
	}

	spin_lock(&dentry->d_lock);
	di->flags &= ~CEPH_DENTRY_ASYNC_CREATE;
	wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT);
	spin_unlock(&dentry->d_lock);

	return ret;
}

/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acl_sec_ctx as_ctx = {};
	bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	err = ceph_wait_on_conflict_unlink(dentry);
	if (err)
		return err;
	/*
	 * Do not truncate the file, since atomic_open is called before the
	 * permission check. The caller will do the truncation afterward.
	 */
	flags &= ~O_TRUNC;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &as_ctx);
		if (err < 0)
			return err;
		err = ceph_security_init_secctx(dentry, mode, &as_ctx);
		if (err < 0)
			goto out_ctx;
		/* Async create can't handle more than a page of xattrs */
		if (as_ctx.pagelist &&
		    !list_is_singular(&as_ctx.pagelist->head))
			try_async = false;
	} else if (!d_in_lookup(dentry)) {
		/* If it's not being looked up, it's negative */
		return -ENOENT;
	}
retry:
	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_ctx;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);
	req->r_parent = dir;
	ihold(dir);

	if (flags & O_CREAT) {
		struct ceph_file_layout lo;

		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (as_ctx.pagelist) {
			req->r_pagelist = as_ctx.pagelist;
			as_ctx.pagelist = NULL;
		}
		if (try_async &&
		    (req->r_dir_caps =
		      try_prep_async_create(dir, dentry, &lo,
					    &req->r_deleg_ino))) {
			struct ceph_dentry_info *di = ceph_dentry(dentry);

			set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
			req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
			req->r_callback = ceph_async_create_cb;

			spin_lock(&dentry->d_lock);
			di->flags |= CEPH_DENTRY_ASYNC_CREATE;
			spin_unlock(&dentry->d_lock);

			err = ceph_mdsc_submit_request(mdsc, dir, req);
			if (!err) {
				err = ceph_finish_async_create(dir, dentry,
							file, mode, req,
							&as_ctx, &lo);
			} else if (err == -EJUKEBOX) {
				restore_deleg_ino(dir, req->r_deleg_ino);
				ceph_mdsc_put_request(req);
				try_async = false;
				ceph_put_string(rcu_dereference_raw(lo.pool_ns));
				goto retry;
			}
			ceph_put_string(rcu_dereference_raw(lo.pool_ns));
			goto out_req;
		}
	}

	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
	if (err == -ENOENT) {
		dentry = ceph_handle_snapdir(req, dentry);
		if (IS_ERR(dentry)) {
			err = PTR_ERR(dentry);
			goto out_req;
		}
		err = 0;
	}

	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			struct inode *newino = d_inode(dentry);

			cache_file_layout(dir, newino);
			ceph_init_inode_acls(newino, &as_ctx);
			file->f_mode |= FMODE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open);
	}
out_req:
	ceph_mdsc_put_request(req);
out_ctx:
	ceph_release_acl_sec_ctx(&as_ctx);
	dout("atomic_open result=%d\n", err);
	return err;
}

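/*
 * Drop the fmode reference taken at open time and free the per-file
 * (or per-directory-file) private data.
 */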
int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode, 1);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
		ceph_put_fmode(ci, fi->fmode, 1);

		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

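/*
 * retry_op values handed back to ceph_read_iter(): HAVE_RETRIED marks a
 * read that has already been retried once, CHECK_EOF asks for an i_size
 * re-check, and READ_INLINE redirects the read to inline data.
 */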
enum {
	HAVE_RETRIED = 1,
	CHECK_EOF = 2,
	READ_INLINE = 3,
};

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.  (That's not
 * atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we only
 * need to return a short read to the caller if we hit EOF.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *retry_op)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	ssize_t ret;
	u64 off = iocb->ki_pos;
	u64 len = iov_iter_count(to);
	u64 i_size = i_size_read(inode);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping,
					   off, off + len - 1);
	if (ret < 0)
		return ret;

	ret = 0;
	while ((len = iov_iter_count(to)) > 0) {
		struct ceph_osd_request *req;
		struct page **pages;
		int num_pages;
		size_t page_off;
		bool more;
		int idx;
		size_t left;

		req = ceph_osdc_new_request(osdc, &ci->i_layout,
					ci->i_vino, off, &len, 0, 1,
					CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
					NULL, ci->i_truncate_seq,
					ci->i_truncate_size, false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		more = len < iov_iter_count(to);

		num_pages = calc_pages_for(off, len);
		page_off = off & ~PAGE_MASK;
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
						 false, false);
		ceph_osdc_start_request(osdc, req);
		ret = ceph_osdc_wait_request(osdc, req);

		ceph_update_read_metrics(&fsc->mdsc->metric,
					 req->r_start_latency,
					 req->r_end_latency,
					 len, ret);

		ceph_osdc_put_request(req);

		i_size = i_size_read(inode);
		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
		     off, len, ret, i_size, (more ? " MORE" : ""));

		if (ret == -ENOENT)
			ret = 0;
		if (ret >= 0 && ret < len && (off + ret < i_size)) {
			int zlen = min(len - ret, i_size - off - ret);
			int zoff = page_off + ret;
			dout("sync_read zero gap %llu~%llu\n",
			     off + ret, off + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		idx = 0;
		left = ret > 0 ? ret : 0;
		while (left > 0) {
			size_t len, copied;
			page_off = off & ~PAGE_MASK;
			len = min_t(size_t, left, PAGE_SIZE - page_off);
			SetPageUptodate(pages[idx]);
			copied = copy_page_to_iter(pages[idx++],
						   page_off, len, to);
			off += copied;
			left -= copied;
			if (copied < len) {
				ret = -EFAULT;
				break;
			}
		}
		ceph_release_page_vector(pages, num_pages);

		if (ret < 0) {
			if (ret == -EBLOCKLISTED)
				fsc->blocklisted = true;
			break;
		}

		if (off >= i_size || !more)
			break;
	}

	if (off > iocb->ki_pos) {
		if (off >= i_size) {
			*retry_op = CHECK_EOF;
			ret = i_size - iocb->ki_pos;
			iocb->ki_pos = i_size;
		} else {
			ret = off - iocb->ki_pos;
			iocb->ki_pos = off;
		}
	}

	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
	return ret;
}

struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

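/*
 * Called once per completed OSD sub-request; when the last one finishes,
 * update i_size/dirty caps for writes, drop the cap references taken in
 * ceph_direct_read_write(), and complete the original iocb.
 */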
static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
		inode_dio_end(inode);

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

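/*
 * OSD request completion callback for direct I/O: zero-fill short reads
 * inside i_size, record latency metrics, requeue writes that raced with a
 * snapshot (-EOLDSNAPC) via the workqueue, then feed the result into
 * ceph_aio_complete().
 */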
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
	unsigned int len = osd_data->bvec_pos.iter.bi_size;

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->inode_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && len > rc) {
			struct iov_iter i;
			int zlen = len - rc;

			/*
			 * If the read is satisfied by a single OSD request,
			 * it can pass EOF.  Otherwise the read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs, len);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	/* r_start_latency == 0 means the request was not submitted */
	if (req->r_start_latency) {
		if (aio_req->write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, len, rc);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, len, rc);
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}

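/*
 * Workqueue handler that rebuilds and resubmits a direct-I/O write which
 * got -EOLDSNAPC, using the most recent snap context so the write applies
 * after the pending snapshot.
 */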
static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
				      false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ceph_osdc_start_request(req->r_osdc, req);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

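/*
 * Perform an O_DIRECT read or write straight between user memory and the
 * OSDs, splitting the I/O at object boundaries.  For asynchronous iocbs
 * the sub-requests are queued and completed via ceph_aio_complete_req();
 * otherwise each sub-request is waited on synchronously.
 */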
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_client_metric *metric = &fsc->mdsc->metric;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret = 0;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && user_backed_iter(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc ? snapc->seq : 0);

	if (write) {
		int ret2;

		ceph_fscache_invalidate(inode, true);

		ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count - 1) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO only when the IO is
		 * within i_size or can be satisfied by a single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range.  this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   PAGE_ALIGN(pos + len) - 1);

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ceph_osdc_start_request(req->r_osdc, req);
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		if (write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, len, ret);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, len, ret);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, READ, bvecs, num_pages, len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		inode_dio_begin(inode);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_private_item);
			list_del_init(&req->r_private_item);
			if (ret >= 0)
				ceph_osdc_start_request(req->r_osdc, req);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping,
					   pos, pos + count - 1);
	if (ret < 0)
		return ret;

	ceph_fscache_invalidate(inode, false);
	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count - 1) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		req->r_mtime = mtime;
		ceph_osdc_start_request(&fsc->client->osdc, req);
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
					  req->r_end_latency, len, ret);
out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}

	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
	ssize_t ret;
	int want = 0, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (ceph_inode_is_shutdown(inode))
		return -ESTALE;

	if (direct_lock)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_read(inode);

	if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
		want |= CEPH_CAP_FILE_CACHE;
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want |= CEPH_CAP_FILE_LAZYIO;

	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
	if (ret < 0) {
		if (direct_lock)
			ceph_end_io_direct(inode);
		else
			ceph_end_io_read(inode);
		return ret;
	}

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (!ceph_has_inline_data(ci)) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}

	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (direct_lock)
		ceph_end_io_direct(inode);
	else
		ceph_end_io_read(inode);

	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

1745 /*
1746 * Take cap references to avoid releasing caps to MDS mid-write.
1747 *
1748 * If we are synchronous, and write with an old snap context, the OSD
1749 * may return EOLDSNAPC. In that case, retry the write.. _after_
1750 * dropping our cap refs and allowing the pending snap to logically
1751 * complete _before_ this write occurs.
1752 *
1753 * If we are near ENOSPC, write synchronously.
1754 */
ceph_write_iter(struct kiocb * iocb,struct iov_iter * from)1755 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1756 {
1757 struct file *file = iocb->ki_filp;
1758 struct ceph_file_info *fi = file->private_data;
1759 struct inode *inode = file_inode(file);
1760 struct ceph_inode_info *ci = ceph_inode(inode);
1761 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1762 struct ceph_osd_client *osdc = &fsc->client->osdc;
1763 struct ceph_cap_flush *prealloc_cf;
1764 ssize_t count, written = 0;
1765 int err, want = 0, got;
1766 bool direct_lock = false;
1767 u32 map_flags;
1768 u64 pool_flags;
1769 loff_t pos;
1770 loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1771
1772 if (ceph_inode_is_shutdown(inode))
1773 return -ESTALE;
1774
1775 if (ceph_snap(inode) != CEPH_NOSNAP)
1776 return -EROFS;
1777
1778 prealloc_cf = ceph_alloc_cap_flush();
1779 if (!prealloc_cf)
1780 return -ENOMEM;
1781
1782 if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
1783 direct_lock = true;
1784
1785 retry_snap:
1786 if (direct_lock)
1787 ceph_start_io_direct(inode);
1788 else
1789 ceph_start_io_write(inode);
1790
1791 /* We can write back this queue in page reclaim */
1792 current->backing_dev_info = inode_to_bdi(inode);
1793
1794 if (iocb->ki_flags & IOCB_APPEND) {
1795 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1796 if (err < 0)
1797 goto out;
1798 }
1799
1800 err = generic_write_checks(iocb, from);
1801 if (err <= 0)
1802 goto out;
1803
1804 pos = iocb->ki_pos;
1805 if (unlikely(pos >= limit)) {
1806 err = -EFBIG;
1807 goto out;
1808 } else {
1809 iov_iter_truncate(from, limit - pos);
1810 }
1811
1812 count = iov_iter_count(from);
1813 if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1814 err = -EDQUOT;
1815 goto out;
1816 }
1817
1818 down_read(&osdc->lock);
1819 map_flags = osdc->osdmap->flags;
1820 pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
1821 up_read(&osdc->lock);
1822 if ((map_flags & CEPH_OSDMAP_FULL) ||
1823 (pool_flags & CEPH_POOL_FLAG_FULL)) {
1824 err = -ENOSPC;
1825 goto out;
1826 }
1827
1828 err = file_remove_privs(file);
1829 if (err)
1830 goto out;
1831
1832 dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1833 inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1834 if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
1835 want |= CEPH_CAP_FILE_BUFFER;
1836 if (fi->fmode & CEPH_FILE_MODE_LAZY)
1837 want |= CEPH_CAP_FILE_LAZYIO;
1838 got = 0;
1839 err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
1840 if (err < 0)
1841 goto out;
1842
1843 err = file_update_time(file);
1844 if (err)
1845 goto out_caps;
1846
1847 inode_inc_iversion_raw(inode);
1848
1849 dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1850 inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1851
1852 if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1853 (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1854 (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1855 struct ceph_snap_context *snapc;
1856 struct iov_iter data;
1857
1858 spin_lock(&ci->i_ceph_lock);
1859 if (__ceph_have_pending_cap_snap(ci)) {
1860 struct ceph_cap_snap *capsnap =
1861 list_last_entry(&ci->i_cap_snaps,
1862 struct ceph_cap_snap,
1863 ci_item);
1864 snapc = ceph_get_snap_context(capsnap->context);
1865 } else {
1866 BUG_ON(!ci->i_head_snapc);
1867 snapc = ceph_get_snap_context(ci->i_head_snapc);
1868 }
1869 spin_unlock(&ci->i_ceph_lock);
1870
1871 /* we might need to revert back to that point */
1872 data = *from;
1873 if (iocb->ki_flags & IOCB_DIRECT)
1874 written = ceph_direct_read_write(iocb, &data, snapc,
1875 &prealloc_cf);
1876 else
1877 written = ceph_sync_write(iocb, &data, pos, snapc);
1878 if (direct_lock)
1879 ceph_end_io_direct(inode);
1880 else
1881 ceph_end_io_write(inode);
1882 if (written > 0)
1883 iov_iter_advance(from, written);
1884 ceph_put_snap_context(snapc);
1885 } else {
1886 /*
1887 * No need to acquire the i_truncate_mutex. Because
1888 * the MDS revokes Fwb caps before sending truncate
1889 * message to us. We can't get Fwb cap while there
1890 * are pending vmtruncate. So write and vmtruncate
1891 * can not run at the same time
1892 */
1893 written = generic_perform_write(iocb, from);
1894 if (likely(written >= 0))
1895 iocb->ki_pos = pos + written;
1896 ceph_end_io_write(inode);
1897 }
1898
1899 if (written >= 0) {
1900 int dirty;
1901
1902 spin_lock(&ci->i_ceph_lock);
1903 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1904 &prealloc_cf);
1905 spin_unlock(&ci->i_ceph_lock);
1906 if (dirty)
1907 __mark_inode_dirty(inode, dirty);
1908 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1909 ceph_check_caps(ci, CHECK_CAPS_FLUSH, NULL);
1910 }
1911
1912 dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1913 inode, ceph_vinop(inode), pos, (unsigned)count,
1914 ceph_cap_string(got));
1915 ceph_put_cap_refs(ci, got);
1916
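/*
 * -EOLDSNAPC means the snap context changed while the write was in
 * flight; retry the whole write against the new context.
 */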
1917 if (written == -EOLDSNAPC) {
1918 dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1919 inode, ceph_vinop(inode), pos, (unsigned)count);
1920 goto retry_snap;
1921 }
1922
1923 if (written >= 0) {
1924 if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
1925 (pool_flags & CEPH_POOL_FLAG_NEARFULL))
1926 iocb->ki_flags |= IOCB_DSYNC;
1927 written = generic_write_sync(iocb, written);
1928 }
1929
1930 goto out_unlocked;
1931 out_caps:
1932 ceph_put_cap_refs(ci, got);
1933 out:
1934 if (direct_lock)
1935 ceph_end_io_direct(inode);
1936 else
1937 ceph_end_io_write(inode);
1938 out_unlocked:
1939 ceph_free_cap_flush(prealloc_cf);
1940 current->backing_dev_info = NULL;
1941 return written ? written : err;
1942 }
1943
1944 /*
1945 * llseek. be sure to verify file size on SEEK_END.
1946 */
1947 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1948 {
1949 if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1950 struct inode *inode = file_inode(file);
1951 int ret;
1952
1953 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1954 if (ret < 0)
1955 return ret;
1956 }
1957 return generic_file_llseek(file, offset, whence);
1958 }
1959
1960 static inline void ceph_zero_partial_page(
1961 struct inode *inode, loff_t offset, unsigned size)
1962 {
1963 struct page *page;
1964 pgoff_t index = offset >> PAGE_SHIFT;
1965
1966 page = find_lock_page(inode->i_mapping, index);
1967 if (page) {
1968 wait_on_page_writeback(page);
1969 zero_user(page, offset & (PAGE_SIZE - 1), size);
1970 unlock_page(page);
1971 put_page(page);
1972 }
1973 }
1974
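/*
 * Zero a range of the page cache: a partial page at the head is zeroed
 * in place, whole pages in the middle are dropped from the cache, and a
 * partial page at the tail is zeroed in place.
 */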
1975 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1976 loff_t length)
1977 {
1978 loff_t nearly = round_up(offset, PAGE_SIZE);
1979 if (offset < nearly) {
1980 loff_t size = nearly - offset;
1981 if (length < size)
1982 size = length;
1983 ceph_zero_partial_page(inode, offset, size);
1984 offset += size;
1985 length -= size;
1986 }
1987 if (length >= PAGE_SIZE) {
1988 loff_t size = round_down(length, PAGE_SIZE);
1989 truncate_pagecache_range(inode, offset, offset + size - 1);
1990 offset += size;
1991 length -= size;
1992 }
1993 if (length)
1994 ceph_zero_partial_page(inode, offset, length);
1995 }
1996
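/*
 * Zero part of a single RADOS object. A NULL @length means "to the end
 * of the object": the object is then deleted outright, except at file
 * offset 0, where it is truncated to zero length instead. A -ENOENT
 * reply is fine, since a missing object already reads back as zeroes.
 */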
1997 static int ceph_zero_partial_object(struct inode *inode,
1998 loff_t offset, loff_t *length)
1999 {
2000 struct ceph_inode_info *ci = ceph_inode(inode);
2001 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2002 struct ceph_osd_request *req;
2003 int ret = 0;
2004 loff_t zero = 0;
2005 int op;
2006
2007 if (!length) {
2008 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
2009 length = &zero;
2010 } else {
2011 op = CEPH_OSD_OP_ZERO;
2012 }
2013
2014 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
2015 ceph_vino(inode),
2016 offset, length,
2017 0, 1, op,
2018 CEPH_OSD_FLAG_WRITE,
2019 NULL, 0, 0, false);
2020 if (IS_ERR(req)) {
2021 ret = PTR_ERR(req);
2022 goto out;
2023 }
2024
2025 req->r_mtime = inode->i_mtime;
2026 ceph_osdc_start_request(&fsc->client->osdc, req);
2027 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
2028 if (ret == -ENOENT)
2029 ret = 0;
2030 ceph_osdc_put_request(req);
2031
2032 out:
2033 return ret;
2034 }
2035
2036 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
2037 {
2038 int ret = 0;
2039 struct ceph_inode_info *ci = ceph_inode(inode);
2040 s32 stripe_unit = ci->i_layout.stripe_unit;
2041 s32 stripe_count = ci->i_layout.stripe_count;
2042 s32 object_size = ci->i_layout.object_size;
2043 u64 object_set_size = (u64) object_size * stripe_count; /* avoid s32 overflow */
2044 u64 nearly, t;
2045
2046 /* round offset up to next period boundary */
2047 nearly = offset + object_set_size - 1;
2048 t = nearly;
2049 nearly -= do_div(t, object_set_size);
2050
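/*
 * e.g. with a 4M object_size and stripe_count 2, object_set_size is 8M:
 * an offset of 5M rounds "nearly" up to 8M, so the first loop below
 * zeroes 5M..8M object by object before whole periods are handled.
 */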
2051 while (length && offset < nearly) {
2052 loff_t size = length;
2053 ret = ceph_zero_partial_object(inode, offset, &size);
2054 if (ret < 0)
2055 return ret;
2056 offset += size;
2057 length -= size;
2058 }
2059 while (length >= object_set_size) {
2060 int i;
2061 loff_t pos = offset;
2062 for (i = 0; i < stripe_count; ++i) {
2063 ret = ceph_zero_partial_object(inode, pos, NULL);
2064 if (ret < 0)
2065 return ret;
2066 pos += stripe_unit;
2067 }
2068 offset += object_set_size;
2069 length -= object_set_size;
2070 }
2071 while (length) {
2072 loff_t size = length;
2073 ret = ceph_zero_partial_object(inode, offset, &size);
2074 if (ret < 0)
2075 return ret;
2076 offset += size;
2077 length -= size;
2078 }
2079 return ret;
2080 }
2081
2082 static long ceph_fallocate(struct file *file, int mode,
2083 loff_t offset, loff_t length)
2084 {
2085 struct ceph_file_info *fi = file->private_data;
2086 struct inode *inode = file_inode(file);
2087 struct ceph_inode_info *ci = ceph_inode(inode);
2088 struct ceph_cap_flush *prealloc_cf;
2089 int want, got = 0;
2090 int dirty;
2091 int ret = 0;
2092 loff_t endoff = 0;
2093 loff_t size;
2094
2095 if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2096 return -EOPNOTSUPP;
2097
2098 if (!S_ISREG(inode->i_mode))
2099 return -EOPNOTSUPP;
2100
2101 prealloc_cf = ceph_alloc_cap_flush();
2102 if (!prealloc_cf)
2103 return -ENOMEM;
2104
2105 inode_lock(inode);
2106
2107 if (ceph_snap(inode) != CEPH_NOSNAP) {
2108 ret = -EROFS;
2109 goto unlock;
2110 }
2111
2112 size = i_size_read(inode);
2113
2114 /* Are we punching a hole beyond EOF? */
2115 if (offset >= size)
2116 goto unlock;
2117 if ((offset + length) > size)
2118 length = size - offset;
2119
2120 if (fi->fmode & CEPH_FILE_MODE_LAZY)
2121 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2122 else
2123 want = CEPH_CAP_FILE_BUFFER;
2124
2125 ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
2126 if (ret < 0)
2127 goto unlock;
2128
2129 filemap_invalidate_lock(inode->i_mapping);
2130 ceph_fscache_invalidate(inode, false);
2131 ceph_zero_pagecache_range(inode, offset, length);
2132 ret = ceph_zero_objects(inode, offset, length);
2133
2134 if (!ret) {
2135 spin_lock(&ci->i_ceph_lock);
2136 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2137 &prealloc_cf);
2138 spin_unlock(&ci->i_ceph_lock);
2139 if (dirty)
2140 __mark_inode_dirty(inode, dirty);
2141 }
2142 filemap_invalidate_unlock(inode->i_mapping);
2143
2144 ceph_put_cap_refs(ci, got);
2145 unlock:
2146 inode_unlock(inode);
2147 ceph_free_cap_flush(prealloc_cf);
2148 return ret;
2149 }
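
/*
 * Illustrative userspace sketch (not part of this file): the only
 * fallocate mode the handler above accepts is hole punching with
 * KEEP_SIZE, e.g.
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 */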
2150
2151 /*
2152 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2153 * src_ci. Two attempts are made to obtain both caps, and an error is
2154 * returned if this fails; zero is returned on success.
2155 */
2156 static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2157 struct file *dst_filp,
2158 loff_t dst_endoff, int *dst_got)
2159 {
2160 int ret = 0;
2161 bool retrying = false;
2162
2163 retry_caps:
2164 ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2165 dst_endoff, dst_got);
2166 if (ret < 0)
2167 return ret;
2168
2169 /*
2170 * Since we're already holding the FILE_WR capability for the dst file,
2171 * we would risk a deadlock by using ceph_get_caps. Thus, we'll do some
2172 * retry dance instead to try to get both capabilities.
2173 */
2174 ret = ceph_try_get_caps(file_inode(src_filp),
2175 CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2176 false, src_got);
2177 if (ret <= 0) {
2178 /* Start by dropping dst_ci caps and getting src_ci caps */
2179 ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2180 if (retrying) {
2181 if (!ret)
2182 /* ceph_try_get_caps masks EAGAIN */
2183 ret = -EAGAIN;
2184 return ret;
2185 }
2186 ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2187 CEPH_CAP_FILE_SHARED, -1, src_got);
2188 if (ret < 0)
2189 return ret;
2190 /* ... drop src_ci caps too, and retry */
2191 ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2192 retrying = true;
2193 goto retry_caps;
2194 }
2195 return ret;
2196 }
2197
2198 static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2199 struct ceph_inode_info *dst_ci, int dst_got)
2200 {
2201 ceph_put_cap_refs(src_ci, src_got);
2202 ceph_put_cap_refs(dst_ci, dst_got);
2203 }
2204
2205 /*
2206 * This function does several size-related checks, returning an error if:
2207 * - source file is smaller than off+len
2208 * - destination file size is not OK (inode_newsize_ok())
2209 * - max bytes quota is exceeded
2210 */
2211 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2212 loff_t src_off, loff_t dst_off, size_t len)
2213 {
2214 loff_t size, endoff;
2215
2216 size = i_size_read(src_inode);
2217 /*
2218 * Don't copy beyond source file EOF. Instead of simply setting length
2219 * to (size - src_off), just drop to VFS default implementation, as the
2220 * local i_size may be stale due to other clients writing to the source
2221 * inode.
2222 */
2223 if (src_off + len > size) {
2224 dout("Copy beyond EOF (%llu + %zu > %llu)\n",
2225 src_off, len, size);
2226 return -EOPNOTSUPP;
2227 }
2228 size = i_size_read(dst_inode);
2229
2230 endoff = dst_off + len;
2231 if (inode_newsize_ok(dst_inode, endoff))
2232 return -EOPNOTSUPP;
2233
2234 if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2235 return -EDQUOT;
2236
2237 return 0;
2238 }
2239
2240 static struct ceph_osd_request *
2241 ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
2242 u64 src_snapid,
2243 struct ceph_object_id *src_oid,
2244 struct ceph_object_locator *src_oloc,
2245 struct ceph_object_id *dst_oid,
2246 struct ceph_object_locator *dst_oloc,
2247 u32 truncate_seq, u64 truncate_size)
2248 {
2249 struct ceph_osd_request *req;
2250 int ret;
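/*
 * fadvise hints for the OSD: access is sequential, the source data need
 * not stay cached (NOCACHE), and the freshly written destination copy
 * is unlikely to be read back soon (DONTNEED).
 */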
2251 u32 src_fadvise_flags =
2252 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2253 CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
2254 u32 dst_fadvise_flags =
2255 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2256 CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
2257
2258 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
2259 if (!req)
2260 return ERR_PTR(-ENOMEM);
2261
2262 req->r_flags = CEPH_OSD_FLAG_WRITE;
2263
2264 ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
2265 ceph_oid_copy(&req->r_t.base_oid, dst_oid);
2266
2267 ret = osd_req_op_copy_from_init(req, src_snapid, 0,
2268 src_oid, src_oloc,
2269 src_fadvise_flags,
2270 dst_fadvise_flags,
2271 truncate_seq,
2272 truncate_size,
2273 CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2274 if (ret)
2275 goto out;
2276
2277 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
2278 if (ret)
2279 goto out;
2280
2281 return req;
2282
2283 out:
2284 ceph_osdc_put_request(req);
2285 return ERR_PTR(ret);
2286 }
2287
2288 static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2289 struct ceph_inode_info *dst_ci, u64 *dst_off,
2290 struct ceph_fs_client *fsc,
2291 size_t len, unsigned int flags)
2292 {
2293 struct ceph_object_locator src_oloc, dst_oloc;
2294 struct ceph_object_id src_oid, dst_oid;
2295 struct ceph_osd_client *osdc;
2296 struct ceph_osd_request *req;
2297 size_t bytes = 0;
2298 u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2299 u32 src_objlen, dst_objlen;
2300 u32 object_size = src_ci->i_layout.object_size;
2301 int ret;
2302
2303 src_oloc.pool = src_ci->i_layout.pool_id;
2304 src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2305 dst_oloc.pool = dst_ci->i_layout.pool_id;
2306 dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2307 osdc = &fsc->client->osdc;
2308
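/*
 * Only full-object copies are issued here; the caller handles any
 * unaligned head and tail with do_splice_direct().
 */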
2309 while (len >= object_size) {
2310 ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2311 object_size, &src_objnum,
2312 &src_objoff, &src_objlen);
2313 ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2314 object_size, &dst_objnum,
2315 &dst_objoff, &dst_objlen);
2316 ceph_oid_init(&src_oid);
2317 ceph_oid_printf(&src_oid, "%llx.%08llx",
2318 src_ci->i_vino.ino, src_objnum);
2319 ceph_oid_init(&dst_oid);
2320 ceph_oid_printf(&dst_oid, "%llx.%08llx",
2321 dst_ci->i_vino.ino, dst_objnum);
2322 /* Do an object remote copy */
2323 req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
2324 &src_oid, &src_oloc,
2325 &dst_oid, &dst_oloc,
2326 dst_ci->i_truncate_seq,
2327 dst_ci->i_truncate_size);
2328 if (IS_ERR(req))
2329 ret = PTR_ERR(req);
2330 else {
2331 ceph_osdc_start_request(osdc, req);
2332 ret = ceph_osdc_wait_request(osdc, req);
2333 ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
2334 req->r_start_latency,
2335 req->r_end_latency,
2336 object_size, ret);
2337 ceph_osdc_put_request(req);
2338 }
2339 if (ret) {
2340 if (ret == -EOPNOTSUPP) {
2341 fsc->have_copy_from2 = false;
2342 pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
2343 }
2344 dout("ceph_osdc_copy_from returned %d\n", ret);
2345 if (!bytes)
2346 bytes = ret;
2347 goto out;
2348 }
2349 len -= object_size;
2350 bytes += object_size;
2351 *src_off += object_size;
2352 *dst_off += object_size;
2353 }
2354
2355 out:
2356 ceph_oloc_destroy(&src_oloc);
2357 ceph_oloc_destroy(&dst_oloc);
2358 return bytes;
2359 }
2360
2361 static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2362 struct file *dst_file, loff_t dst_off,
2363 size_t len, unsigned int flags)
2364 {
2365 struct inode *src_inode = file_inode(src_file);
2366 struct inode *dst_inode = file_inode(dst_file);
2367 struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2368 struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2369 struct ceph_cap_flush *prealloc_cf;
2370 struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
2371 loff_t size;
2372 ssize_t ret = -EIO, bytes;
2373 u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2374 u32 src_objlen, dst_objlen;
2375 int src_got = 0, dst_got = 0, err, dirty;
2376
2377 if (src_inode->i_sb != dst_inode->i_sb) {
2378 struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
2379
2380 if (ceph_fsid_compare(&src_fsc->client->fsid,
2381 &dst_fsc->client->fsid)) {
2382 dout("Copying files across clusters: src: %pU dst: %pU\n",
2383 &src_fsc->client->fsid, &dst_fsc->client->fsid);
2384 return -EXDEV;
2385 }
2386 }
2387 if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2388 return -EROFS;
2389
2390 /*
2391 * Some of the checks below will return -EOPNOTSUPP, which will force a
2392 * fallback to the default VFS copy_file_range implementation. This is
2393 * desirable in several cases (e.g. when 'len' is smaller than the
2394 * size of the objects, or in cases where that would be more
2395 * efficient).
2396 */
2397
2398 if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2399 return -EOPNOTSUPP;
2400
2401 if (!src_fsc->have_copy_from2)
2402 return -EOPNOTSUPP;
2403
2404 /*
2405 * Striped file layouts require that we copy partial objects, but the
2406 * OSD copy-from operation only supports full-object copies. Limit
2407 * this to non-striped file layouts for now.
2408 */
2409 if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2410 (src_ci->i_layout.stripe_count != 1) ||
2411 (dst_ci->i_layout.stripe_count != 1) ||
2412 (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2413 dout("Invalid src/dst files layout\n");
2414 return -EOPNOTSUPP;
2415 }
2416
2417 if (len < src_ci->i_layout.object_size)
2418 return -EOPNOTSUPP; /* no remote copy will be done */
2419
2420 prealloc_cf = ceph_alloc_cap_flush();
2421 if (!prealloc_cf)
2422 return -ENOMEM;
2423
2424 /* Start by sync'ing the source and destination files */
2425 ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2426 if (ret < 0) {
2427 dout("failed to write src file (%zd)\n", ret);
2428 goto out;
2429 }
2430 ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2431 if (ret < 0) {
2432 dout("failed to write dst file (%zd)\n", ret);
2433 goto out;
2434 }
2435
2436 /*
2437 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2438 * clients may have dirty data in their caches. And OSDs know nothing
2439 * about caps, so they can't safely do the remote object copies.
2440 */
2441 err = get_rd_wr_caps(src_file, &src_got,
2442 dst_file, (dst_off + len), &dst_got);
2443 if (err < 0) {
2444 dout("get_rd_wr_caps returned %d\n", err);
2445 ret = -EOPNOTSUPP;
2446 goto out;
2447 }
2448
2449 ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2450 if (ret < 0)
2451 goto out_caps;
2452
2453 /* Drop dst file cached pages */
2454 ceph_fscache_invalidate(dst_inode, false);
2455 ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2456 dst_off >> PAGE_SHIFT,
2457 (dst_off + len) >> PAGE_SHIFT);
2458 if (ret < 0) {
2459 dout("Failed to invalidate inode pages (%zd)\n", ret);
2460 ret = 0; /* XXX */
2461 }
2462 ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2463 src_ci->i_layout.object_size,
2464 &src_objnum, &src_objoff, &src_objlen);
2465 ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2466 dst_ci->i_layout.object_size,
2467 &dst_objnum, &dst_objoff, &dst_objlen);
2468 /* object-level offsets need to be the same */
2469 if (src_objoff != dst_objoff) {
2470 ret = -EOPNOTSUPP;
2471 goto out_caps;
2472 }
2473
2474 /*
2475 * Do a manual copy if the object offset isn't object aligned.
2476 * 'src_objlen' contains the bytes left until the end of the object,
2477 * starting at src_off.
2478 */
2479 if (src_objoff) {
2480 dout("Initial partial copy of %u bytes\n", src_objlen);
2481
2482 /*
2483 * we need to temporarily drop all caps as we'll be calling
2484 * {read,write}_iter, which will get caps again.
2485 */
2486 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2487 ret = do_splice_direct(src_file, &src_off, dst_file,
2488 &dst_off, src_objlen, flags);
2489 /* Abort on short copies or on error */
2490 if (ret < src_objlen) {
2491 dout("Failed partial copy (%zd)\n", ret);
2492 goto out;
2493 }
2494 len -= ret;
2495 err = get_rd_wr_caps(src_file, &src_got,
2496 dst_file, (dst_off + len), &dst_got);
2497 if (err < 0)
2498 goto out;
2499 err = is_file_size_ok(src_inode, dst_inode,
2500 src_off, dst_off, len);
2501 if (err < 0)
2502 goto out_caps;
2503 }
2504
2505 size = i_size_read(dst_inode);
2506 bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
2507 src_fsc, len, flags);
2508 if (bytes <= 0) {
2509 if (!ret)
2510 ret = bytes;
2511 goto out_caps;
2512 }
2513 dout("Copied %zu bytes out of %zu\n", bytes, len);
2514 len -= bytes;
2515 ret += bytes;
2516
2517 file_update_time(dst_file);
2518 inode_inc_iversion_raw(dst_inode);
2519
2520 if (dst_off > size) {
2521 /* Let the MDS know about dst file size change */
2522 if (ceph_inode_set_size(dst_inode, dst_off) ||
2523 ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
2524 ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_FLUSH,
2525 NULL);
2526 }
2527 /* Mark Fw dirty */
2528 spin_lock(&dst_ci->i_ceph_lock);
2529 dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2530 spin_unlock(&dst_ci->i_ceph_lock);
2531 if (dirty)
2532 __mark_inode_dirty(dst_inode, dirty);
2533
2534 out_caps:
2535 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2536
2537 /*
2538 * Do the final manual copy if we still have some bytes left, unless
2539 * there were errors in remote object copies (len >= object_size).
2540 * the remote object copies failed (in which case len >= object_size).
2541 if (len && (len < src_ci->i_layout.object_size)) {
2542 dout("Final partial copy of %zu bytes\n", len);
2543 bytes = do_splice_direct(src_file, &src_off, dst_file,
2544 &dst_off, len, flags);
2545 if (bytes > 0)
2546 ret += bytes;
2547 else
2548 dout("Failed partial copy (%zd)\n", bytes);
2549 }
2550
2551 out:
2552 ceph_free_cap_flush(prealloc_cf);
2553
2554 return ret;
2555 }
2556
2557 static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2558 struct file *dst_file, loff_t dst_off,
2559 size_t len, unsigned int flags)
2560 {
2561 ssize_t ret;
2562
2563 ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2564 len, flags);
2565
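/*
 * -EOPNOTSUPP and -EXDEV from the Ceph-specific path fall back to the
 * VFS generic implementation.
 */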
2566 if (ret == -EOPNOTSUPP || ret == -EXDEV)
2567 ret = generic_copy_file_range(src_file, src_off, dst_file,
2568 dst_off, len, flags);
2569 return ret;
2570 }
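
/*
 * Userspace reaches the path above via copy_file_range(2), e.g.
 * (illustrative only):
 *
 *	ssize_t n = copy_file_range(src_fd, NULL, dst_fd, NULL, len, 0);
 */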
2571
2572 const struct file_operations ceph_file_fops = {
2573 .open = ceph_open,
2574 .release = ceph_release,
2575 .llseek = ceph_llseek,
2576 .read_iter = ceph_read_iter,
2577 .write_iter = ceph_write_iter,
2578 .mmap = ceph_mmap,
2579 .fsync = ceph_fsync,
2580 .lock = ceph_lock,
2581 .setlease = simple_nosetlease,
2582 .flock = ceph_flock,
2583 .splice_read = generic_file_splice_read,
2584 .splice_write = iter_file_splice_write,
2585 .unlocked_ioctl = ceph_ioctl,
2586 .compat_ioctl = compat_ptr_ioctl,
2587 .fallocate = ceph_fallocate,
2588 .copy_file_range = ceph_copy_file_range,
2589 };
2590