/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like. Any license provided herein, whether implied or
 * otherwise, applies only to this software file. Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bit.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"

#include "xfs_qm.h"


/*
 * returns the number of iovecs needed to log the given dquot item.
 */
/* ARGSUSED */
STATIC uint
xfs_qm_dquot_logitem_size(
	xfs_dq_logitem_t	*logitem)
{
	/*
	 * we need only two iovecs, one for the format, one for the real thing
	 */
	return (2);
}

/*
 * fills in the vector of log iovecs for the given dquot log item.
 */
STATIC void
xfs_qm_dquot_logitem_format(
	xfs_dq_logitem_t	*logitem,
	xfs_log_iovec_t		*logvec)
{
	ASSERT(logitem);
	ASSERT(logitem->qli_dquot);

	logvec->i_addr = (xfs_caddr_t)&logitem->qli_format;
	logvec->i_len = sizeof(xfs_dq_logformat_t);
	logvec++;
	logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core;
	logvec->i_len = sizeof(xfs_disk_dquot_t);

	ASSERT(2 == logitem->qli_item.li_desc->lid_size);
	logitem->qli_format.qlf_size = 2;

}

/*
 * Increment the pin count of the given dquot.
 * This value is protected by the pinlock spinlock in the xQM structure.
 */
STATIC void
xfs_qm_dquot_logitem_pin(
	xfs_dq_logitem_t	*logitem)
{
	unsigned long	s;
	xfs_dquot_t	*dqp;

	dqp = logitem->qli_dquot;
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	s = XFS_DQ_PINLOCK(dqp);
	dqp->q_pincount++;
	XFS_DQ_PINUNLOCK(dqp, s);
}

/*
 * Decrement the pin count of the given dquot, and wake up
 * anyone in xfs_qm_dqunpin_wait() if the count goes to 0. The
 * dquot must have been previously pinned with a call to
 * xfs_qm_dquot_logitem_pin().
 */
/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_unpin(
	xfs_dq_logitem_t	*logitem,
	int			stale)
{
	unsigned long	s;
	xfs_dquot_t	*dqp;

	dqp = logitem->qli_dquot;
	ASSERT(dqp->q_pincount > 0);
	s = XFS_DQ_PINLOCK(dqp);
	dqp->q_pincount--;
	if (dqp->q_pincount == 0) {
		sv_broadcast(&dqp->q_pinwait);
	}
	XFS_DQ_PINUNLOCK(dqp, s);
}

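/*
 * Unpin the dquot as its log item is removed from the given transaction.
 * There is nothing transaction-specific to clean up here, so this is
 * just a plain unpin.
 */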
/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_unpin_remove(
	xfs_dq_logitem_t	*logitem,
	xfs_trans_t		*tp)
{
	xfs_qm_dquot_logitem_unpin(logitem, 0);
}

/*
 * Given the logitem, this writes the corresponding dquot entry to disk
 * asynchronously. This is called with the dquot entry securely locked;
 * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
 * at the end.
 */
STATIC void
xfs_qm_dquot_logitem_push(
	xfs_dq_logitem_t	*logitem)
{
	xfs_dquot_t	*dqp;

	dqp = logitem->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));

	/*
	 * Since we were able to lock the dquot's flush lock and
	 * we found it on the AIL, the dquot must be dirty. This
	 * is because the dquot is removed from the AIL while still
	 * holding the flush lock in xfs_dqflush_done(). Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the dquot.
	 */
	xfs_qm_dqflush(dqp, XFS_B_DELWRI);
	xfs_dqunlock(dqp);
}

/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_qm_dquot_logitem_committed(
	xfs_dq_logitem_t	*l,
	xfs_lsn_t		lsn)
{
	/*
	 * We always re-log the entire dquot when it becomes dirty,
	 * so, the latest copy _is_ the only one that matters.
	 */
	return (lsn);
}


/*
 * This is called to wait for the given dquot to be unpinned.
 * Most of these pin/unpin routines are plagiarized from inode code.
 */
void
xfs_qm_dqunpin_wait(
	xfs_dquot_t	*dqp)
{
	SPLDECL(s);

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (dqp->q_pincount == 0) {
		return;
	}

	/*
	 * Give the log a push so we don't wait here too long.
	 */
	xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE);
	s = XFS_DQ_PINLOCK(dqp);
	if (dqp->q_pincount == 0) {
		XFS_DQ_PINUNLOCK(dqp, s);
		return;
	}
	sv_wait(&(dqp->q_pinwait), PINOD,
		&(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s);
}

/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't. So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as soon
 * as possible.
 *
 * We must not be holding the AIL_LOCK at this point. Calling incore() to
 * search the buffer cache can be a time consuming thing, and AIL_LOCK is a
 * spinlock.
 */
STATIC void
xfs_qm_dquot_logitem_pushbuf(
	xfs_dq_logitem_t	*qip)
{
	xfs_dquot_t	*dqp;
	xfs_mount_t	*mp;
	xfs_buf_t	*bp;
	uint		dopush;

	dqp = qip->qli_dquot;
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * The qli_pushbuf_flag keeps others from
	 * trying to duplicate our effort.
	 */
	ASSERT(qip->qli_pushbuf_flag != 0);
	ASSERT(qip->qli_push_owner == get_thread_id());

	/*
	 * If the flush lock isn't locked anymore, chances are that the
	 * dquot flush completed and the dquot was taken off the AIL.
	 * So, just get out.
	 */
	if ((valusema(&(dqp->q_flock)) > 0) ||
	    ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
		qip->qli_pushbuf_flag = 0;
		xfs_dqunlock(dqp);
		return;
	}
	mp = dqp->q_mount;
	bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
			XFS_QI_DQCHUNKLEN(mp),
			XFS_INCORE_TRYLOCK);
	if (bp != NULL) {
		if (XFS_BUF_ISDELAYWRITE(bp)) {
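			/*
			 * Only push the buffer out ourselves if the log
			 * item is still in the AIL and the dquot's flush
			 * lock is still held, i.e. the flush we are trying
			 * to help along has not completed yet.
			 */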
			dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
				  (valusema(&(dqp->q_flock)) <= 0));
			qip->qli_pushbuf_flag = 0;
			xfs_dqunlock(dqp);

			if (XFS_BUF_ISPINNED(bp)) {
				xfs_log_force(mp, (xfs_lsn_t)0,
					      XFS_LOG_FORCE);
			}
			if (dopush) {
#ifdef XFSRACEDEBUG
				delay_for_intr();
				delay(300);
#endif
				xfs_bawrite(mp, bp);
			} else {
				xfs_buf_relse(bp);
			}
		} else {
			qip->qli_pushbuf_flag = 0;
			xfs_dqunlock(dqp);
			xfs_buf_relse(bp);
		}
		return;
	}

	qip->qli_pushbuf_flag = 0;
	xfs_dqunlock(dqp);
}

/*
 * This is called to attempt to lock the dquot associated with this
 * dquot log item. Don't sleep on the dquot lock or the flush lock.
 * If the flush lock is already held, indicating that the dquot has
 * been or is in the process of being flushed, then see if we can
 * find the dquot's buffer in the buffer cache without sleeping. If
 * we can and it is marked delayed write, then we want to send it out.
 * We delay doing so until the push routine, though, to avoid sleeping
 * in any device strategy routines.
 */
STATIC uint
xfs_qm_dquot_logitem_trylock(
	xfs_dq_logitem_t	*qip)
{
	xfs_dquot_t	*dqp;
	uint		retval;

	dqp = qip->qli_dquot;
	if (dqp->q_pincount > 0)
		return (XFS_ITEM_PINNED);

	if (! xfs_qm_dqlock_nowait(dqp))
		return (XFS_ITEM_LOCKED);

	retval = XFS_ITEM_SUCCESS;
	if (! xfs_qm_dqflock_nowait(dqp)) {
		/*
		 * The dquot is already being flushed. It may have been
		 * flushed delayed write, however, and we don't want to
		 * get stuck waiting for that to complete. So, we want to check
		 * to see if we can lock the dquot's buffer without sleeping.
		 * If we can and it is marked for delayed write, then we
		 * hold it and send it out from the push routine. We don't
		 * want to do that now since we might sleep in the device
		 * strategy routine. We also don't want to grab the buffer lock
		 * here because we'd like not to call into the buffer cache
		 * while holding the AIL_LOCK.
		 * Make sure to only return PUSHBUF if we set pushbuf_flag
		 * ourselves. If someone else is doing it then we don't
		 * want to go to the push routine and duplicate their efforts.
		 */
		if (qip->qli_pushbuf_flag == 0) {
			qip->qli_pushbuf_flag = 1;
			ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno);
#ifdef DEBUG
			qip->qli_push_owner = get_thread_id();
#endif
			/*
			 * The dquot is left locked.
			 */
			retval = XFS_ITEM_PUSHBUF;
		} else {
			retval = XFS_ITEM_FLUSHING;
			xfs_dqunlock_nonotify(dqp);
		}
	}

	ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL);
	return (retval);
}


/*
 * Unlock the dquot associated with the log item.
 * Clear the fields of the dquot and dquot log item that
 * are specific to the current transaction. If the
 * hold flag is set, do not unlock the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_unlock(
	xfs_dq_logitem_t	*ql)
{
	xfs_dquot_t	*dqp;

	ASSERT(ql != NULL);
	dqp = ql->qli_dquot;
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * Clear the transaction pointer in the dquot
	 */
	dqp->q_transp = NULL;

	/*
	 * dquots are never 'held' from getting unlocked at the end of
	 * a transaction. Their locking and unlocking is hidden inside the
	 * transaction layer, within trans_commit. Hence, no LI_HOLD flag
	 * for the logitem.
	 */
	xfs_dqunlock(dqp);
}


/*
 * The transaction with the dquot locked has aborted. The dquot
 * must not be dirty within the transaction. We simply unlock just
 * as if the transaction had been cancelled.
 */
STATIC void
xfs_qm_dquot_logitem_abort(
	xfs_dq_logitem_t	*ql)
{
	xfs_qm_dquot_logitem_unlock(ql);
}

/*
 * this needs to stamp an lsn into the dquot, I think.
 * rpc's that look at user dquots would then have to
 * push on the dependency recorded in the dquot
 */
/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_committing(
	xfs_dq_logitem_t	*l,
	xfs_lsn_t		lsn)
{
	return;
}


/*
 * This is the ops vector for dquots
 */
struct xfs_item_ops xfs_dquot_item_ops = {
	.iop_size	= (uint(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_size,
	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
					xfs_qm_dquot_logitem_format,
	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin,
	.iop_unpin	= (void(*)(xfs_log_item_t*, int))
					xfs_qm_dquot_logitem_unpin,
	.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
					xfs_qm_dquot_logitem_unpin_remove,
	.iop_trylock	= (uint(*)(xfs_log_item_t*))
					xfs_qm_dquot_logitem_trylock,
	.iop_unlock	= (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unlock,
	.iop_committed	= (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
					xfs_qm_dquot_logitem_committed,
	.iop_push	= (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_push,
	.iop_abort	= (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_abort,
	.iop_pushbuf	= (void(*)(xfs_log_item_t*))
					xfs_qm_dquot_logitem_pushbuf,
	.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
					xfs_qm_dquot_logitem_committing
};

/*
 * Initialize the dquot log item for a newly allocated dquot.
 * The dquot isn't locked at this point, but it isn't on any of the lists
 * either, so we don't care.
 */
void
xfs_qm_dquot_logitem_init(
	struct xfs_dquot	*dqp)
{
	xfs_dq_logitem_t	*lp;
	lp = &dqp->q_logitem;

	lp->qli_item.li_type = XFS_LI_DQUOT;
	lp->qli_item.li_ops = &xfs_dquot_item_ops;
	lp->qli_item.li_mountp = dqp->q_mount;
	lp->qli_dquot = dqp;
	lp->qli_format.qlf_type = XFS_LI_DQUOT;
	lp->qli_format.qlf_id = INT_GET(dqp->q_core.d_id, ARCH_CONVERT);
	lp->qli_format.qlf_blkno = dqp->q_blkno;
	lp->qli_format.qlf_len = 1;
	/*
	 * This is just the offset of this dquot within its buffer
	 * (which is currently 1 FSB and probably won't change).
	 * Hence 32 bits for this offset should be just fine.
	 * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t))
	 * here, and recompute it at recovery time.
	 */
	lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset;
}

/*------------------ QUOTAOFF LOG ITEMS -------------------*/

/*
 * This returns the number of iovecs needed to log the given quotaoff item.
 * We only need 1 iovec for a quotaoff item. It just logs the
 * quotaoff_log_format structure.
 */
/*ARGSUSED*/
STATIC uint
xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf)
{
	return (1);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given quotaoff log item. We use only 1 iovec, and we point that
 * at the quotaoff_log_format structure embedded in the quotaoff item.
 * At this point we simply assert that the item really is of the
 * QUOTAOFF type.
 */
STATIC void
xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf,
			   xfs_log_iovec_t *log_vector)
{
	ASSERT(qf->qql_format.qf_type == XFS_LI_QUOTAOFF);

	log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format);
	log_vector->i_len = sizeof(xfs_qoff_logitem_t);
	qf->qql_format.qf_size = 1;
}


/*
 * Pinning has no meaning for a quotaoff item, so just return.
 */
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf)
{
	return;
}


/*
 * Since pinning has no meaning for a quotaoff item, unpinning does
 * not either.
 */
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf, int stale)
{
	return;
}

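/*
 * Likewise, unpin_remove is a no-op for quotaoff items.
 */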
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_unpin_remove(xfs_qoff_logitem_t *qf, xfs_trans_t *tp)
{
	return;
}

/*
 * Quotaoff items have no locking, so just report them as locked
 * so that nobody tries to push them.
 */
/*ARGSUSED*/
STATIC uint
xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf)
{
	return XFS_ITEM_LOCKED;
}

/*
 * Quotaoff items have no locking, so there is nothing to unlock.
 */
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_unlock(xfs_qoff_logitem_t *qf)
{
	return;
}

/*
 * The quotaoff-start-item is logged only once and cannot be moved in the log,
 * so simply return the lsn at which it's been logged.
 */
/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_qm_qoff_logitem_committed(xfs_qoff_logitem_t *qf, xfs_lsn_t lsn)
{
	return (lsn);
}

/*
 * The transaction of which this QUOTAOFF is a part has been aborted.
 * Just clean up after ourselves.
 * Shouldn't this never happen in the case of qoffend logitems? XXX
 */
STATIC void
xfs_qm_qoff_logitem_abort(xfs_qoff_logitem_t *qf)
{
	kmem_free(qf, sizeof(xfs_qoff_logitem_t));
}

/*
 * There isn't much you can do to push on a quotaoff item. It is simply
 * stuck waiting for the log to be flushed to disk.
 */
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_push(xfs_qoff_logitem_t *qf)
{
	return;
}

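/*
 * The quotaoff-end item has been committed to the on-disk log, so the
 * paired quotaoff-start item no longer needs to be kept around: delete
 * it from the AIL and free both items. The returned LSN of -1 tells the
 * transaction commit code that this item has been freed as well.
 */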
/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_qm_qoffend_logitem_committed(
	xfs_qoff_logitem_t	*qfe,
	xfs_lsn_t		lsn)
{
	xfs_qoff_logitem_t	*qfs;
	SPLDECL(s);

	qfs = qfe->qql_start_lip;
	AIL_LOCK(qfs->qql_item.li_mountp, s);
	/*
	 * Delete the qoff-start logitem from the AIL.
	 * xfs_trans_delete_ail() drops the AIL lock.
	 */
	xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs, s);
	kmem_free(qfs, sizeof(xfs_qoff_logitem_t));
	kmem_free(qfe, sizeof(xfs_qoff_logitem_t));
	return (xfs_lsn_t)-1;
}

/*
 * XXX rcc - don't know quite what to do with this. I think we can
 * just ignore it. The only time that isn't the case is if we allow
 * the client to somehow see that quotas have been turned off, in which
 * case we can't allow that to get back until the quotaoff hits the disk.
 * So how would that happen? Also, do we need different routines for
 * quotaoff start and quotaoff end? I suspect the answer is yes but
 * to be sure, I need to look at the recovery code and see how quota off
 * recovery is handled (do we roll forward or back or do something else).
 * If we roll forwards or backwards, then we need two separate routines,
 * one that does nothing and one that stamps in the lsn that matters
 * (truly makes the quotaoff irrevocable). If we do something else,
 * then maybe we don't need two.
 */
/* ARGSUSED */
STATIC void
xfs_qm_qoff_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
{
	return;
}

/* ARGSUSED */
STATIC void
xfs_qm_qoffend_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
{
	return;
}

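/*
 * This is the ops vector shared by all quotaoff-end log items.
 */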
struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
	.iop_size	= (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
					xfs_qm_qoff_logitem_format,
	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
	.iop_unpin	= (void(*)(xfs_log_item_t*, int))
					xfs_qm_qoff_logitem_unpin,
	.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
					xfs_qm_qoff_logitem_unpin_remove,
	.iop_trylock	= (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
	.iop_unlock	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
	.iop_committed	= (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
					xfs_qm_qoffend_logitem_committed,
	.iop_push	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
	.iop_abort	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_abort,
	.iop_pushbuf	= NULL,
	.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
					xfs_qm_qoffend_logitem_committing
};

/*
 * This is the ops vector shared by all quotaoff-start log items.
 */
struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
	.iop_size	= (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
					xfs_qm_qoff_logitem_format,
	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
	.iop_unpin	= (void(*)(xfs_log_item_t*, int))
					xfs_qm_qoff_logitem_unpin,
	.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
					xfs_qm_qoff_logitem_unpin_remove,
	.iop_trylock	= (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
	.iop_unlock	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
	.iop_committed	= (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
					xfs_qm_qoff_logitem_committed,
	.iop_push	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
	.iop_abort	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_abort,
	.iop_pushbuf	= NULL,
	.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
					xfs_qm_qoff_logitem_committing
};

/*
 * Allocate and initialize a quotaoff item of the correct quota type(s).
 */
xfs_qoff_logitem_t *
xfs_qm_qoff_logitem_init(
	struct xfs_mount	*mp,
	xfs_qoff_logitem_t	*start,
	uint			flags)
{
	xfs_qoff_logitem_t	*qf;

	qf = (xfs_qoff_logitem_t*) kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP);

	qf->qql_item.li_type = XFS_LI_QUOTAOFF;
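	/*
	 * 'start' is NULL when we are creating the quotaoff-start item
	 * itself; for a quotaoff-end item it points at the matching start
	 * item, and the quotaoff-end ops vector is used instead.
	 */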
	if (start)
		qf->qql_item.li_ops = &xfs_qm_qoffend_logitem_ops;
	else
		qf->qql_item.li_ops = &xfs_qm_qoff_logitem_ops;
	qf->qql_item.li_mountp = mp;
	qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
	qf->qql_format.qf_flags = flags;
	qf->qql_start_lip = start;
	return (qf);
}