/*
 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like. Any license provided herein, whether implied or
 * otherwise, applies only to this software file. Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

/*
 * Written by Steve Lord, Jim Mostek, Russell Cattelan at SGI
 */

#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/version.h>
#include <linux/config.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/uio.h>

/* nptl patch changes where the sigmask_lock is defined */
#ifdef CLONE_SIGNAL	/* stock */
#define sigmask_lock()		spin_lock_irq(&current->sigmask_lock);
#define sigmask_unlock()	spin_unlock_irq(&current->sigmask_lock);
#define __recalc_sigpending(x)	recalc_sigpending(x)
#else /* nptl */
#define sigmask_lock()		spin_lock_irq(&current->sighand->siglock);
#define sigmask_unlock()	spin_unlock_irq(&current->sighand->siglock);
#define __recalc_sigpending(x)	recalc_sigpending()
#endif
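
/*
 * Typical usage is to block signals around an operation and restore them
 * afterwards.  The sketch below is illustrative only (the sigset handling
 * shown is an assumption, not lifted from the pagebuf code itself):
 *
 *	sigset_t oldset;
 *
 *	sigmask_lock();
 *	oldset = current->blocked;
 *	siginitsetinv(&current->blocked, sigmask(SIGKILL));
 *	__recalc_sigpending(current);
 *	sigmask_unlock();
 *	... do work with most signals blocked ...
 *	sigmask_lock();
 *	current->blocked = oldset;
 *	__recalc_sigpending(current);
 *	sigmask_unlock();
 */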
/*
 * Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define page_buf_ctob(pp)	((pp) * PAGE_CACHE_SIZE)
#define page_buf_btoc(dd)	(((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)
#define page_buf_btoct(dd)	((dd) >> PAGE_CACHE_SHIFT)
#define page_buf_poff(aa)	((aa) & ~PAGE_CACHE_MASK)
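
/*
 * Worked example, assuming 4096-byte pages (PAGE_CACHE_SIZE == 4096,
 * PAGE_CACHE_SHIFT == 12):
 *
 *	page_buf_ctob(3)     == 12288	bytes spanned by 3 pages
 *	page_buf_btoc(6000)  == 2	pages needed, rounding up
 *	page_buf_btoct(6000) == 1	whole pages, truncating
 *	page_buf_poff(6000)  == 1904	byte offset within the page
 */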

typedef enum page_buf_rw_e {
	PBRW_READ = 1,			/* transfer into target memory */
	PBRW_WRITE = 2,			/* transfer from target memory */
	PBRW_ZERO = 3			/* Zero target memory */
} page_buf_rw_t;


typedef enum page_buf_flags_e {		/* pb_flags values */
	PBF_READ = (1 << 0),	/* buffer intended for reading from device */
	PBF_WRITE = (1 << 1),	/* buffer intended for writing to device */
	PBF_MAPPED = (1 << 2),	/* buffer mapped (pb_addr valid) */
	PBF_PARTIAL = (1 << 3),	/* buffer partially read */
	PBF_ASYNC = (1 << 4),	/* initiator will not wait for completion */
	PBF_NONE = (1 << 5),	/* buffer not read at all */
	PBF_DELWRI = (1 << 6),	/* buffer has dirty pages */
	PBF_STALE = (1 << 7),	/* buffer has been staled, do not find it */
	PBF_FS_MANAGED = (1 << 8),  /* filesystem controls freeing memory */
	PBF_FS_DATAIOD = (1 << 9),  /* schedule IO completion on fs datad */
	PBF_FORCEIO = (1 << 10),    /* ignore any cache state */
	PBF_FLUSH = (1 << 11),	    /* flush disk write cache */
	PBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
	PBF_DIRECTIO = (1 << 13),   /* used for a direct IO mapping */

	/* flags used only as arguments to access routines */
	PBF_LOCK = (1 << 14),	    /* lock requested */
	PBF_TRYLOCK = (1 << 15),    /* lock requested, but do not wait */
	PBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */

	/* flags used only internally */
	_PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
	_PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */
	_PBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
	_PBF_PRIVATE_BH = (1 << 20),/* do not use public buffer heads */
} page_buf_flags_t;

#define PBF_UPDATE	(PBF_READ | PBF_WRITE)
#define PBF_NOT_DONE(pb)	(((pb)->pb_flags & (PBF_PARTIAL|PBF_NONE)) != 0)
#define PBF_DONE(pb)	(((pb)->pb_flags & (PBF_PARTIAL|PBF_NONE)) == 0)

#define PBR_SECTOR_ONLY		1	/* only use sector size buffer heads */
#define PBR_ALIGNED_ONLY	2	/* only use aligned I/O */

typedef struct xfs_buftarg {
	int			pbr_flags;
	dev_t			pbr_dev;
	kdev_t			pbr_kdev;
	struct block_device	*pbr_bdev;
	struct address_space	*pbr_mapping;
	unsigned int		pbr_bsize;
	unsigned int		pbr_sshift;
	size_t			pbr_smask;
} xfs_buftarg_t;

/*
 * xfs_buf_t: Buffer structure for page cache-based buffers
 *
 * This buffer structure is used by the page cache buffer management routines
 * to refer to an assembly of pages forming a logical buffer. The actual I/O
 * is performed with buffer_head structures, as required by drivers.
 *
 * The buffer structure is used on a temporary basis only, and discarded when
 * released. The real data storage is recorded in the page cache. Metadata is
 * hashed to the block device on which the file system resides.
 */
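
/*
 * Typical lifecycle, as an illustrative sketch built from the interfaces
 * declared below ("target", "blkno" and "len" are placeholders, error
 * handling elided):
 *
 *	xfs_buf_t *bp;
 *
 *	bp = xfs_buf_read(target, blkno, len, 0);   (locked, mapped, read)
 *	if (!pagebuf_geterror(bp)) {
 *		... access the data through XFS_BUF_PTR(bp) ...
 *	}
 *	xfs_buf_relse(bp);                          (unlock and drop hold)
 */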

struct xfs_buf;

/* call-back function on I/O completion */
typedef void (*page_buf_iodone_t)(struct xfs_buf *);
/* call-back function on buffer release */
typedef void (*page_buf_relse_t)(struct xfs_buf *);
/* pre-write function */
typedef int (*page_buf_bdstrat_t)(struct xfs_buf *);

#define PB_PAGES	4

typedef struct xfs_buf {
	struct semaphore	pb_sema;	/* semaphore for lockables */
	unsigned long		pb_queuetime;	/* time buffer was queued */
	atomic_t		pb_pin_count;	/* pin count */
	wait_queue_head_t	pb_waiters;	/* unpin waiters */
	struct list_head	pb_list;
	page_buf_flags_t	pb_flags;	/* status flags */
	struct list_head	pb_hash_list;
	xfs_buftarg_t		*pb_target;	/* logical object */
	atomic_t		pb_hold;	/* reference count */
	xfs_daddr_t		pb_bn;		/* block number for I/O */
	loff_t			pb_file_offset;	/* offset in file */
	size_t			pb_buffer_length; /* size of buffer in bytes */
	size_t			pb_count_desired; /* desired transfer size */
	void			*pb_addr;	/* virtual address of buffer */
	struct tq_struct	pb_iodone_sched;
	atomic_t		pb_io_remaining; /* #outstanding I/O requests */
	page_buf_iodone_t	pb_iodone;	/* I/O completion function */
	page_buf_relse_t	pb_relse;	/* releasing function */
	page_buf_bdstrat_t	pb_strat;	/* pre-write function */
	struct semaphore	pb_iodonesema;	/* Semaphore for I/O waiters */
	void			*pb_fspriv;
	void			*pb_fspriv2;
	void			*pb_fspriv3;
	unsigned short		pb_error;	/* error code on I/O */
	unsigned short		pb_page_count;	/* size of page array */
	unsigned short		pb_offset;	/* page offset in first page */
	unsigned char		pb_locked;	/* page array is locked */
	unsigned char		pb_hash_index;	/* hash table index */
	struct page		**pb_pages;	/* array of page pointers */
	struct page		*pb_page_array[PB_PAGES]; /* inline pages */
#ifdef PAGEBUF_LOCK_TRACKING
	int			pb_last_holder;
#endif
} xfs_buf_t;


/* Finding and Reading Buffers */

extern xfs_buf_t *_pagebuf_find(	/* find buffer for block if	*/
					/* the block is in memory	*/
		xfs_buftarg_t *,	/* inode for block		*/
		loff_t,			/* starting offset of range	*/
		size_t,			/* length of range		*/
		page_buf_flags_t,	/* PBF_LOCK			*/
		xfs_buf_t *);		/* newly allocated buffer	*/

#define xfs_incore(buftarg,blkno,len,lockit) \
	_pagebuf_find(buftarg, blkno, len, lockit, NULL)

extern xfs_buf_t *xfs_buf_get_flags(	/* allocate a buffer		*/
		xfs_buftarg_t *,	/* inode for buffer		*/
		loff_t,			/* starting offset of range	*/
		size_t,			/* length of range		*/
		page_buf_flags_t);	/* PBF_LOCK, PBF_READ,		*/
					/* PBF_ASYNC			*/

#define xfs_buf_get(target, blkno, len, flags) \
	xfs_buf_get_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)

extern xfs_buf_t *xfs_buf_read_flags(	/* allocate and read a buffer	*/
		xfs_buftarg_t *,	/* inode for buffer		*/
		loff_t,			/* starting offset of range	*/
		size_t,			/* length of range		*/
		page_buf_flags_t);	/* PBF_LOCK, PBF_ASYNC		*/

#define xfs_buf_read(target, blkno, len, flags) \
	xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)

extern xfs_buf_t *pagebuf_lookup(
		xfs_buftarg_t *,
		loff_t,			/* starting offset of range	*/
		size_t,			/* length of range		*/
		page_buf_flags_t);	/* PBF_READ, PBF_WRITE,		*/
					/* PBF_FORCEIO			*/

extern xfs_buf_t *pagebuf_get_empty(	/* allocate pagebuf struct with	*/
					/* no memory or disk address	*/
		size_t len,
		xfs_buftarg_t *);	/* mount point "fake" inode	*/

extern xfs_buf_t *pagebuf_get_no_daddr(	/* allocate pagebuf struct	*/
					/* without disk address		*/
		size_t len,
		xfs_buftarg_t *);	/* mount point "fake" inode	*/

extern int pagebuf_associate_memory(
		xfs_buf_t *,
		void *,
		size_t);
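
/*
 * Illustrative sketch: wrapping a buffer around caller-supplied memory
 * instead of page cache pages.  The names "mem", "len", "blkno" and
 * "target" are placeholders; error handling elided.
 *
 *	bp = pagebuf_get_no_daddr(len, target);
 *	pagebuf_associate_memory(bp, mem, len);
 *	XFS_BUF_SET_ADDR(bp, blkno);
 *	... issue I/O, e.g. via pagebuf_iostart() ...
 */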

extern void pagebuf_hold(		/* increment reference count	*/
		xfs_buf_t *);		/* buffer to hold		*/

extern void pagebuf_readahead(		/* read ahead into cache	*/
		xfs_buftarg_t *,	/* target for buffer (or NULL)	*/
		loff_t,			/* starting offset of range	*/
		size_t,			/* length of range		*/
		page_buf_flags_t);	/* additional read flags	*/

/* Releasing Buffers */

extern void pagebuf_free(		/* deallocate a buffer		*/
		xfs_buf_t *);		/* buffer to deallocate		*/

extern void pagebuf_rele(		/* release hold on a buffer	*/
		xfs_buf_t *);		/* buffer to release		*/

/* Locking and Unlocking Buffers */

extern int pagebuf_cond_lock(		/* lock buffer, if not locked	*/
					/* (returns -EBUSY if locked)	*/
		xfs_buf_t *);		/* buffer to lock		*/

extern int pagebuf_lock_value(		/* return count on lock		*/
		xfs_buf_t *);		/* buffer to check		*/

extern int pagebuf_lock(		/* lock buffer			*/
		xfs_buf_t *);		/* buffer to lock		*/

extern void pagebuf_unlock(		/* unlock buffer		*/
		xfs_buf_t *);		/* buffer to unlock		*/

/* Buffer Read and Write Routines */

extern void pagebuf_iodone(		/* mark buffer I/O complete	*/
		xfs_buf_t *,		/* buffer to mark		*/
		int,			/* use data/log helper thread. */
		int);			/* run completion locally, or
					 * in a helper thread. */

extern void pagebuf_ioerror(		/* mark buffer in error (or not) */
		xfs_buf_t *,		/* buffer to mark		*/
		int);			/* error to store (0 if none)	*/

extern int pagebuf_iostart(		/* start I/O on a buffer	*/
		xfs_buf_t *,		/* buffer to start		*/
		page_buf_flags_t);	/* PBF_LOCK, PBF_ASYNC,		*/
					/* PBF_READ, PBF_WRITE,		*/
					/* PBF_DELWRI			*/

extern int pagebuf_iorequest(		/* start real I/O		*/
		xfs_buf_t *);		/* buffer to convey to device	*/

extern int pagebuf_iowait(		/* wait for buffer I/O done	*/
		xfs_buf_t *);		/* buffer to wait on		*/

extern void pagebuf_iomove(		/* move data in/out of pagebuf	*/
		xfs_buf_t *,		/* buffer to manipulate		*/
		size_t,			/* starting buffer offset	*/
		size_t,			/* length in buffer		*/
		caddr_t,		/* data pointer			*/
		page_buf_rw_t);		/* direction			*/

static inline int pagebuf_iostrategy(xfs_buf_t *pb)
{
	return pb->pb_strat ? pb->pb_strat(pb) : pagebuf_iorequest(pb);
}

static inline int pagebuf_geterror(xfs_buf_t *pb)
{
	return pb ? pb->pb_error : ENOMEM;
}
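
/*
 * Illustrative sketch of a synchronous submission through the strategy
 * hook, checking the result afterwards:
 *
 *	error = pagebuf_iostrategy(bp);
 *	if (!error)
 *		error = pagebuf_iowait(bp);
 *	if (!error)
 *		error = pagebuf_geterror(bp);
 */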

/* Buffer Utility Routines */

extern caddr_t pagebuf_offset(		/* pointer at offset in buffer	*/
		xfs_buf_t *,		/* buffer to offset into	*/
		size_t);		/* offset			*/

/* Pinning Buffer Storage in Memory */

extern void pagebuf_pin(		/* pin buffer in memory		*/
		xfs_buf_t *);		/* buffer to pin		*/

extern void pagebuf_unpin(		/* unpin buffered data		*/
		xfs_buf_t *);		/* buffer to unpin		*/

extern int pagebuf_ispin(		/* check if buffer is pinned	*/
		xfs_buf_t *);		/* buffer to check		*/

/* Delayed Write Buffer Routines */

extern void pagebuf_delwri_dequeue(xfs_buf_t *);

/* Buffer Daemon Setup Routines */

extern int pagebuf_init(void);
extern void pagebuf_terminate(void);


#ifdef PAGEBUF_TRACE
extern ktrace_t *pagebuf_trace_buf;
extern void pagebuf_trace(
		xfs_buf_t *,		/* buffer being traced		*/
		char *,			/* description of operation	*/
		void *,			/* arbitrary diagnostic value	*/
		void *);		/* return address		*/
#else
# define pagebuf_trace(pb, id, ptr, ra)	do { } while (0)
#endif

#define pagebuf_target_name(target)	bdevname((target)->pbr_kdev)

/*
 * Kernel version compatibility macros
 */

#define page_buffers(page)		((page)->buffers)
#define page_has_buffers(page)		((page)->buffers)
#define PageUptodate(x)			Page_Uptodate(x)
/*
 * macro tricks to expand the set_buffer_foo() and clear_buffer_foo()
 * functions.
 */
#define BUFFER_FNS(bit, name)						\
static inline void set_buffer_##name(struct buffer_head *bh)		\
{									\
	set_bit(BH_##bit, &(bh)->b_state);				\
}									\
static inline void clear_buffer_##name(struct buffer_head *bh)		\
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\

/*
 * Emit the buffer bitops functions. Note that there are also functions
 * of the form "mark_buffer_foo()". These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async, async)
BUFFER_FNS(Wait_IO, wait_io)
BUFFER_FNS(Launder, launder)
BUFFER_FNS(Sync, sync)
BUFFER_FNS(Delay, delay)
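
/*
 * For reference, BUFFER_FNS(Uptodate, uptodate) above expands to:
 *
 *	static inline void set_buffer_uptodate(struct buffer_head *bh)
 *	{
 *		set_bit(BH_Uptodate, &(bh)->b_state);
 *	}
 *	static inline void clear_buffer_uptodate(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Uptodate, &(bh)->b_state);
 *	}
 */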

#define get_seconds()		CURRENT_TIME
#define i_size_read(inode)	((inode)->i_size)
#define i_size_write(inode, sz)	((inode)->i_size = (sz))

/* These are just for xfs_syncsub... it sets an internal variable
 * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t
 */
#define XFS_B_ASYNC		PBF_ASYNC
#define XFS_B_DELWRI		PBF_DELWRI
#define XFS_B_READ		PBF_READ
#define XFS_B_WRITE		PBF_WRITE
#define XFS_B_STALE		PBF_STALE

#define XFS_BUF_TRYLOCK		PBF_TRYLOCK
#define XFS_INCORE_TRYLOCK	PBF_TRYLOCK
#define XFS_BUF_LOCK		PBF_LOCK
#define XFS_BUF_MAPPED		PBF_MAPPED

#define BUF_BUSY		PBF_DONT_BLOCK

#define XFS_BUF_BFLAGS(x)	((x)->pb_flags)
#define XFS_BUF_ZEROFLAGS(x)	\
	((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI))

#define XFS_BUF_STALE(x)	((x)->pb_flags |= XFS_B_STALE)
#define XFS_BUF_UNSTALE(x)	((x)->pb_flags &= ~XFS_B_STALE)
#define XFS_BUF_ISSTALE(x)	((x)->pb_flags & XFS_B_STALE)
#define XFS_BUF_SUPER_STALE(x)	do {				\
					XFS_BUF_STALE(x);	\
					pagebuf_delwri_dequeue(x);	\
					XFS_BUF_DONE(x);	\
				} while (0)

#define XFS_BUF_MANAGE		PBF_FS_MANAGED
#define XFS_BUF_UNMANAGE(x)	((x)->pb_flags &= ~PBF_FS_MANAGED)

#define XFS_BUF_DELAYWRITE(x)	((x)->pb_flags |= PBF_DELWRI)
#define XFS_BUF_UNDELAYWRITE(x)	pagebuf_delwri_dequeue(x)
#define XFS_BUF_ISDELAYWRITE(x)	((x)->pb_flags & PBF_DELWRI)

#define XFS_BUF_ERROR(x,no)	pagebuf_ioerror(x,no)
#define XFS_BUF_GETERROR(x)	pagebuf_geterror(x)
#define XFS_BUF_ISERROR(x)	(pagebuf_geterror(x) ? 1 : 0)

#define XFS_BUF_DONE(x)		((x)->pb_flags &= ~(PBF_PARTIAL|PBF_NONE))
#define XFS_BUF_UNDONE(x)	((x)->pb_flags |= PBF_PARTIAL|PBF_NONE)
#define XFS_BUF_ISDONE(x)	(!(PBF_NOT_DONE(x)))

#define XFS_BUF_BUSY(x)		((x)->pb_flags |= PBF_FORCEIO)
#define XFS_BUF_UNBUSY(x)	((x)->pb_flags &= ~PBF_FORCEIO)
#define XFS_BUF_ISBUSY(x)	(1)

#define XFS_BUF_ASYNC(x)	((x)->pb_flags |= PBF_ASYNC)
#define XFS_BUF_UNASYNC(x)	((x)->pb_flags &= ~PBF_ASYNC)
#define XFS_BUF_ISASYNC(x)	((x)->pb_flags & PBF_ASYNC)

#define XFS_BUF_FLUSH(x)	((x)->pb_flags |= PBF_FLUSH)
#define XFS_BUF_UNFLUSH(x)	((x)->pb_flags &= ~PBF_FLUSH)
#define XFS_BUF_ISFLUSH(x)	((x)->pb_flags & PBF_FLUSH)

#define XFS_BUF_SHUT(x)		printk("XFS_BUF_SHUT not implemented yet\n")
#define XFS_BUF_UNSHUT(x)	printk("XFS_BUF_UNSHUT not implemented yet\n")
#define XFS_BUF_ISSHUT(x)	(0)

#define XFS_BUF_HOLD(x)		pagebuf_hold(x)
#define XFS_BUF_READ(x)		((x)->pb_flags |= PBF_READ)
#define XFS_BUF_UNREAD(x)	((x)->pb_flags &= ~PBF_READ)
#define XFS_BUF_ISREAD(x)	((x)->pb_flags & PBF_READ)

#define XFS_BUF_WRITE(x)	((x)->pb_flags |= PBF_WRITE)
#define XFS_BUF_UNWRITE(x)	((x)->pb_flags &= ~PBF_WRITE)
#define XFS_BUF_ISWRITE(x)	((x)->pb_flags & PBF_WRITE)

#define XFS_BUF_ISUNINITIAL(x)	(0)
#define XFS_BUF_UNUNINITIAL(x)	(0)

#define XFS_BUF_BP_ISMAPPED(bp)	1

#define XFS_BUF_DATAIO(x)	((x)->pb_flags |= PBF_FS_DATAIOD)
#define XFS_BUF_UNDATAIO(x)	((x)->pb_flags &= ~PBF_FS_DATAIOD)

#define XFS_BUF_IODONE_FUNC(buf)	(buf)->pb_iodone
#define XFS_BUF_SET_IODONE_FUNC(buf, func)	\
			(buf)->pb_iodone = (func)
#define XFS_BUF_CLR_IODONE_FUNC(buf)		\
			(buf)->pb_iodone = NULL
#define XFS_BUF_SET_BDSTRAT_FUNC(buf, func)	\
			(buf)->pb_strat = (func)
#define XFS_BUF_CLR_BDSTRAT_FUNC(buf)		\
			(buf)->pb_strat = NULL

#define XFS_BUF_FSPRIVATE(buf, type)		\
			((type)(buf)->pb_fspriv)
#define XFS_BUF_SET_FSPRIVATE(buf, value)	\
			(buf)->pb_fspriv = (void *)(value)
#define XFS_BUF_FSPRIVATE2(buf, type)		\
			((type)(buf)->pb_fspriv2)
#define XFS_BUF_SET_FSPRIVATE2(buf, value)	\
			(buf)->pb_fspriv2 = (void *)(value)
#define XFS_BUF_FSPRIVATE3(buf, type)		\
			((type)(buf)->pb_fspriv3)
#define XFS_BUF_SET_FSPRIVATE3(buf, value)	\
			(buf)->pb_fspriv3 = (void *)(value)
#define XFS_BUF_SET_START(buf)

#define XFS_BUF_SET_BRELSE_FUNC(buf, value)	\
			(buf)->pb_relse = (value)

#define XFS_BUF_PTR(bp)		(xfs_caddr_t)((bp)->pb_addr)

extern inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
{
	if (bp->pb_flags & PBF_MAPPED)
		return XFS_BUF_PTR(bp) + offset;
	return (xfs_caddr_t) pagebuf_offset(bp, offset);
}

#define XFS_BUF_SET_PTR(bp, val, count)		\
			pagebuf_associate_memory(bp, val, count)
#define XFS_BUF_ADDR(bp)	((bp)->pb_bn)
#define XFS_BUF_SET_ADDR(bp, blk)		\
			((bp)->pb_bn = (xfs_daddr_t)(blk))
#define XFS_BUF_OFFSET(bp)	((bp)->pb_file_offset)
#define XFS_BUF_SET_OFFSET(bp, off)		\
			((bp)->pb_file_offset = (off))
#define XFS_BUF_COUNT(bp)	((bp)->pb_count_desired)
#define XFS_BUF_SET_COUNT(bp, cnt)		\
			((bp)->pb_count_desired = (cnt))
#define XFS_BUF_SIZE(bp)	((bp)->pb_buffer_length)
#define XFS_BUF_SET_SIZE(bp, cnt)		\
			((bp)->pb_buffer_length = (cnt))
#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
#define XFS_BUF_SET_VTYPE(bp, type)
#define XFS_BUF_SET_REF(bp, ref)

#define XFS_BUF_ISPINNED(bp)	pagebuf_ispin(bp)

#define XFS_BUF_VALUSEMA(bp)	pagebuf_lock_value(bp)
#define XFS_BUF_CPSEMA(bp)	(pagebuf_cond_lock(bp) == 0)
#define XFS_BUF_VSEMA(bp)	pagebuf_unlock(bp)
#define XFS_BUF_PSEMA(bp,x)	pagebuf_lock(bp)
#define XFS_BUF_V_IODONESEMA(bp) up(&bp->pb_iodonesema);
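
/*
 * Illustrative sketch of the lock wrappers above: XFS_BUF_CPSEMA() is a
 * non-blocking attempt, XFS_BUF_PSEMA()/XFS_BUF_VSEMA() are the blocking
 * lock and unlock (the second PSEMA argument is ignored here):
 *
 *	if (XFS_BUF_CPSEMA(bp)) {
 *		... got the lock without sleeping ...
 *		XFS_BUF_VSEMA(bp);
 *	} else {
 *		XFS_BUF_PSEMA(bp, 0);
 *		...
 *		XFS_BUF_VSEMA(bp);
 *	}
 */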

/* setup the buffer target from a buftarg structure */
#define XFS_BUF_SET_TARGET(bp, target)	\
		(bp)->pb_target = (target)
#define XFS_BUF_TARGET(bp)	((bp)->pb_target)
#define XFS_BUFTARG_NAME(target)	\
		pagebuf_target_name(target)

static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
{
	bp->pb_fspriv3 = mp;
	bp->pb_strat = xfs_bdstrat_cb;
	pagebuf_delwri_dequeue(bp);
	return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | _PBF_RUN_QUEUES);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	if (!bp->pb_relse)
		pagebuf_unlock(bp);
	pagebuf_rele(bp);
}

#define xfs_bpin(bp)		pagebuf_pin(bp)
#define xfs_bunpin(bp)		pagebuf_unpin(bp)

#define xfs_buftrace(id, bp)	\
	    pagebuf_trace(bp, id, NULL, (void *)__builtin_return_address(0))

#define xfs_biodone(pb)		\
	    pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), 0)

#define xfs_biomove(pb, off, len, data, rw) \
	    pagebuf_iomove((pb), (off), (len), (data), \
		((rw) == XFS_B_WRITE) ? PBRW_WRITE : PBRW_READ)

#define xfs_biozero(pb, off, len) \
	    pagebuf_iomove((pb), (off), (len), NULL, PBRW_ZERO)


static inline int XFS_bwrite(xfs_buf_t *pb)
{
	int iowait = (pb->pb_flags & PBF_ASYNC) == 0;
	int error = 0;

	if (!iowait)
		pb->pb_flags |= _PBF_RUN_QUEUES;

	pagebuf_delwri_dequeue(pb);
	pagebuf_iostrategy(pb);
	if (iowait) {
		error = pagebuf_iowait(pb);
		xfs_buf_relse(pb);
	}
	return error;
}

#define XFS_bdwrite(pb)		\
	    pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC)

static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
{
	bp->pb_strat = xfs_bdstrat_cb;
	bp->pb_fspriv3 = mp;

	return pagebuf_iostart(bp, PBF_DELWRI | PBF_ASYNC);
}
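
/*
 * Illustrative contrast between the two write paths above (sketch only):
 *
 *	error = XFS_bwrite(bp);		waits and releases bp itself,
 *					unless PBF_ASYNC was already set
 *	error = xfs_bdwrite(mp, bp);	queues bp as a delayed, asynchronous
 *					write and returns without waiting
 */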

#define XFS_bdstrat(bp)		pagebuf_iorequest(bp)

#define xfs_iowait(pb)		pagebuf_iowait(pb)

#define xfs_baread(target, rablkno, ralen) \
	pagebuf_readahead((target), (rablkno), (ralen), PBF_DONT_BLOCK)

#define xfs_buf_get_empty(len, target)	pagebuf_get_empty((len), (target))
#define xfs_buf_get_noaddr(len, target)	pagebuf_get_no_daddr((len), (target))
#define xfs_buf_free(bp)		pagebuf_free(bp)

/*
 * Handling of buftargs.
 */

extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *);
extern void xfs_free_buftarg(xfs_buftarg_t *, int);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern void xfs_incore_relse(xfs_buftarg_t *, int, int);
extern int xfs_flush_buftarg(xfs_buftarg_t *, int);

#define xfs_getsize_buftarg(buftarg) \
	block_size((buftarg)->pbr_kdev)
#define xfs_readonly_buftarg(buftarg) \
	is_read_only((buftarg)->pbr_kdev)
#define xfs_binval(buftarg) \
	xfs_flush_buftarg(buftarg, 1)
#define XFS_bflush(buftarg) \
	xfs_flush_buftarg(buftarg, 1)

#endif	/* __XFS_BUF_H__ */
