Lines Matching refs:write_sq
529 if (sq == &write_sq) { in sq_setup()
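For orientation, the write_sq fields referenced throughout this listing suggest a per-direction queue descriptor roughly like the sketch below. This is reconstructed from the matches only, not the authoritative definition in dmasound.h, so names, grouping and types are assumptions.

	#include <linux/wait.h>

	/* Reconstructed from the write_sq references in this listing; the
	 * real struct sound_queue in dmasound.h may differ. */
	struct sound_queue_sketch {
		int numBufs, bufSize;		/* requested buffer count and size */
		char **buffers;			/* the allocated sound blocks */
		int max_count, block_size;	/* usable blocks and bytes per block */
		int max_active;			/* how many blocks may be queued at once */
		int front, rear;		/* ring indices (rear == -1 when empty) */
		int count, rear_size;		/* queued blocks, bytes in the rear block */
		int active, busy, syncing;	/* DMA running, device open, draining */
		int locked, xruns;		/* sq_setup() done, underrun counter */
		int user_frags, user_frag_size;	/* user-requested fragment geometry */
		int non_blocking;		/* honour O_NONBLOCK in sq_write() */
		wait_queue_head_t action_queue, sync_queue;
	};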
575 if (write_sq.locked == 0) { in sq_write()
576 if ((uWritten = sq_setup(&write_sq)) < 0) return uWritten ; in sq_write()
601 write_sq.syncing &= ~2 ; /* take out POST status */ in sq_write()
604 if (write_sq.count > 0 && in sq_write()
605 (bLeft = write_sq.block_size-write_sq.rear_size) > 0) { in sq_write()
606 dest = write_sq.buffers[write_sq.rear]; in sq_write()
607 bUsed = write_sq.rear_size; in sq_write()
615 write_sq.rear_size = bUsed; in sq_write()
621 while (write_sq.count >= write_sq.max_active) { in sq_write()
622 prepare_to_wait(&write_sq.action_queue, &wait, TASK_INTERRUPTIBLE); in sq_write()
624 if (write_sq.non_blocking) { in sq_write()
625 finish_wait(&write_sq.action_queue, &wait); in sq_write()
628 if (write_sq.count < write_sq.max_active) in sq_write()
633 finish_wait(&write_sq.action_queue, &wait); in sq_write()
638 finish_wait(&write_sq.action_queue, &wait); in sq_write()
647 dest = write_sq.buffers[(write_sq.rear+1) % write_sq.max_count]; in sq_write()
649 bLeft = write_sq.block_size; in sq_write()
658 write_sq.rear = (write_sq.rear+1) % write_sq.max_count; in sq_write()
659 write_sq.rear_size = bUsed; in sq_write()
660 write_sq.count++; in sq_write()
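Taken together, the sq_write() matches (lines 575-660 above) outline the producer side of the queue: set the queue up on first use, top up the partially filled rear block, sleep on action_queue while all usable blocks are queued, then claim the next block and bump the counters. The sketch below condenses that flow under the driver's own context (write_sq, sq_setup()); the copy_from_user() calls, the uLeft/uWritten byte accounting and the call that starts the DMA are deliberately left out.

	/* Condensed reconstruction of the sq_write() flow shown above; not
	 * the full function -- user copies, byte accounting and the DMA
	 * kick are omitted, and only one block iteration is shown. */
	static ssize_t sq_write_sketch(const char __user *src, size_t uLeft)
	{
		ssize_t uWritten = 0;
		ssize_t bUsed, bLeft;
		char *dest;
		DEFINE_WAIT(wait);

		if (write_sq.locked == 0) {
			if ((uWritten = sq_setup(&write_sq)) < 0)
				return uWritten;
			uWritten = 0;
		}
		write_sq.syncing &= ~2;		/* take out POST status */

		/* Top up the rear block if it still has room. */
		if (write_sq.count > 0 &&
		    (bLeft = write_sq.block_size - write_sq.rear_size) > 0) {
			dest = write_sq.buffers[write_sq.rear];
			bUsed = write_sq.rear_size;
			/* ... copy min(bLeft, uLeft) bytes to dest + bUsed ... */
			write_sq.rear_size = bUsed;	/* plus bytes copied */
		}

		/* The real function repeats the following while uLeft > 0. */
		while (write_sq.count >= write_sq.max_active) {
			prepare_to_wait(&write_sq.action_queue, &wait,
					TASK_INTERRUPTIBLE);
			if (write_sq.non_blocking) {
				finish_wait(&write_sq.action_queue, &wait);
				return uWritten > 0 ? uWritten : -EAGAIN;
			}
			if (write_sq.count < write_sq.max_active)
				break;
			schedule_timeout(HZ);
			if (signal_pending(current)) {
				finish_wait(&write_sq.action_queue, &wait);
				return uWritten > 0 ? uWritten : -EINTR;
			}
		}
		finish_wait(&write_sq.action_queue, &wait);

		/* Claim the next free block and append it to the queue. */
		dest = write_sq.buffers[(write_sq.rear + 1) % write_sq.max_count];
		bUsed = 0;
		bLeft = write_sq.block_size;
		/* ... copy up to bLeft bytes from src into dest ... */
		write_sq.rear = (write_sq.rear + 1) % write_sq.max_count;
		write_sq.rear_size = bUsed;	/* bytes actually placed in dest */
		write_sq.count++;

		return uWritten;
	}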
674 if (write_sq.locked == 0) { in sq_poll()
675 if ((retVal = sq_setup(&write_sq)) < 0) in sq_poll()
680 poll_wait(file, &write_sq.action_queue, wait); in sq_poll()
682 if (write_sq.count < write_sq.max_active || write_sq.block_size - write_sq.rear_size > 0) in sq_poll()
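The sq_poll() fragments (lines 674-682) amount to a conventional poll hook: lazily run sq_setup(), register the caller on action_queue, and report writability whenever a whole block is free or the rear block still has room. Roughly, again assuming the surrounding driver context:

	#include <linux/poll.h>

	static unsigned int sq_poll_sketch(struct file *file, poll_table *wait)
	{
		unsigned int mask = 0;
		int retVal;

		if (write_sq.locked == 0) {
			if ((retVal = sq_setup(&write_sq)) < 0)
				return retVal;		/* setup failed */
		}
		poll_wait(file, &write_sq.action_queue, wait);
		if (write_sq.count < write_sq.max_active ||
		    write_sq.block_size - write_sq.rear_size > 0)
			mask |= POLLOUT | POLLWRNORM;	/* room to write */
		return mask;
	}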
750 #define write_sq_init_waitqueue() sq_init_waitqueue(&write_sq)
752 #define write_sq_wake_up(file) sq_wake_up(&write_sq, file, FMODE_WRITE)
754 #define write_sq_release_buffers() sq_release_buffers(&write_sq)
756 sq_open2(&write_sq, file, FMODE_WRITE, numWriteBufs, writeBufSize )
820 write_sq.active = 0; in sq_reset_output()
821 write_sq.count = 0; in sq_reset_output()
822 write_sq.rear_size = 0; in sq_reset_output()
824 write_sq.front = 0 ; in sq_reset_output()
825 write_sq.rear = -1 ; /* same as for set-up */ in sq_reset_output()
828 write_sq.locked = 0 ; in sq_reset_output()
829 write_sq.user_frags = 0 ; in sq_reset_output()
830 write_sq.user_frag_size = 0 ; in sq_reset_output()
849 write_sq.syncing |= 1; in sq_fsync()
852 while (write_sq.active) { in sq_fsync()
853 wait_event_interruptible_timeout(write_sq.sync_queue, in sq_fsync()
854 !write_sq.active, HZ); in sq_fsync()
872 write_sq.syncing = 0 ; in sq_fsync()
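The sq_fsync() matches (lines 849-872) show the drain path: flag the queue as syncing, then sleep on sync_queue in one-second interruptible slices until the interrupt side clears write_sq.active. A condensed sketch, omitting the overall timeout and the error handling of the real function:

	static int sq_fsync_sketch(void)
	{
		int rc = 0;

		write_sq.syncing |= 1;	/* tell the interrupt side we are draining */
		/* ... push out any partially filled rear block here ... */

		while (write_sq.active) {
			wait_event_interruptible_timeout(write_sq.sync_queue,
							 !write_sq.active, HZ);
			if (signal_pending(current)) {
				rc = -EINTR;	/* interrupted before the queue drained */
				break;
			}
		}

		write_sq.syncing = 0;
		return rc;
	}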
883 if (write_sq.busy) in sq_release()
888 write_sq.busy = 0; in sq_release()
938 if (write_sq.locked) in queues_are_quiescent()
1011 if ( !write_sq.locked ) in sq_ioctl()
1012 sq_setup(&write_sq) ; in sq_ioctl()
1013 size = write_sq.user_frag_size ; in sq_ioctl()
1022 write_sq.syncing |= 0x2 ; in sq_ioctl()
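Lines 1011-1022 sit inside the sq_ioctl() switch. The first group reads like SNDCTL_DSP_GETBLKSIZE (set the queue up if needed, report the user-visible fragment size); line 1022 reads like SNDCTL_DSP_POST, setting the 0x2 bit of syncing that sq_write() takes out again at line 601. A sketch of the two cases as switch fragments, with the case labels inferred and size/arg assumed from the enclosing function:

	case SNDCTL_DSP_GETBLKSIZE:
		if (!write_sq.locked)
			sq_setup(&write_sq);
		size = write_sq.user_frag_size;
		return put_user(size, (int __user *)arg);

	case SNDCTL_DSP_POST:
		/* Remember that a POST is pending; sq_write() clears it. */
		write_sq.syncing |= 0x2;
		/* ... start playback of whatever is already queued ... */
		return 0;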
1109 result = set_queue_frags(&write_sq, nbufs, size) ; in sq_ioctl()
1121 if ( !write_sq.locked ) in sq_ioctl()
1122 sq_setup(&write_sq) ; in sq_ioctl()
1123 info.fragments = write_sq.max_active - write_sq.count; in sq_ioctl()
1124 info.fragstotal = write_sq.max_active; in sq_ioctl()
1125 info.fragsize = write_sq.user_frag_size; in sq_ioctl()
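Lines 1121-1125 correspond to SNDCTL_DSP_GETOSPACE: after making sure the queue is set up, the free, total and per-fragment figures are reported in an OSS audio_buf_info. Approximately, again as a fragment of the sq_ioctl() switch with the copy-out and arg handling assumed:

	case SNDCTL_DSP_GETOSPACE:
	{
		audio_buf_info info;

		if (!write_sq.locked)
			sq_setup(&write_sq);
		info.fragments = write_sq.max_active - write_sq.count;
		info.fragstotal = write_sq.max_active;
		info.fragsize = write_sq.user_frag_size;
		info.bytes = info.fragments * info.fragsize;
		return copy_to_user((void __user *)arg, &info, sizeof(info))
			? -EFAULT : 0;
	}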
1305 "write", write_sq.numBufs, write_sq.bufSize) ; in state_open()
1310 "write", write_sq.max_count, write_sq.block_size, in state_open()
1311 write_sq.max_active, write_sq.front, write_sq.rear, in state_open()
1312 write_sq.count, write_sq.rear_size, write_sq.active, in state_open()
1313 write_sq.busy, write_sq.syncing, write_sq.locked, write_sq.xruns) ; in state_open()