
Searched refs:rq (Results 1 – 25 of 141) sorted by relevance


/linux-2.4.37.9/drivers/ide/
ide-io.c
70 struct request *rq; in ide_end_request() local
75 rq = HWGROUP(drive)->rq; in ide_end_request()
86 if (!end_that_request_first(rq, uptodate, drive->name)) { in ide_end_request()
87 add_blkdev_randomness(MAJOR(rq->rq_dev)); in ide_end_request()
88 blkdev_dequeue_request(rq); in ide_end_request()
89 HWGROUP(drive)->rq = NULL; in ide_end_request()
90 end_that_request_last(rq); in ide_end_request()
118 struct request *rq; in ide_end_drive_cmd() local
121 rq = HWGROUP(drive)->rq; in ide_end_drive_cmd()
124 switch(rq->cmd) { in ide_end_drive_cmd()
[all …]
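The ide-io.c excerpt above shows the request-completion idiom that several other hits in this list (ide-floppy.c, ide-scsi.c) repeat almost verbatim. As a reading aid, here is a condensed sketch of that pattern, using only the helpers visible in the excerpts; the function name is illustrative and the io_request_lock handling done by the real ide_end_request() is omitted:

#include <linux/ide.h>
#include <linux/blkdev.h>

/* Sketch of the 2.4-era completion idiom: finish the current segment, and
 * once end_that_request_first() reports the whole request is done, dequeue
 * it and run final completion. */
static int example_end_request(ide_drive_t *drive, int uptodate)
{
    struct request *rq = HWGROUP(drive)->rq;

    if (!end_that_request_first(rq, uptodate, drive->name)) {
        add_blkdev_randomness(MAJOR(rq->rq_dev)); /* feed the entropy pool */
        blkdev_dequeue_request(rq);               /* take it off the queue */
        HWGROUP(drive)->rq = NULL;                /* hwgroup is idle again */
        end_that_request_last(rq);                /* wake waiters, free rq */
        return 0;                                 /* request finished */
    }
    return 1;                                     /* more segments remain */
}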
ide-taskfile.c
65 #define task_rq_offset(rq) \ argument
66 (((rq)->nr_sectors - (rq)->current_nr_sectors) * SECTOR_SIZE)
76 inline char *task_map_rq (struct request *rq, unsigned long *flags) in task_map_rq() argument
78 if (rq->bh) in task_map_rq()
79 return ide_map_buffer(rq, flags); in task_map_rq()
80 return rq->buffer + task_rq_offset(rq); in task_map_rq()
83 inline void task_unmap_rq (struct request *rq, char *buf, unsigned long *flags) in task_unmap_rq() argument
85 if (rq->bh) in task_unmap_rq()
206 return task->prehandler(drive, task->rq); in do_rw_taskfile()
211 if (blk_fs_request(task->rq) && drive->using_dma) { in do_rw_taskfile()
[all …]
ide-cd.c
522 struct request *rq; in cdrom_queue_request_sense() local
534 rq = &info->request_sense_request; in cdrom_queue_request_sense()
535 ide_init_drive_cmd(rq); in cdrom_queue_request_sense()
536 rq->cmd = REQUEST_SENSE_COMMAND; in cdrom_queue_request_sense()
537 rq->buffer = (char *) pc; in cdrom_queue_request_sense()
538 rq->waiting = wait; in cdrom_queue_request_sense()
539 (void) ide_do_drive_cmd(drive, rq, ide_preempt); in cdrom_queue_request_sense()
548 struct request *rq; in ide_cdrom_end_request() local
553 rq = HWGROUP(drive)->rq; in ide_cdrom_end_request()
564 if (!end_that_request_first(rq, uptodate, drive->name)) { in ide_cdrom_end_request()
[all …]
ide-disk.c
154 struct request *rq; in read_intr() local
170 rq = HWGROUP(drive)->rq; in read_intr()
172 if ((nsect = rq->current_nr_sectors) > msect) in read_intr()
177 to = ide_map_buffer(rq, &flags); in read_intr()
182 drive->name, rq->sector, rq->sector+nsect-1, in read_intr()
183 (unsigned long) rq->buffer+(nsect<<9), rq->nr_sectors-nsect); in read_intr()
187 rq->sector += nsect; in read_intr()
188 rq->errors = 0; in read_intr()
189 i = (rq->nr_sectors -= nsect); in read_intr()
190 if (((long)(rq->current_nr_sectors -= nsect)) <= 0) in read_intr()
[all …]
ide-floppy.c
155 struct request *rq; /* The corresponding request */ member
533 struct request *rq; in idefloppy_end_request() local
538 rq = HWGROUP(drive)->rq; in idefloppy_end_request()
549 if (!end_that_request_first(rq, uptodate, drive->name)) { in idefloppy_end_request()
550 add_blkdev_randomness(MAJOR(rq->rq_dev)); in idefloppy_end_request()
551 blkdev_dequeue_request(rq); in idefloppy_end_request()
552 HWGROUP(drive)->rq = NULL; in idefloppy_end_request()
553 end_that_request_last(rq); in idefloppy_end_request()
569 struct request *rq = HWGROUP(drive)->rq; in idefloppy_do_end_request() local
584 if (!rq) in idefloppy_do_end_request()
[all …]
ide-dma.c
223 struct request *rq = HWGROUP(drive)->rq; in ide_dma_intr() local
225 for (i = rq->nr_sectors; i > 0;) { in ide_dma_intr()
226 i -= rq->current_nr_sectors; in ide_dma_intr()
251 static int ide_build_sglist (ide_hwif_t *hwif, struct request *rq, int ddir) in ide_build_sglist() argument
261 bh = rq->bh; in ide_build_sglist()
318 static int ide_raw_build_sglist (ide_hwif_t *hwif, struct request *rq) in ide_raw_build_sglist() argument
322 ide_task_t *args = rq->special; in ide_raw_build_sglist()
323 u8 *virt_addr = rq->buffer; in ide_raw_build_sglist()
324 int sector_count = rq->nr_sectors; in ide_raw_build_sglist()
372 int ide_build_dmatable (ide_drive_t *drive, struct request *rq, int ddir) in ide_build_dmatable() argument
[all …]
ide-tape.c
836 struct request rq; /* The corresponding request */ member
1733 struct request *rq = &stage->rq; in idetape_active_next_stage() local
1747 rq->buffer = NULL; in idetape_active_next_stage()
1748 rq->bh = stage->bh; in idetape_active_next_stage()
1749 tape->active_data_request = rq; in idetape_active_next_stage()
1877 struct request *rq = HWGROUP(drive)->rq; in idetape_end_request() local
1899 rq->errors = error; in idetape_end_request()
1904 if (tape->active_data_request == rq) { in idetape_end_request()
1910 if (rq->cmd == IDETAPE_WRITE_RQ) { in idetape_end_request()
1930 rq->waiting = NULL; in idetape_end_request()
[all …]
/linux-2.4.37.9/drivers/ide/legacy/
pdc4030.c
97 static ide_startstop_t promise_rw_disk (ide_drive_t *drive, struct request *rq, unsigned long block…
401 struct request *rq; in promise_read_intr() local
418 rq = HWGROUP(drive)->rq; in promise_read_intr()
419 sectors_avail = rq->nr_sectors - sectors_left; in promise_read_intr()
424 rq = HWGROUP(drive)->rq; in promise_read_intr()
425 nsect = rq->current_nr_sectors; in promise_read_intr()
430 to = ide_map_buffer(rq, &flags); in promise_read_intr()
433 HWIF(drive)->ata_input_data(drive, rq->buffer, nsect * SECTOR_WORDS); in promise_read_intr()
438 "buf=0x%08lx, rem=%ld\n", drive->name, rq->sector, in promise_read_intr()
439 rq->sector+nsect-1, in promise_read_intr()
[all …]
/linux-2.4.37.9/drivers/usb/
usbvideo.h
117 #define RING_QUEUE_ADVANCE_INDEX(rq,ind,n) (rq)->ind = ((rq)->ind + (n)) & ((rq)->length-1) argument
118 #define RING_QUEUE_DEQUEUE_BYTES(rq,n) RING_QUEUE_ADVANCE_INDEX(rq,ri,n) argument
119 #define RING_QUEUE_PEEK(rq,ofs) ((rq)->queue[((ofs) + (rq)->ri) & ((rq)->length-1)]) argument
307 int RingQueue_Dequeue(struct RingQueue *rq, unsigned char *dst, int len);
308 int RingQueue_Enqueue(struct RingQueue *rq, const unsigned char *cdata, int n);
309 void RingQueue_WakeUpInterruptible(struct RingQueue *rq);
310 void RingQueue_Flush(struct RingQueue *rq);
312 static inline int RingQueue_GetLength(const struct RingQueue *rq) in RingQueue_GetLength() argument
314 return (rq->wi - rq->ri + rq->length) & (rq->length-1); in RingQueue_GetLength()
317 static inline int RingQueue_GetFreeSpace(const struct RingQueue *rq) in RingQueue_GetFreeSpace() argument
[all …]
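The usbvideo.h macros above depend on the ring length being a power of two, so an index wraps with a single bitwise AND rather than a modulo. A minimal userspace sketch of the same indexing follows; the struct and function names are simplified stand-ins rather than the kernel's RingQueue API, and the keep-one-slot-free convention in ring_free() is an assumption:

#include <assert.h>
#include <stdio.h>

struct ring {
    unsigned char buf[8];   /* length must be a power of two */
    int ri, wi;             /* read index, write index */
    int length;
};

/* Bytes currently queued: same formula as RingQueue_GetLength() above. */
static int ring_length(const struct ring *r)
{
    return (r->wi - r->ri + r->length) & (r->length - 1);
}

/* Free space, keeping one slot empty so full and empty stay distinguishable
 * (assumed convention, not taken from the kernel source). */
static int ring_free(const struct ring *r)
{
    return r->length - ring_length(r) - 1;
}

static void ring_put(struct ring *r, unsigned char c)
{
    assert(ring_free(r) > 0);
    r->buf[r->wi] = c;
    r->wi = (r->wi + 1) & (r->length - 1);   /* cf. RING_QUEUE_ADVANCE_INDEX */
}

static unsigned char ring_get(struct ring *r)
{
    unsigned char c = r->buf[r->ri];         /* cf. RING_QUEUE_PEEK(rq, 0) */
    r->ri = (r->ri + 1) & (r->length - 1);   /* cf. RING_QUEUE_DEQUEUE_BYTES */
    return c;
}

int main(void)
{
    struct ring r = { .ri = 0, .wi = 0, .length = 8 };
    unsigned char first, second;

    ring_put(&r, 'a');
    ring_put(&r, 'b');
    first = ring_get(&r);
    second = ring_get(&r);
    printf("%c%c queued=%d\n", first, second, ring_length(&r));
    return 0;
}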
usbvideo.c
112 static void RingQueue_Initialize(struct RingQueue *rq) in RingQueue_Initialize() argument
114 assert(rq != NULL); in RingQueue_Initialize()
115 init_waitqueue_head(&rq->wqh); in RingQueue_Initialize()
118 static void RingQueue_Allocate(struct RingQueue *rq, int rqLen) in RingQueue_Allocate() argument
125 assert(rq != NULL); in RingQueue_Allocate()
133 rq->length = rqLen; in RingQueue_Allocate()
134 rq->ri = rq->wi = 0; in RingQueue_Allocate()
135 rq->queue = usbvideo_rvmalloc(rq->length); in RingQueue_Allocate()
136 assert(rq->queue != NULL); in RingQueue_Allocate()
139 static int RingQueue_IsAllocated(const struct RingQueue *rq) in RingQueue_IsAllocated() argument
[all …]
/linux-2.4.37.9/drivers/scsi/
ide-scsi.c
75 struct request *rq; /* The corresponding request */ member
327 struct request *rq; in idescsi_do_end_request() local
332 rq = HWGROUP(drive)->rq; in idescsi_do_end_request()
343 if (!end_that_request_first(rq, uptodate, drive->name)) { in idescsi_do_end_request()
344 add_blkdev_randomness(MAJOR(rq->rq_dev)); in idescsi_do_end_request()
345 blkdev_dequeue_request(rq); in idescsi_do_end_request()
346 HWGROUP(drive)->rq = NULL; in idescsi_do_end_request()
347 end_that_request_last(rq); in idescsi_do_end_request()
357 struct request *rq = HWGROUP(drive)->rq; in idescsi_end_request() local
358 idescsi_pc_t *pc = (idescsi_pc_t *) rq->special; in idescsi_end_request()
[all …]
/linux-2.4.37.9/drivers/char/
raw.c
194 struct raw_config_request rq; in raw_ctl_ioctl() local
204 err = copy_from_user(&rq, (void *) arg, sizeof(rq)); in raw_ctl_ioctl()
208 minor = rq.raw_minor; in raw_ctl_ioctl()
231 if ((rq.block_major == NODEV && in raw_ctl_ioctl()
232 rq.block_minor != NODEV) || in raw_ctl_ioctl()
233 rq.block_major > MAX_BLKDEV || in raw_ctl_ioctl()
234 rq.block_minor > MINORMASK) { in raw_ctl_ioctl()
248 bdget(kdev_t_to_nr(MKDEV(rq.block_major, rq.block_minor))); in raw_ctl_ioctl()
257 rq.block_major = MAJOR(dev); in raw_ctl_ioctl()
258 rq.block_minor = MINOR(dev); in raw_ctl_ioctl()
[all …]
/linux-2.4.37.9/include/linux/
blkdev.h
79 struct request_list rq; member
163 #define blk_fs_request(rq) ((rq)->cmd == READ || (rq)->cmd == WRITE) argument
166 extern inline int rq_data_dir(struct request *rq) in rq_data_dir() argument
168 if (rq->cmd == READ) in rq_data_dir()
170 else if (rq->cmd == WRITE) in rq_data_dir()
297 return q->rq.count == 0; in blk_oversized_queue()
304 return q->rq.count == 0; in blk_oversized_queue_reads()
315 static inline void blk_started_sectors(struct request *rq, int count) in blk_started_sectors() argument
317 request_queue_t *q = rq->q; in blk_started_sectors()
327 static inline void blk_finished_sectors(struct request *rq, int count) in blk_finished_sectors() argument
[all …]
elevator.h
67 #define BHRQ_IN_ORDER(bh, rq) \ argument
68 ((((bh)->b_rdev == (rq)->rq_dev && \
69 (bh)->b_rsector < (rq)->sector)) || \
70 (bh)->b_rdev < (rq)->rq_dev)
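BHRQ_IN_ORDER above is the 2.4 elevator's sort rule: a buffer head belongs ahead of a request when it targets a lower device, or the same device at a lower sector. A standalone restatement of that predicate, with plain integers standing in for the kdev_t and sector fields, reads:

#include <stdbool.h>

/* Illustrative stand-in for the (device, sector) pair that the macro
 * compares; the struct and field names here are hypothetical. */
struct io_pos {
    int  dev;
    long sector;
};

/* True when bh should be serviced before rq: same device and lower
 * sector, or any lower device -- exactly the macro above. */
static inline bool bh_before_rq(const struct io_pos *bh, const struct io_pos *rq)
{
    return (bh->dev == rq->dev && bh->sector < rq->sector) ||
            bh->dev < rq->dev;
}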
/linux-2.4.37.9/drivers/block/
ll_rw_blk.c
153 struct request *rq; in __blk_cleanup_queue() local
157 rq = list_entry(head->next, struct request, queue); in __blk_cleanup_queue()
158 list_del(&rq->queue); in __blk_cleanup_queue()
159 kmem_cache_free(request_cachep, rq); in __blk_cleanup_queue()
188 count -= __blk_cleanup_queue(&q->rq); in blk_cleanup_queue()
419 struct request *rq; in blk_grow_request_list() local
421 rq = kmem_cache_alloc(request_cachep, SLAB_ATOMIC); in blk_grow_request_list()
422 if (rq == NULL) in blk_grow_request_list()
424 memset(rq, 0, sizeof(*rq)); in blk_grow_request_list()
425 rq->rq_status = RQ_INACTIVE; in blk_grow_request_list()
[all …]
elevator.c
36 inline int bh_rq_in_between(struct buffer_head *bh, struct request *rq, in bh_rq_in_between() argument
42 next = rq->queue.next; in bh_rq_in_between()
51 if (next_rq->rq_dev != rq->rq_dev) in bh_rq_in_between()
52 return bh->b_rsector > rq->sector; in bh_rq_in_between()
58 if (bh->b_rsector < next_rq->sector && bh->b_rsector > rq->sector) in bh_rq_in_between()
64 if (next_rq->sector > rq->sector) in bh_rq_in_between()
71 if (bh->b_rsector > rq->sector || bh->b_rsector < next_rq->sector) in bh_rq_in_between()
sx8.c
247 struct request *rq; member
654 static void carm_insert_special(request_queue_t *q, struct request *rq, in carm_insert_special() argument
659 rq->cmd = SPECIAL; in carm_insert_special()
660 rq->special = data; in carm_insert_special()
661 rq->q = NULL; in carm_insert_special()
662 rq->nr_segments = 0; in carm_insert_special()
663 rq->elevator_sequence = 0; in carm_insert_special()
667 list_add(&rq->queue, &q->queue_head); in carm_insert_special()
669 list_add_tail(&rq->queue, &q->queue_head); in carm_insert_special()
693 crq->rq = &crq->special_rq; in carm_get_special()
[all …]
/linux-2.4.37.9/drivers/ide/pci/
trm290.c
183 struct request *rq = HWGROUP(drive)->rq; in trm290_ide_dma_write() local
195 if (!(count = ide_build_dmatable(drive, rq, PCI_DMA_TODEVICE))) { in trm290_ide_dma_write()
216 ide_task_t *args = rq->special; in trm290_ide_dma_write()
221 if (rq->cmd == IDE_DRIVE_TASKFILE) { in trm290_ide_dma_write()
222 ide_task_t *args = rq->special; in trm290_ide_dma_write()
234 struct request *rq = HWGROUP(drive)->rq; in trm290_ide_dma_read() local
239 if (!(count = ide_build_dmatable(drive, rq, PCI_DMA_FROMDEVICE))) { in trm290_ide_dma_read()
260 ide_task_t *args = rq->special; in trm290_ide_dma_read()
265 if (rq->cmd == IDE_DRIVE_TASKFILE) { in trm290_ide_dma_read()
266 ide_task_t *args = rq->special; in trm290_ide_dma_read()
sgiioc4.c
223 struct request *rq = HWGROUP(drive)->rq; in sgiioc4_ide_dma_read() local
226 if (!(count = sgiioc4_build_dma_table(drive, rq, PCI_DMA_FROMDEVICE))) { in sgiioc4_ide_dma_read()
239 struct request *rq = HWGROUP(drive)->rq; in sgiioc4_ide_dma_write() local
242 if (!(count = sgiioc4_build_dma_table(drive, rq, PCI_DMA_TODEVICE))) { in sgiioc4_ide_dma_write()
553 sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir) in sgiioc4_build_dma_table() argument
560 if (rq->cmd == IDE_DRIVE_TASKFILE) in sgiioc4_build_dma_table()
561 hwif->sg_nents = i = sgiioc4_ide_raw_build_sglist(hwif, rq); in sgiioc4_build_dma_table()
563 hwif->sg_nents = i = sgiioc4_ide_build_sglist(hwif, rq, ddir); in sgiioc4_build_dma_table()
688 sgiioc4_ide_build_sglist(ide_hwif_t * hwif, struct request *rq, int ddir) in sgiioc4_ide_build_sglist() argument
698 bh = rq->bh; in sgiioc4_ide_build_sglist()
[all …]
sgiioc4.h
148 static int sgiioc4_ide_build_sglist(ide_hwif_t * hwif, struct request *rq,
150 static int sgiioc4_ide_raw_build_sglist(ide_hwif_t * hwif, struct request *rq);
155 static unsigned int sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq,
/linux-2.4.37.9/include/linux/nfsd/
auth.h
15 #define nfsd_luid(rq, uid) ((u32)(uid)) argument
16 #define nfsd_lgid(rq, gid) ((u32)(gid)) argument
17 #define nfsd_ruid(rq, uid) ((u32)(uid)) argument
18 #define nfsd_rgid(rq, gid) ((u32)(gid)) argument
/linux-2.4.37.9/drivers/ide/arm/
icside.c
237 static int ide_build_sglist(ide_hwif_t *hwif, struct request *rq) in ide_build_sglist() argument
243 if (rq->cmd == READ) in ide_build_sglist()
247 bh = rq->bh; in ide_build_sglist()
269 return HWIF(drive)->sg_nents = ide_build_sglist(HWIF(drive), HWGROUP(drive)->rq, ddir); in icside_build_dmatable()
344 struct request *rq = HWGROUP(drive)->rq; in icside_dmaintr() local
345 rq = HWGROUP(drive)->rq; in icside_dmaintr()
346 for (i = rq->nr_sectors; i > 0;) { in icside_dmaintr()
347 i -= rq->current_nr_sectors; in icside_dmaintr()
589 ide_task_t *args = HWGROUP(drive)->rq->special; in icside_dma_read()
594 if (HWGROUP(drive)->rq->flags & REQ_DRIVE_TASKFILE) { in icside_dma_read()
[all …]
/linux-2.4.37.9/arch/cris/drivers/
ide.c
687 struct request *rq = HWGROUP(drive)->rq; in e100_ide_build_dmatable() local
688 struct buffer_head *bh = rq->bh; in e100_ide_build_dmatable()
703 addr = virt_to_phys (rq->buffer); in e100_ide_build_dmatable()
704 size = rq->nr_sectors << 9; in e100_ide_build_dmatable()
815 struct request *rq; in etrax_dma_intr() local
816 rq = HWGROUP(drive)->rq; in etrax_dma_intr()
817 for (i = rq->nr_sectors; i > 0;) { in etrax_dma_intr()
818 i -= rq->current_nr_sectors; in etrax_dma_intr()
873 if ((HWGROUP(drive)->rq->cmd == IDE_DRIVE_TASKFILE) && in e100_start_dma()
875 ide_task_t *args = HWGROUP(drive)->rq->special; in e100_start_dma()
[all …]
/linux-2.4.37.9/drivers/net/
myri_sbus.c
248 struct recvq *rq = mp->rq; in myri_clean_rings() local
251 sbus_writel(0, &rq->tail); in myri_clean_rings()
252 sbus_writel(0, &rq->head); in myri_clean_rings()
255 struct myri_rxd *rxd = &rq->myri_rxd[i]; in myri_clean_rings()
284 struct recvq *rq = mp->rq; in myri_init_rings() local
285 struct myri_rxd *rxd = &rq->myri_rxd[0]; in myri_init_rings()
310 sbus_writel(0, &rq->head); in myri_init_rings()
311 sbus_writel(RX_RING_SIZE, &rq->tail); in myri_init_rings()
421 struct recvq *rq = mp->rq; in myri_rx() local
437 struct myri_rxd *rxd = &rq->myri_rxd[rq->tail]; in myri_rx()
[all …]
/linux-2.4.37.9/drivers/ide/ppc/
pmac.c
344 static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq, int ddir);
1215 pmac_ide_build_sglist(ide_hwif_t *hwif, struct request *rq, int data_dir) in pmac_ide_build_sglist() argument
1228 bh = rq->bh; in pmac_ide_build_sglist()
1274 pmac_ide_raw_build_sglist(ide_hwif_t *hwif, struct request *rq) in pmac_ide_raw_build_sglist() argument
1279 ide_task_t *args = rq->special; in pmac_ide_raw_build_sglist()
1280 unsigned char *virt_addr = rq->buffer; in pmac_ide_raw_build_sglist()
1281 int sector_count = rq->nr_sectors; in pmac_ide_raw_build_sglist()
1309 pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq, int ddir) in pmac_ide_build_dmatable() argument
1327 if (rq->cmd == IDE_DRIVE_TASKFILE) in pmac_ide_build_dmatable()
1328 pmif->sg_nents = i = pmac_ide_raw_build_sglist(hwif, rq); in pmac_ide_build_dmatable()
[all …]
