/linux-3.4.99/drivers/scsi/fnic/

  vnic_rq.c
    27  static int vnic_rq_alloc_bufs(struct vnic_rq *rq)  in vnic_rq_alloc_bufs() argument
    31  unsigned int i, j, count = rq->ring.desc_count;  in vnic_rq_alloc_bufs()
    34  vdev = rq->vdev;  in vnic_rq_alloc_bufs()
    37  rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);  in vnic_rq_alloc_bufs()
    38  if (!rq->bufs[i]) {  in vnic_rq_alloc_bufs()
    45  buf = rq->bufs[i];  in vnic_rq_alloc_bufs()
    48  buf->desc = (u8 *)rq->ring.descs +  in vnic_rq_alloc_bufs()
    49  rq->ring.desc_size * buf->index;  in vnic_rq_alloc_bufs()
    51  buf->next = rq->bufs[0];  in vnic_rq_alloc_bufs()
    54  buf->next = rq->bufs[i + 1];  in vnic_rq_alloc_bufs()
    [all …]

  vnic_rq.h
    105  static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)  in vnic_rq_desc_avail() argument
    108  return rq->ring.desc_avail;  in vnic_rq_desc_avail()
    111  static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)  in vnic_rq_desc_used() argument
    114  return rq->ring.desc_count - rq->ring.desc_avail - 1;  in vnic_rq_desc_used()
    117  static inline void *vnic_rq_next_desc(struct vnic_rq *rq)  in vnic_rq_next_desc() argument
    119  return rq->to_use->desc;  in vnic_rq_next_desc()
    122  static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)  in vnic_rq_next_index() argument
    124  return rq->to_use->index;  in vnic_rq_next_index()
    127  static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)  in vnic_rq_next_buf_index() argument
    129  return rq->buf_index++;  in vnic_rq_next_buf_index()
    [all …]

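Note that vnic_rq_desc_used() above returns desc_count - desc_avail - 1: one descriptor slot is kept permanently unused so a full ring stays distinguishable from an empty one. A minimal userspace sketch of that bookkeeping convention (the struct and function names are illustrative, not the driver's API):

#include <assert.h>
#include <stdio.h>

/* Toy ring bookkeeping: one slot is reserved, so a ring with `count`
 * descriptors can have at most `count - 1` in flight at once. */
struct ring_ctrs {
	unsigned int desc_count;  /* total descriptors in the ring */
	unsigned int desc_avail;  /* descriptors still free to post */
};

static void ring_init(struct ring_ctrs *r, unsigned int count)
{
	r->desc_count = count;
	r->desc_avail = count - 1;   /* reserve one slot up front */
}

static unsigned int ring_used(const struct ring_ctrs *r)
{
	return r->desc_count - r->desc_avail - 1;
}

int main(void)
{
	struct ring_ctrs r;

	ring_init(&r, 8);
	assert(ring_used(&r) == 0);      /* empty ring */

	r.desc_avail -= 3;               /* post three buffers */
	assert(ring_used(&r) == 3);

	r.desc_avail = 0;                /* ring is now "full" ... */
	printf("used at full: %u of %u\n", ring_used(&r), r.desc_count);
	return 0;                        /* ... yet only count - 1 are used */
}
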
/linux-3.4.99/drivers/net/ethernet/cisco/enic/

  vnic_rq.c
    30  static int vnic_rq_alloc_bufs(struct vnic_rq *rq)  in vnic_rq_alloc_bufs() argument
    34  unsigned int i, j, count = rq->ring.desc_count;  in vnic_rq_alloc_bufs()
    37  vdev = rq->vdev;  in vnic_rq_alloc_bufs()
    40  rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);  in vnic_rq_alloc_bufs()
    41  if (!rq->bufs[i])  in vnic_rq_alloc_bufs()
    46  buf = rq->bufs[i];  in vnic_rq_alloc_bufs()
    49  buf->desc = (u8 *)rq->ring.descs +  in vnic_rq_alloc_bufs()
    50  rq->ring.desc_size * buf->index;  in vnic_rq_alloc_bufs()
    52  buf->next = rq->bufs[0];  in vnic_rq_alloc_bufs()
    55  buf->next = rq->bufs[i + 1];  in vnic_rq_alloc_bufs()
    [all …]

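Both copies of vnic_rq_alloc_bufs() allocate the per-buffer bookkeeping in fixed-size blocks and chain each entry to its successor, with the last entry pointing back at bufs[0] so the list is circular. A self-contained sketch of that pattern (block size, struct fields and error handling are invented for illustration; the driver does this under GFP_ATOMIC, where keeping each allocation small helps):

#include <stdlib.h>
#include <stdio.h>

#define BUF_BLK_ENTRIES 4                /* illustrative block size */

struct buf {
	unsigned int index;
	struct buf *next;
};

/* Allocate `count` buf entries in blocks and link them into a ring.
 * (Error unwinding of earlier blocks is omitted in this sketch.) */
static struct buf *alloc_buf_ring(struct buf **blocks, unsigned int nblocks,
				  unsigned int count)
{
	unsigned int i, j, idx = 0;

	for (i = 0; i < nblocks; i++) {
		blocks[i] = calloc(BUF_BLK_ENTRIES, sizeof(struct buf));
		if (!blocks[i])
			return NULL;
	}

	for (i = 0; i < nblocks; i++) {
		for (j = 0; j < BUF_BLK_ENTRIES && idx < count; j++, idx++) {
			struct buf *b = &blocks[i][j];

			b->index = idx;
			if (idx == count - 1)
				b->next = &blocks[0][0];          /* close the ring */
			else if (j == BUF_BLK_ENTRIES - 1)
				b->next = &blocks[i + 1][0];      /* hop to next block */
			else
				b->next = b + 1;                  /* next entry in block */
		}
	}
	return &blocks[0][0];
}

int main(void)
{
	struct buf *blocks[3];
	struct buf *b = alloc_buf_ring(blocks, 3, 10);
	unsigned int n;

	for (n = 0; n < 12 && b; n++, b = b->next)
		printf("%u ", b->index);      /* wraps back to 0 after 9 */
	printf("\n");
	return 0;
}
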
  vnic_rq.h
    89  static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)  in vnic_rq_desc_avail() argument
    92  return rq->ring.desc_avail;  in vnic_rq_desc_avail()
    95  static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)  in vnic_rq_desc_used() argument
    98  return rq->ring.desc_count - rq->ring.desc_avail - 1;  in vnic_rq_desc_used()
    101  static inline void *vnic_rq_next_desc(struct vnic_rq *rq)  in vnic_rq_next_desc() argument
    103  return rq->to_use->desc;  in vnic_rq_next_desc()
    106  static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)  in vnic_rq_next_index() argument
    108  return rq->to_use->index;  in vnic_rq_next_index()
    111  static inline void vnic_rq_post(struct vnic_rq *rq,  in vnic_rq_post() argument
    115  struct vnic_rq_buf *buf = rq->to_use;  in vnic_rq_post()
    [all …]

/linux-3.4.99/kernel/sched/

  stop_task.c
    21  check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)  in check_preempt_curr_stop() argument
    26  static struct task_struct *pick_next_task_stop(struct rq *rq)  in pick_next_task_stop() argument
    28  struct task_struct *stop = rq->stop;  in pick_next_task_stop()
    31  stop->se.exec_start = rq->clock_task;  in pick_next_task_stop()
    39  enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)  in enqueue_task_stop() argument
    41  inc_nr_running(rq);  in enqueue_task_stop()
    45  dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)  in dequeue_task_stop() argument
    47  dec_nr_running(rq);  in dequeue_task_stop()
    50  static void yield_task_stop(struct rq *rq)  in yield_task_stop() argument
    55  static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)  in put_prev_task_stop() argument
    [all …]

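stop_task.c and idle_task.c below supply the per-class hooks (enqueue, dequeue, pick_next, ...) that the core scheduler invokes through a table of function pointers. A stripped-down userspace sketch of that dispatch pattern (the struct layout and hook names here are invented stand-ins, not the kernel's sched_class):

#include <stdio.h>

struct runq;   /* stand-in for struct rq */

/* A "scheduling class" as a table of hooks, dispatched per runqueue event. */
struct sched_ops {
	const char *name;
	void (*enqueue)(struct runq *rq);
	void (*dequeue)(struct runq *rq);
	int  (*pick_next)(struct runq *rq);   /* returns a fake task id */
};

struct runq {
	int nr_running;
	const struct sched_ops *ops;
};

static void stopish_enqueue(struct runq *rq) { rq->nr_running++; }
static void stopish_dequeue(struct runq *rq) { rq->nr_running--; }
static int  stopish_pick(struct runq *rq)    { return rq->nr_running ? 1 : -1; }

static const struct sched_ops stopish_ops = {
	.name      = "stopish",
	.enqueue   = stopish_enqueue,
	.dequeue   = stopish_dequeue,
	.pick_next = stopish_pick,
};

int main(void)
{
	struct runq rq = { .nr_running = 0, .ops = &stopish_ops };

	rq.ops->enqueue(&rq);
	printf("%s picked task %d (nr_running=%d)\n",
	       rq.ops->name, rq.ops->pick_next(&rq), rq.nr_running);
	rq.ops->dequeue(&rq);
	return 0;
}
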
  rt.c
    60  void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)  in init_rt_rq() argument
    103  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)  in rq_of_rt_rq()
    105  return rt_rq->rq;  in rq_of_rt_rq()
    135  struct rq *rq = cpu_rq(cpu);  in init_tg_rt_entry() local
    139  rt_rq->rq = rq;  in init_tg_rt_entry()
    149  rt_se->rt_rq = &rq->rt;  in init_tg_rt_entry()
    207  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)  in rq_of_rt_rq()
    209  return container_of(rt_rq, struct rq, rt);  in rq_of_rt_rq()
    215  struct rq *rq = task_rq(p);  in rt_rq_of_se() local
    217  return &rq->rt;  in rt_rq_of_se()
    [all …]

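The second rq_of_rt_rq() variant (line 209) recovers the enclosing struct rq from the embedded rt runqueue with container_of(), while the first variant relies on the back-pointer that init_tg_rt_entry() stores for the group-scheduling build. A small standalone illustration of the macro itself (struct names below are placeholders):

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): subtract the member's
 * offset from the member's address to recover the enclosing object. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rt_part { int throttled; };
struct big_rq  { int cpu; struct rt_part rt; };  /* rt embedded, no back-pointer */

static struct big_rq *rq_of_rt_part(struct rt_part *rt)
{
	return container_of(rt, struct big_rq, rt);
}

int main(void)
{
	struct big_rq rq = { .cpu = 3 };
	struct rt_part *rt = &rq.rt;

	printf("recovered cpu = %d\n", rq_of_rt_part(rt)->cpu);   /* prints 3 */
	return 0;
}
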
  idle_task.c
    20  static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)  in check_preempt_curr_idle() argument
    22  resched_task(rq->idle);  in check_preempt_curr_idle()
    25  static struct task_struct *pick_next_task_idle(struct rq *rq)  in pick_next_task_idle() argument
    27  schedstat_inc(rq, sched_goidle);  in pick_next_task_idle()
    28  return rq->idle;  in pick_next_task_idle()
    36  dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)  in dequeue_task_idle() argument
    38  raw_spin_unlock_irq(&rq->lock);  in dequeue_task_idle()
    41  raw_spin_lock_irq(&rq->lock);  in dequeue_task_idle()
    44  static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)  in put_prev_task_idle() argument
    48  static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)  in task_tick_idle() argument
    [all …]

  stats.h
    8  rq_sched_info_arrive(struct rq *rq, unsigned long long delta)  in rq_sched_info_arrive() argument
    10  if (rq) {  in rq_sched_info_arrive()
    11  rq->rq_sched_info.run_delay += delta;  in rq_sched_info_arrive()
    12  rq->rq_sched_info.pcount++;  in rq_sched_info_arrive()
    20  rq_sched_info_depart(struct rq *rq, unsigned long long delta)  in rq_sched_info_depart() argument
    22  if (rq)  in rq_sched_info_depart()
    23  rq->rq_cpu_time += delta;  in rq_sched_info_depart()
    27  rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)  in rq_sched_info_dequeued() argument
    29  if (rq)  in rq_sched_info_dequeued()
    30  rq->rq_sched_info.run_delay += delta;  in rq_sched_info_dequeued()
    [all …]

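These stats.h helpers accumulate per-runqueue schedstat counters: roughly, run_delay is time spent waiting to get on the CPU, pcount counts arrivals, and rq_cpu_time is time spent running. A small userspace analogue of that arrive/depart accounting (field names loosely mirror the snippet; the rest is invented):

#include <stdio.h>

/* Aggregate scheduling statistics for one run queue. */
struct rq_stats {
	unsigned long long run_delay;  /* total ns spent waiting to run */
	unsigned long long pcount;     /* number of arrivals on the CPU */
	unsigned long long cpu_time;   /* total ns spent running */
};

static void sched_info_arrive(struct rq_stats *st, unsigned long long waited_ns)
{
	st->run_delay += waited_ns;    /* task waited this long before running */
	st->pcount++;
}

static void sched_info_depart(struct rq_stats *st, unsigned long long ran_ns)
{
	st->cpu_time += ran_ns;        /* task just gave up the CPU */
}

int main(void)
{
	struct rq_stats st = { 0 };

	sched_info_arrive(&st, 1500);
	sched_info_depart(&st, 90000);
	sched_info_arrive(&st, 300);

	printf("avg wait %llu ns over %llu arrivals, cpu time %llu ns\n",
	       st.run_delay / st.pcount, st.pcount, st.cpu_time);
	return 0;
}
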
  sched.h
    226  struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */  member
    306  struct rq *rq;  member
    348  struct rq {  struct
    468  static inline int cpu_of(struct rq *rq)  in cpu_of() argument
    471  return rq->cpu;  in cpu_of()
    477  DECLARE_PER_CPU(struct rq, runqueues);
    660  static inline int task_current(struct rq *rq, struct task_struct *p)  in task_current() argument
    662  return rq->curr == p;  in task_current()
    665  static inline int task_running(struct rq *rq, struct task_struct *p)  in task_running() argument
    670  return task_current(rq, p);  in task_running()
    [all …]

  core.c
    111  DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
    113  static void update_rq_clock_task(struct rq *rq, s64 delta);
    115  void update_rq_clock(struct rq *rq)  in update_rq_clock() argument
    119  if (rq->skip_clock_update > 0)  in update_rq_clock()
    122  delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;  in update_rq_clock()
    123  rq->clock += delta;  in update_rq_clock()
    124  update_rq_clock_task(rq, delta);  in update_rq_clock()
    295  static inline struct rq *__task_rq_lock(struct task_struct *p)  in __task_rq_lock()
    296  __acquires(rq->lock)  in __task_rq_lock()
    298  struct rq *rq;  in __task_rq_lock() local
    [all …]

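update_rq_clock() advances the runqueue's cached clock by the delta against a per-CPU clock source, passes that delta on for further accounting, and can be told to skip the update via skip_clock_update. A plain C sketch of the cached-clock pattern, using CLOCK_MONOTONIC as a stand-in clock source (everything else is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Cached-clock pattern: read the (comparatively expensive) clock once,
 * advance a cached copy by the observed delta; callers may skip updates. */
struct clocked_rq {
	uint64_t clock;            /* cached nanoseconds */
	int      skip_clock_update;
};

static uint64_t monotonic_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void update_clock(struct clocked_rq *rq)
{
	uint64_t delta;

	if (rq->skip_clock_update > 0)
		return;                 /* caller said the clock is fresh enough */

	delta = monotonic_ns() - rq->clock;
	rq->clock += delta;             /* kept as a delta so per-delta
					 * accounting could hook in here */
}

int main(void)
{
	struct clocked_rq rq = { .clock = monotonic_ns() };

	update_clock(&rq);
	printf("cached clock: %llu ns\n", (unsigned long long)rq.clock);
	return 0;
}
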
/linux-3.4.99/block/

  blk-flush.c
    94  static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)  in blk_flush_policy() argument
    98  if (blk_rq_sectors(rq))  in blk_flush_policy()
    102  if (rq->cmd_flags & REQ_FLUSH)  in blk_flush_policy()
    104  if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))  in blk_flush_policy()
    110  static unsigned int blk_flush_cur_seq(struct request *rq)  in blk_flush_cur_seq() argument
    112  return 1 << ffz(rq->flush.seq);  in blk_flush_cur_seq()
    115  static void blk_flush_restore_request(struct request *rq)  in blk_flush_restore_request() argument
    122  rq->bio = rq->biotail;  in blk_flush_restore_request()
    125  rq->cmd_flags &= ~REQ_FLUSH_SEQ;  in blk_flush_restore_request()
    126  rq->end_io = rq->flush.saved_end_io;  in blk_flush_restore_request()
    [all …]

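blk_flush_cur_seq() treats rq->flush.seq as a bitmask of completed flush stages and returns the lowest stage bit not yet set (1 << ffz(seq)). A tiny sketch of that stage-stepping trick, with invented stage names:

#include <stdio.h>

/* Invented stage bits; lowest bit = earliest stage. */
enum { STAGE_PREFLUSH = 1u << 0, STAGE_DATA = 1u << 1, STAGE_POSTFLUSH = 1u << 2 };

/* Find First Zero bit, like the kernel's ffz(). */
static unsigned int ffz32(unsigned int x)
{
	unsigned int i;

	for (i = 0; i < 32; i++)
		if (!(x & (1u << i)))
			return i;
	return 32;
}

/* Current stage = lowest stage whose "done" bit is still clear. */
static unsigned int cur_stage(unsigned int done_mask)
{
	return 1u << ffz32(done_mask);
}

int main(void)
{
	unsigned int done = 0;

	while (done != (STAGE_PREFLUSH | STAGE_DATA | STAGE_POSTFLUSH)) {
		unsigned int stage = cur_stage(done);

		printf("running stage 0x%x\n", stage);
		done |= stage;            /* mark the stage complete */
	}
	return 0;
}
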
  blk-exec.c
    21  static void blk_end_sync_rq(struct request *rq, int error)  in blk_end_sync_rq() argument
    23  struct completion *waiting = rq->end_io_data;  in blk_end_sync_rq()
    25  rq->end_io_data = NULL;  in blk_end_sync_rq()
    26  __blk_put_request(rq->q, rq);  in blk_end_sync_rq()
    48  struct request *rq, int at_head,  in blk_execute_rq_nowait() argument
    56  rq->rq_disk = bd_disk;  in blk_execute_rq_nowait()
    57  rq->end_io = done;  in blk_execute_rq_nowait()
    62  is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;  in blk_execute_rq_nowait()
    68  rq->errors = -ENXIO;  in blk_execute_rq_nowait()
    69  if (rq->end_io)  in blk_execute_rq_nowait()
    [all …]

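blk_end_sync_rq() pulls a struct completion out of rq->end_io_data and (in the elided lines) completes it; that is how the synchronous execute path waits on the asynchronous blk_execute_rq_nowait(), which merely records the end_io callback. A hedged pthread analogue of that completion pattern (all types and names below are illustrative only):

#include <pthread.h>
#include <stdio.h>

/* Minimal "completion": a flag protected by a mutex + condvar. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
};

struct fake_rq {
	void (*end_io)(struct fake_rq *rq);
	void  *end_io_data;
	int    errors;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* Counterpart of blk_end_sync_rq(): wake whoever waits on the request. */
static void end_sync_rq(struct fake_rq *rq)
{
	complete(rq->end_io_data);
}

static void *io_thread(void *arg)
{
	struct fake_rq *rq = arg;

	rq->errors = 0;          /* pretend the I/O finished fine */
	rq->end_io(rq);          /* asynchronous completion path */
	return NULL;
}

int main(void)
{
	struct completion wait = { PTHREAD_MUTEX_INITIALIZER,
				   PTHREAD_COND_INITIALIZER, 0 };
	struct fake_rq rq = { .end_io = end_sync_rq, .end_io_data = &wait };
	pthread_t t;

	pthread_create(&t, NULL, io_thread, &rq);   /* "execute nowait" */
	wait_for_completion(&wait);                 /* synchronous wrapper blocks */
	pthread_join(t, NULL);
	printf("request done, errors=%d\n", rq.errors);
	return 0;
}
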
  blk-core.c
    59  static void drive_stat_acct(struct request *rq, int new_io)  in drive_stat_acct() argument
    62  int rw = rq_data_dir(rq);  in drive_stat_acct()
    65  if (!blk_do_io_stat(rq))  in drive_stat_acct()
    71  part = rq->part;  in drive_stat_acct()
    74  part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));  in drive_stat_acct()
    84  part = &rq->rq_disk->part0;  in drive_stat_acct()
    89  rq->part = part;  in drive_stat_acct()
    130  void blk_rq_init(struct request_queue *q, struct request *rq)  in blk_rq_init() argument
    132  memset(rq, 0, sizeof(*rq));  in blk_rq_init()
    134  INIT_LIST_HEAD(&rq->queuelist);  in blk_rq_init()
    [all …]

  elevator.c
    53  #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))  argument
    59  static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)  in elv_iosched_allow_merge() argument
    61  struct request_queue *q = rq->q;  in elv_iosched_allow_merge()
    65  return e->type->ops.elevator_allow_merge_fn(q, rq, bio);  in elv_iosched_allow_merge()
    73  bool elv_rq_merge_ok(struct request *rq, struct bio *bio)  in elv_rq_merge_ok() argument
    75  if (!blk_rq_merge_ok(rq, bio))  in elv_rq_merge_ok()
    78  if (!elv_iosched_allow_merge(rq, bio))  in elv_rq_merge_ok()
    251  static inline void __elv_rqhash_del(struct request *rq)  in __elv_rqhash_del() argument
    253  hlist_del_init(&rq->hash);  in __elv_rqhash_del()
    256  static void elv_rqhash_del(struct request_queue *q, struct request *rq)  in elv_rqhash_del() argument
    [all …]

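rq_hash_key() is the request's end sector (start + size), so a bio can be checked for a back-merge by hashing its own start sector and probing the table. A compact sketch of that lookup idea with a trivial open-addressed table (table size and fields are arbitrary; collisions are ignored for brevity):

#include <stdio.h>
#include <string.h>

#define TBL 64

/* A queued request spanning [pos, pos + sectors). */
struct req { unsigned long pos, sectors; int used; };

static struct req table[TBL];

/* Key requests by their END sector: a new bio that starts exactly where
 * a queued request ends is a back-merge candidate. */
static unsigned long end_key(const struct req *r)
{
	return r->pos + r->sectors;
}

static void hash_insert(const struct req *r)
{
	table[end_key(r) % TBL] = *r;
	table[end_key(r) % TBL].used = 1;
}

static struct req *find_back_merge(unsigned long bio_start)
{
	struct req *r = &table[bio_start % TBL];

	return (r->used && end_key(r) == bio_start) ? r : NULL;
}

int main(void)
{
	struct req r = { .pos = 1000, .sectors = 8 };

	memset(table, 0, sizeof(table));
	hash_insert(&r);

	printf("bio@1008 back-merges: %s\n", find_back_merge(1008) ? "yes" : "no");
	printf("bio@1016 back-merges: %s\n", find_back_merge(1016) ? "yes" : "no");
	return 0;
}
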
  blk.h
    22  void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
    24  int blk_rq_append_bio(struct request_queue *q, struct request *rq,
    27  void blk_dequeue_request(struct request *rq);
    29  bool __blk_end_bidi_request(struct request *rq, int error,
    48  static inline int blk_mark_rq_complete(struct request *rq)  in blk_mark_rq_complete() argument
    50  return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);  in blk_mark_rq_complete()
    53  static inline void blk_clear_rq_complete(struct request *rq)  in blk_clear_rq_complete() argument
    55  clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);  in blk_clear_rq_complete()
    61  #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))  argument
    63  void blk_insert_flush(struct request *rq);
    [all …]

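blk_mark_rq_complete() uses test_and_set_bit() so that when two completion paths race on the same request (for example a normal completion and a timeout), only the first caller wins. A C11-atomics sketch of that claim-once idiom (the flag name is invented):

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_COMPLETE (1u << 0)   /* invented counterpart of REQ_ATOM_COMPLETE */

struct tiny_rq {
	atomic_uint atomic_flags;
};

/* Returns nonzero if someone else already marked the request complete. */
static int mark_complete(struct tiny_rq *rq)
{
	unsigned int old = atomic_fetch_or(&rq->atomic_flags, FLAG_COMPLETE);

	return old & FLAG_COMPLETE;
}

static void clear_complete(struct tiny_rq *rq)
{
	atomic_fetch_and(&rq->atomic_flags, ~FLAG_COMPLETE);
}

int main(void)
{
	struct tiny_rq rq = { 0 };

	/* First claim succeeds, a racing second claim loses. */
	printf("first path saw it done already?  %d\n", mark_complete(&rq));  /* 0 */
	printf("second path saw it done already? %d\n", mark_complete(&rq));  /* 1 */

	clear_complete(&rq);
	return 0;
}
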
  scsi_ioctl.c
    223  static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,  in blk_fill_sghdr_rq() argument
    226  if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))  in blk_fill_sghdr_rq()
    228  if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))  in blk_fill_sghdr_rq()
    234  rq->cmd_len = hdr->cmd_len;  in blk_fill_sghdr_rq()
    235  rq->cmd_type = REQ_TYPE_BLOCK_PC;  in blk_fill_sghdr_rq()
    237  rq->timeout = msecs_to_jiffies(hdr->timeout);  in blk_fill_sghdr_rq()
    238  if (!rq->timeout)  in blk_fill_sghdr_rq()
    239  rq->timeout = q->sg_timeout;  in blk_fill_sghdr_rq()
    240  if (!rq->timeout)  in blk_fill_sghdr_rq()
    241  rq->timeout = BLK_DEFAULT_SG_TIMEOUT;  in blk_fill_sghdr_rq()
    [all …]

  deadline-iosched.c
    57  deadline_rb_root(struct deadline_data *dd, struct request *rq)  in deadline_rb_root() argument
    59  return &dd->sort_list[rq_data_dir(rq)];  in deadline_rb_root()
    66  deadline_latter_request(struct request *rq)  in deadline_latter_request() argument
    68  struct rb_node *node = rb_next(&rq->rb_node);  in deadline_latter_request()
    77  deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)  in deadline_add_rq_rb() argument
    79  struct rb_root *root = deadline_rb_root(dd, rq);  in deadline_add_rq_rb()
    81  elv_rb_add(root, rq);  in deadline_add_rq_rb()
    85  deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)  in deadline_del_rq_rb() argument
    87  const int data_dir = rq_data_dir(rq);  in deadline_del_rq_rb()
    89  if (dd->next_rq[data_dir] == rq)  in deadline_del_rq_rb()
    [all …]

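The deadline scheduler keeps one sector-sorted tree per data direction and takes the "latter" request as simply the in-order successor (rb_next). A sketch of the same idea with a sorted singly linked list standing in for the rbtree (no kernel API involved, purely illustrative):

#include <stdio.h>

struct req {
	unsigned long sector;
	struct req *next;       /* kept sorted by sector */
};

/* Insert in ascending sector order (the rbtree's job in the kernel). */
static void sort_insert(struct req **head, struct req *rq)
{
	while (*head && (*head)->sector < rq->sector)
		head = &(*head)->next;
	rq->next = *head;
	*head = rq;
}

/* "Latter request": the next request in sector order, or NULL. */
static struct req *latter_request(struct req *rq)
{
	return rq->next;
}

int main(void)
{
	struct req a = { .sector = 800 }, b = { .sector = 100 }, c = { .sector = 450 };
	struct req *head = NULL, *it;

	sort_insert(&head, &a);
	sort_insert(&head, &b);
	sort_insert(&head, &c);

	for (it = head; it; it = latter_request(it))
		printf("%lu ", it->sector);   /* 100 450 800 */
	printf("\n");
	return 0;
}
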
/linux-3.4.99/drivers/ide/

  ide-io.c
    57  int ide_end_rq(ide_drive_t *drive, struct request *rq, int error,  in ide_end_rq() argument
    70  return blk_end_request(rq, error, nr_bytes);  in ide_end_rq()
    78  struct request *rq = cmd->rq;  in ide_complete_cmd() local
    105  if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {  in ide_complete_cmd()
    106  struct ide_cmd *orig_cmd = rq->special;  in ide_complete_cmd()
    118  struct request *rq = hwif->rq;  in ide_complete_rq() local
    125  if (blk_noretry_request(rq) && error <= 0)  in ide_complete_rq()
    126  nr_bytes = blk_rq_sectors(rq) << 9;  in ide_complete_rq()
    128  rc = ide_end_rq(drive, rq, error, nr_bytes);  in ide_complete_rq()
    130  hwif->rq = NULL;  in ide_complete_rq()
    [all …]

  ide-cd.c
    96  static int cdrom_log_sense(ide_drive_t *drive, struct request *rq)  in cdrom_log_sense() argument
    101  if (!sense || !rq || (rq->cmd_flags & REQ_QUIET))  in cdrom_log_sense()
    124  if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)  in cdrom_log_sense()
    210  static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)  in ide_cd_complete_failed_rq() argument
    218  struct request *failed = (struct request *)rq->special;  in ide_cd_complete_failed_rq()
    219  void *sense = bio_data(rq->bio);  in ide_cd_complete_failed_rq()
    229  failed->sense_len = rq->sense_len;  in ide_cd_complete_failed_rq()
    247  static int ide_cd_breathe(ide_drive_t *drive, struct request *rq)  in ide_cd_breathe() argument
    252  if (!rq->errors)  in ide_cd_breathe()
    255  rq->errors = 1;  in ide_cd_breathe()
    [all …]

  ide-eh.c
    7  static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq,  in ide_ata_error() argument
    15  rq->errors |= ERROR_RESET;  in ide_ata_error()
    28  rq->errors = ERROR_MAX;  in ide_ata_error()
    31  rq->errors |= ERROR_RECAL;  in ide_ata_error()
    35  if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&  in ide_ata_error()
    42  if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {  in ide_ata_error()
    43  ide_kill_rq(drive, rq);  in ide_ata_error()
    48  rq->errors |= ERROR_RESET;  in ide_ata_error()
    50  if ((rq->errors & ERROR_RESET) == ERROR_RESET) {  in ide_ata_error()
    51  ++rq->errors;  in ide_ata_error()
    [all …]

  ide-pm.c
    10  struct request *rq;  in generic_ide_suspend() local
    21  rq = blk_get_request(drive->queue, READ, __GFP_WAIT);  in generic_ide_suspend()
    22  rq->cmd_type = REQ_TYPE_PM_SUSPEND;  in generic_ide_suspend()
    23  rq->special = &rqpm;  in generic_ide_suspend()
    29  ret = blk_execute_rq(drive->queue, NULL, rq, 0);  in generic_ide_suspend()
    30  blk_put_request(rq);  in generic_ide_suspend()
    46  struct request *rq;  in generic_ide_resume() local
    61  rq = blk_get_request(drive->queue, READ, __GFP_WAIT);  in generic_ide_resume()
    62  rq->cmd_type = REQ_TYPE_PM_RESUME;  in generic_ide_resume()
    63  rq->cmd_flags |= REQ_PREEMPT;  in generic_ide_resume()
    [all …]

  ide-floppy.c
    66  struct request *rq = pc->rq;  in ide_floppy_callback() local
    75  rq->cmd_type == REQ_TYPE_BLOCK_PC)  in ide_floppy_callback()
    79  u8 *buf = bio_data(rq->bio);  in ide_floppy_callback()
    100  if (rq->cmd_type == REQ_TYPE_SPECIAL)  in ide_floppy_callback()
    101  rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;  in ide_floppy_callback()
    136  unsigned int done = blk_rq_bytes(drive->hwif->rq);  in ide_floppy_issue_pc()
    191  struct ide_atapi_pc *pc, struct request *rq,  in idefloppy_create_rw_cmd() argument
    196  int blocks = blk_rq_sectors(rq) / floppy->bs_factor;  in idefloppy_create_rw_cmd()
    197  int cmd = rq_data_dir(rq);  in idefloppy_create_rw_cmd()
    206  memcpy(rq->cmd, pc->c, 12);  in idefloppy_create_rw_cmd()
    [all …]

/linux-3.4.99/drivers/scsi/device_handler/

  scsi_dh_alua.c
    104  struct request *rq;  in get_alua_req() local
    107  rq = blk_get_request(q, rw, GFP_NOIO);  in get_alua_req()
    109  if (!rq) {  in get_alua_req()
    115  if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {  in get_alua_req()
    116  blk_put_request(rq);  in get_alua_req()
    122  rq->cmd_type = REQ_TYPE_BLOCK_PC;  in get_alua_req()
    123  rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |  in get_alua_req()
    125  rq->retries = ALUA_FAILOVER_RETRIES;  in get_alua_req()
    126  rq->timeout = ALUA_FAILOVER_TIMEOUT;  in get_alua_req()
    128  return rq;  in get_alua_req()
    [all …]

/linux-3.4.99/drivers/s390/char/

  raw3270.c
    118  struct raw3270_request *rq;  in raw3270_request_alloc() local
    121  rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA);  in raw3270_request_alloc()
    122  if (!rq)  in raw3270_request_alloc()
    127  rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);  in raw3270_request_alloc()
    128  if (!rq->buffer) {  in raw3270_request_alloc()
    129  kfree(rq);  in raw3270_request_alloc()
    133  rq->size = size;  in raw3270_request_alloc()
    134  INIT_LIST_HEAD(&rq->list);  in raw3270_request_alloc()
    139  rq->ccw.cda = __pa(rq->buffer);  in raw3270_request_alloc()
    140  rq->ccw.flags = CCW_FLAG_SLI;  in raw3270_request_alloc()
    [all …]

/linux-3.4.99/drivers/infiniband/hw/ipath/

  ipath_srq.c
    61  if ((unsigned) wr->num_sge > srq->rq.max_sge) {  in ipath_post_srq_receive()
    67  spin_lock_irqsave(&srq->rq.lock, flags);  in ipath_post_srq_receive()
    68  wq = srq->rq.wq;  in ipath_post_srq_receive()
    70  if (next >= srq->rq.size)  in ipath_post_srq_receive()
    73  spin_unlock_irqrestore(&srq->rq.lock, flags);  in ipath_post_srq_receive()
    79  wqe = get_rwqe_ptr(&srq->rq, wq->head);  in ipath_post_srq_receive()
    87  spin_unlock_irqrestore(&srq->rq.lock, flags);  in ipath_post_srq_receive()
    135  srq->rq.size = srq_init_attr->attr.max_wr + 1;  in ipath_create_srq()
    136  srq->rq.max_sge = srq_init_attr->attr.max_sge;  in ipath_create_srq()
    137  sz = sizeof(struct ib_sge) * srq->rq.max_sge +  in ipath_create_srq()
    [all …]

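ipath_create_srq() sizes the receive queue as max_wr + 1 and ipath_post_srq_receive() advances a head index with wraparound (line 70); a size of max_wr + 1 is the usual sign of the same one-empty-slot ring convention seen in the vNIC code above, where advancing head onto tail would mean overflow. A short sketch with the work queue reduced to an index pair (field names invented):

#include <stdio.h>

/* One-slot-reserved circular work queue: size = max_wr + 1. */
struct rwq {
	unsigned int size, head, tail;
};

/* Returns 0 on success, -1 if the queue is full. */
static int post_recv(struct rwq *wq)
{
	unsigned int next = wq->head + 1;

	if (next >= wq->size)
		next = 0;                 /* wrap around */
	if (next == wq->tail)
		return -1;                /* full: would catch up with the consumer */

	/* ...fill the work-queue entry at wq->head here... */
	wq->head = next;
	return 0;
}

int main(void)
{
	struct rwq wq = { .size = 4 };    /* holds at most 3 posted receives */
	int i;

	for (i = 0; i < 5; i++)
		printf("post %d -> %s\n", i, post_recv(&wq) ? "queue full" : "ok");
	return 0;
}
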