
Searched refs: request (Results 1 – 25 of 280), sorted by relevance


/linux-2.4.37.9/drivers/s390/
ccwcache.c
61 enchain ( ccw_req_t *request ) in enchain() argument
66 if ( request == NULL ) in enchain()
70 ccwreq_actual = request; in enchain()
71 request->int_prev = ccwreq_actual; in enchain()
72 request->int_next = ccwreq_actual; in enchain()
74 request->int_next = ccwreq_actual; in enchain()
75 request->int_prev = ccwreq_actual->int_prev; in enchain()
76 request->int_prev->int_next = request; in enchain()
77 request->int_next->int_prev = request; in enchain()
87 dechain ( ccw_req_t *request ) in dechain() argument
[all …]
/linux-2.4.37.9/drivers/char/drm-4.0/
i810_bufs.c
43 drm_buf_desc_t request; in i810_addbufs_agp() local
59 if (copy_from_user(&request, in i810_addbufs_agp()
61 sizeof(request))) in i810_addbufs_agp()
64 count = request.count; in i810_addbufs_agp()
65 order = drm_order(request.size); in i810_addbufs_agp()
67 agp_offset = request.agp_start; in i810_addbufs_agp()
68 alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) :size; in i810_addbufs_agp()
162 request.count = entry->buf_count; in i810_addbufs_agp()
163 request.size = size; in i810_addbufs_agp()
166 &request, in i810_addbufs_agp()
[all …]
mga_bufs.c
44 drm_buf_desc_t request; in mga_addbufs_agp() local
60 if (copy_from_user(&request, in mga_addbufs_agp()
62 sizeof(request))) in mga_addbufs_agp()
65 count = request.count; in mga_addbufs_agp()
66 order = drm_order(request.size); in mga_addbufs_agp()
68 agp_offset = request.agp_start; in mga_addbufs_agp()
69 alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size; in mga_addbufs_agp()
179 request.count = entry->buf_count; in mga_addbufs_agp()
180 request.size = size; in mga_addbufs_agp()
183 &request, in mga_addbufs_agp()
[all …]
bufs.c
153 drm_buf_desc_t request; in drm_addbufs() local
170 if (copy_from_user(&request, in drm_addbufs()
172 sizeof(request))) in drm_addbufs()
175 count = request.count; in drm_addbufs()
176 order = drm_order(request.size); in drm_addbufs()
180 request.count, request.size, size, order, dev->queue_count); in drm_addbufs()
185 alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size; in drm_addbufs()
302 request.count = entry->buf_count; in drm_addbufs()
303 request.size = size; in drm_addbufs()
306 &request, in drm_addbufs()
[all …]
r128_bufs.c
47 drm_buf_desc_t request; in r128_addbufs_agp() local
63 if (copy_from_user(&request, in r128_addbufs_agp()
65 sizeof(request))) in r128_addbufs_agp()
68 count = request.count; in r128_addbufs_agp()
69 order = drm_order(request.size); in r128_addbufs_agp()
72 alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size; in r128_addbufs_agp()
77 agp_offset = dev->agp->base + request.agp_start; in r128_addbufs_agp()
183 request.count = entry->buf_count; in r128_addbufs_agp()
184 request.size = size; in r128_addbufs_agp()
187 &request, in r128_addbufs_agp()
[all …]
radeon_bufs.c
46 drm_buf_desc_t request; in radeon_addbufs_agp() local
62 if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request))) in radeon_addbufs_agp()
65 count = request.count; in radeon_addbufs_agp()
66 order = drm_order(request.size); in radeon_addbufs_agp()
69 alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size; in radeon_addbufs_agp()
74 agp_offset = dev->agp->base + request.agp_start; in radeon_addbufs_agp()
184 request.count = entry->buf_count; in radeon_addbufs_agp()
185 request.size = size; in radeon_addbufs_agp()
187 if (copy_to_user((drm_buf_desc_t *)arg, &request, sizeof(request))) in radeon_addbufs_agp()
203 drm_buf_desc_t request; in radeon_addbufs() local
[all …]
agpsupport.c
124 drm_agp_buffer_t request; in drm_agp_alloc() local
130 if (copy_from_user(&request, (drm_agp_buffer_t *)arg, sizeof(request))) in drm_agp_alloc()
137 pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; in drm_agp_alloc()
138 type = (u32) request.type; in drm_agp_alloc()
154 request.handle = entry->handle; in drm_agp_alloc()
155 request.physical = memory->physical; in drm_agp_alloc()
157 if (copy_to_user((drm_agp_buffer_t *)arg, &request, sizeof(request))) { in drm_agp_alloc()
183 drm_agp_binding_t request; in drm_agp_unbind() local
187 if (copy_from_user(&request, (drm_agp_binding_t *)arg, sizeof(request))) in drm_agp_unbind()
189 if (!(entry = drm_agp_lookup_entry(dev, request.handle))) in drm_agp_unbind()
[all …]
/linux-2.4.37.9/include/linux/
elevator.h
4 typedef void (elevator_fn) (struct request *, elevator_t *,
8 typedef int (elevator_merge_fn) (request_queue_t *, struct request **, struct list_head *,
11 typedef void (elevator_merge_cleanup_fn) (request_queue_t *, struct request *, int);
13 typedef void (elevator_merge_req_fn) (struct request *, struct request *);
26 int elevator_noop_merge(request_queue_t *, struct request **, struct list_head *, struct buffer_hea…
27 void elevator_noop_merge_cleanup(request_queue_t *, struct request *, int);
28 void elevator_noop_merge_req(struct request *, struct request *);
30 int elevator_linus_merge(request_queue_t *, struct request **, struct list_head *, struct buffer_he…
31 void elevator_linus_merge_cleanup(request_queue_t *, struct request *, int);
32 void elevator_linus_merge_req(struct request *, struct request *);
/linux-2.4.37.9/drivers/char/drm/
drm_bufs.h
202 drm_map_t request; in DRM() local
205 if (copy_from_user(&request, (drm_map_t *)arg, in DRM()
206 sizeof(request))) { in DRM()
216 r_list->map->handle == request.handle && in DRM()
312 drm_buf_desc_t request; in DRM() local
329 if ( copy_from_user( &request, (drm_buf_desc_t *)arg, in DRM()
330 sizeof(request) ) ) in DRM()
333 count = request.count; in DRM()
334 order = DRM(order)( request.size ); in DRM()
337 alignment = (request.flags & _DRM_PAGE_ALIGN) in DRM()
[all …]
drm_proc.h
39 int request, int *eof, void *data);
41 int request, int *eof, void *data);
43 int request, int *eof, void *data);
45 int request, int *eof, void *data);
47 int request, int *eof, void *data);
50 int request, int *eof, void *data);
54 int request, int *eof, void *data);
135 static int DRM(name_info)(char *buf, char **start, off_t offset, int request, in DRM()
156 if (len > request + offset) return request; in DRM()
161 static int DRM(_vm_info)(char *buf, char **start, off_t offset, int request, in DRM()
[all …]
drm_agpsupport.h
132 drm_agp_buffer_t request; in DRM() local
139 if (copy_from_user(&request, (drm_agp_buffer_t *)arg, sizeof(request))) in DRM()
146 pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; in DRM()
147 type = (u32) request.type; in DRM()
163 request.handle = entry->handle; in DRM()
164 request.physical = memory->physical; in DRM()
166 if (copy_to_user((drm_agp_buffer_t *)arg, &request, sizeof(request))) { in DRM()
192 drm_agp_binding_t request; in DRM() local
196 if (copy_from_user(&request, (drm_agp_binding_t *)arg, sizeof(request))) in DRM()
198 if (!(entry = DRM(agp_lookup_entry)(dev, request.handle))) in DRM()
[all …]
drm_scatter.h
65 drm_scatter_gather_t request; in DRM() local
74 if ( copy_from_user( &request, in DRM()
76 sizeof(request) ) ) in DRM()
85 pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; in DRM()
86 DRM_DEBUG( "sg size=%ld pages=%ld\n", request.size, pages ); in DRM()
142 request.handle = entry->handle; in DRM()
145 &request, in DRM()
146 sizeof(request) ) ) { in DRM()
205 drm_scatter_gather_t request; in DRM() local
208 if ( copy_from_user( &request, in DRM()
[all …]
/linux-2.4.37.9/drivers/fc4/
socal.c
442 socal_req *request; in socal_hw_enque() local
470 request = sw_cq->pool + sw_cq->in; in socal_hw_enque()
471 fch = &request->fchdr; in socal_hw_enque()
475 request->shdr.token = TOKEN(TYPE_SCSI_FCP, port->mask, fcmd->token); in socal_hw_enque()
476 request->data[0].base = fc->dma_scsi_cmd + fcmd->token * sizeof(fcp_cmd); in socal_hw_enque()
477 request->data[0].count = sizeof(fcp_cmd); in socal_hw_enque()
478 request->data[1].base = fc->dma_scsi_rsp + fcmd->token * fc->rsp_size; in socal_hw_enque()
479 request->data[1].count = fc->rsp_size; in socal_hw_enque()
481 request->shdr.segcnt = 3; in socal_hw_enque()
483 request->shdr.bytecnt = i; in socal_hw_enque()
[all …]
soc.c
362 soc_req *request; in soc_hw_enque() local
387 request = sw_cq->pool + sw_cq->in; in soc_hw_enque()
388 fch = &request->fchdr; in soc_hw_enque()
392 request->shdr.token = TOKEN(TYPE_SCSI_FCP, port->mask, fcmd->token); in soc_hw_enque()
393 request->data[0].base = fc->dma_scsi_cmd + fcmd->token * sizeof(fcp_cmd); in soc_hw_enque()
394 request->data[0].count = sizeof(fcp_cmd); in soc_hw_enque()
395 request->data[1].base = fc->dma_scsi_rsp + fcmd->token * fc->rsp_size; in soc_hw_enque()
396 request->data[1].count = fc->rsp_size; in soc_hw_enque()
398 request->shdr.segcnt = 3; in soc_hw_enque()
400 request->shdr.bytecnt = i; in soc_hw_enque()
[all …]
/linux-2.4.37.9/arch/sparc64/kernel/
ptrace.c
105 int request = regs->u_regs[UREG_I0]; in do_ptrace() local
122 if ((request >= 0) && (request <= 24)) in do_ptrace()
123 s = pt_rq [request]; in do_ptrace()
127 if (request == PTRACE_POKEDATA && data == 0x91d02001){ in do_ptrace()
132 s, request, pid, addr, data, addr2); in do_ptrace()
135 if (request == PTRACE_TRACEME) { in do_ptrace()
164 if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH) in do_ptrace()
165 || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) { in do_ptrace()
178 if (request != PTRACE_KILL) { in do_ptrace()
189 ((request == PTRACE_READDATA64) || in do_ptrace()
[all …]
/linux-2.4.37.9/drivers/block/
elevator.c
36 inline int bh_rq_in_between(struct buffer_head *bh, struct request *rq, in bh_rq_in_between()
40 struct request *next_rq; in bh_rq_in_between()
78 int elevator_linus_merge(request_queue_t *q, struct request **req, in elevator_linus_merge()
85 struct request *__rq; in elevator_linus_merge()
136 void elevator_linus_merge_req(struct request *req, struct request *next) in elevator_linus_merge_req()
145 int elevator_noop_merge(request_queue_t *q, struct request **req, in elevator_noop_merge()
158 struct request *__rq = blkdev_entry_to_request(entry); in elevator_noop_merge()
181 void elevator_noop_merge_req(struct request *req, struct request *next) {} in elevator_noop_merge_req()
nbd.c
78 nbd_end_request(struct request *req) in nbd_end_request()
190 void nbd_send_req(struct nbd_device *lo, struct request *req) in nbd_send_req()
193 struct nbd_request request; in nbd_send_req() local
198 request.magic = htonl(NBD_REQUEST_MAGIC); in nbd_send_req()
199 request.type = htonl(req->cmd); in nbd_send_req()
200 request.from = cpu_to_be64( (u64) req->sector << 9); in nbd_send_req()
201 request.len = htonl(size); in nbd_send_req()
202 memcpy(request.handle, &req, sizeof(req)); in nbd_send_req()
210 result = nbd_xmit(1, sock, (char *) &request, sizeof(request), req->cmd == WRITE ? MSG_MORE : 0); in nbd_send_req()
232 static struct request *nbd_find_request(struct nbd_device *lo, char *handle) in nbd_find_request()
[all …]
ll_rw_blk.c
153 struct request *rq; in __blk_cleanup_queue()
157 rq = list_entry(head->next, struct request, queue); in __blk_cleanup_queue()
312 static inline int ll_new_segment(request_queue_t *q, struct request *req, int max_segments) in ll_new_segment()
321 static int ll_back_merge_fn(request_queue_t *q, struct request *req, in ll_back_merge_fn()
330 static int ll_front_merge_fn(request_queue_t *q, struct request *req, in ll_front_merge_fn()
339 static int ll_merge_requests_fn(request_queue_t *q, struct request *req, in ll_merge_requests_fn()
340 struct request *next, int max_segments) in ll_merge_requests_fn()
419 struct request *rq; in blk_grow_request_list()
544 #define blkdev_free_rq(list) list_entry((list)->next, struct request, queue);
549 static struct request *get_request(request_queue_t *q, int rw) in get_request()
[all …]
/linux-2.4.37.9/fs/nfs/
direct.c
182 int len, request; in nfs_direct_read() local
185 request = count; in nfs_direct_read()
187 request = rsize; in nfs_direct_read()
188 args.count = request; in nfs_direct_read()
203 if (request < len) in nfs_direct_read()
205 request -= len; in nfs_direct_read()
208 } while (request != 0); in nfs_direct_read()
261 int len, request; in nfs_direct_write() local
264 request = count; in nfs_direct_write()
266 request = wsize; in nfs_direct_write()
[all …]
/linux-2.4.37.9/Documentation/
pm.txt
63 * cback - request handler callback (suspend, resume, ...)
87 * cback - previously registered request callback
135 * to put a device to sleep. If a new device request arrives
137 * callback, the driver should fail the pm_callback request.
142 * Power management request callback
146 * rqst - request type
147 * data - data, if any, associated with the request
149 * Returns: 0 if the request is successful
150 * EINVAL if the request is not supported
151 * EBUSY if the device is now busy and can not handle the request
[all …]
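The pm.txt hits above describe the Linux 2.4 pm_register()/pm_callback interface: a driver supplies a request callback that receives a request type (rqst) and optional data, and returns 0 on success, EINVAL for an unsupported request, or EBUSY if the device cannot handle the request right now. A minimal sketch of such a callback, assuming the <linux/pm.h> interface and with hypothetical my_* placeholder names, might look like this:

/*
 * Minimal sketch of a power-management request callback for the contract
 * documented in pm.txt (Linux 2.4 <linux/pm.h>).  The my_* names are
 * hypothetical placeholders for driver-specific state and helpers.
 */
#include <linux/pm.h>
#include <linux/errno.h>

static int my_device_busy;		/* hypothetical "cannot suspend now" flag */

static int my_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
{
	switch (rqst) {
	case PM_SUSPEND:
		if (my_device_busy)
			return -EBUSY;	/* device is busy, fail the request */
		/* stop accepting new work, save state, power the device down */
		return 0;
	case PM_RESUME:
		/* power the device back up and restore the saved state */
		return 0;
	default:
		return -EINVAL;		/* request type not supported */
	}
}

/* Registered once at driver init, e.g.:
 *	struct pm_dev *pm = pm_register(PM_UNKNOWN_DEV, 0, my_pm_callback);
 */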
/linux-2.4.37.9/drivers/scsi/
scsi_lib.c
68 static void __scsi_insert_special(request_queue_t *q, struct request *rq, in __scsi_insert_special()
123 __scsi_insert_special(q, &SCpnt->request, SCpnt, at_head); in scsi_insert_special_cmd()
264 SCpnt->request.special = (void *) SCpnt; in scsi_queue_next_request()
265 list_add(&SCpnt->request.queue, &q->queue_head); in scsi_queue_next_request()
364 struct request *req; in __scsi_end_request()
371 req = &SCpnt->request; in __scsi_end_request()
517 if (SCpnt->request_buffer != SCpnt->request.buffer) { in scsi_release_buffers()
555 struct request *req = &SCpnt->request; in scsi_io_completion()
623 SCpnt->request.nr_sectors, in scsi_io_completion()
726 kdevname(SCpnt->request.rq_dev)); in scsi_io_completion()
[all …]
qlogicfas.c
192 static int ql_pdma(int phase, char *request, int reqlen) in ql_pdma() argument
201 insl( qbase + 4, request, 32 ); in ql_pdma()
203 request += 128; in ql_pdma()
207 insl( qbase + 4, request, 21 ); in ql_pdma()
209 request += 84; in ql_pdma()
212 insl( qbase + 4, request, 11 ); in ql_pdma()
214 request += 44; in ql_pdma()
224 *request++ = inb(qbase + 4); in ql_pdma()
236 outsl(qbase + 4, request, 32 ); in ql_pdma()
238 request += 128; in ql_pdma()
[all …]
/linux-2.4.37.9/fs/ncpfs/
sock.c
98 struct ncp_request_header request = in do_ncp_rpc_call() local
126 request.type, in do_ncp_rpc_call()
127 (request.conn_high << 8) + request.conn_low, in do_ncp_rpc_call()
128 request.sequence); in do_ncp_rpc_call()
130 request.function); in do_ncp_rpc_call()
228 && ((request.type == NCP_ALLOC_SLOT_REQUEST) in do_ncp_rpc_call()
229 || ((reply.sequence == request.sequence) in do_ncp_rpc_call()
230 && (reply.conn_low == request.conn_low) in do_ncp_rpc_call()
232 && (reply.conn_high == request.conn_high)))) { in do_ncp_rpc_call()
347 struct ncp_request_header request = in do_ncp_tcp_rpc_call() local
[all …]
/linux-2.4.37.9/drivers/s390/char/
tapedefs.h
41 static inline struct request *
47 tape_dequeue_request( request_queue_t * q, struct request *req ) in tape_dequeue_request()
53 typedef struct request *request_queue_t;
65 static inline struct request *
71 tape_dequeue_request( request_queue_t * q, struct request *req ) in tape_dequeue_request()
/linux-2.4.37.9/arch/mips64/kernel/
ptrace.c
48 asmlinkage int sys32_ptrace(int request, int pid, int addr, int data) in sys32_ptrace() argument
55 if (request == PTRACE_TRACEME) { in sys32_ptrace()
77 if (request == PTRACE_ATTACH) { in sys32_ptrace()
82 ret = ptrace_check_attach(child, request == PTRACE_KILL); in sys32_ptrace()
86 switch (request) { in sys32_ptrace()
239 if (request == PTRACE_SYSCALL) in sys32_ptrace()
287 asmlinkage int sys_ptrace(long request, long pid, long addr, long data) in sys_ptrace() argument
295 (int) request, (int) pid, (unsigned long) addr, in sys_ptrace()
299 if (request == PTRACE_TRACEME) { in sys_ptrace()
321 if (request == PTRACE_ATTACH) { in sys_ptrace()
[all …]
