/linux-6.1.9/fs/nfs/ |
D | direct.c |
  71   static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
  74   static inline void get_dreq(struct nfs_direct_req *dreq)  in get_dreq() argument
  76   atomic_inc(&dreq->io_count);  in get_dreq()
  79   static inline int put_dreq(struct nfs_direct_req *dreq)  in put_dreq() argument
  81   return atomic_dec_and_test(&dreq->io_count);  in put_dreq()
  85   nfs_direct_handle_truncated(struct nfs_direct_req *dreq,  in nfs_direct_handle_truncated() argument
  92   if (dreq->max_count >= dreq_len) {  in nfs_direct_handle_truncated()
  93   dreq->max_count = dreq_len;  in nfs_direct_handle_truncated()
  94   if (dreq->count > dreq_len)  in nfs_direct_handle_truncated()
  95   dreq->count = dreq_len;  in nfs_direct_handle_truncated()
  [all …]
|
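The direct.c hits above centre on the io_count reference count: get_dreq() takes a reference for every in-flight sub-request and put_dreq() reports when the last one is dropped, which is the point where the O_DIRECT request may complete. A minimal userspace sketch of that idiom, using C11 atomics in place of the kernel's atomic_t (the struct name and main() are illustrative, not the kernel API):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct nfs_direct_req; only the counter matters here. */
struct direct_req {
	atomic_int io_count;
};

/* Take a reference for every sub-request that gets started. */
static void get_dreq(struct direct_req *dreq)
{
	atomic_fetch_add(&dreq->io_count, 1);
}

/* Drop a reference; true means the caller released the last one. */
static bool put_dreq(struct direct_req *dreq)
{
	return atomic_fetch_sub(&dreq->io_count, 1) == 1;
}

int main(void)
{
	struct direct_req req = { .io_count = 1 };	/* initial reference held by the issuer */

	get_dreq(&req);			/* sub-request 1 in flight */
	get_dreq(&req);			/* sub-request 2 in flight */

	put_dreq(&req);			/* sub-request 1 done */
	put_dreq(&req);			/* sub-request 2 done */
	if (put_dreq(&req))		/* issuer drops its own reference last */
		printf("all I/O complete, finish the request\n");
	return 0;
}
```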
D | cache_lib.c |
  68   void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq)  in nfs_cache_defer_req_put() argument
  70   if (refcount_dec_and_test(&dreq->count))  in nfs_cache_defer_req_put()
  71   kfree(dreq);  in nfs_cache_defer_req_put()
  76   struct nfs_cache_defer_req *dreq;  in nfs_dns_cache_revisit() local
  78   dreq = container_of(d, struct nfs_cache_defer_req, deferred_req);  in nfs_dns_cache_revisit()
  80   complete(&dreq->completion);  in nfs_dns_cache_revisit()
  81   nfs_cache_defer_req_put(dreq);  in nfs_dns_cache_revisit()
  86   struct nfs_cache_defer_req *dreq;  in nfs_dns_cache_defer() local
  88   dreq = container_of(req, struct nfs_cache_defer_req, req);  in nfs_dns_cache_defer()
  89   dreq->deferred_req.revisit = nfs_dns_cache_revisit;  in nfs_dns_cache_defer()
  [all …]
|
D | dns_resolve.c |
  285  struct nfs_cache_defer_req *dreq)  in do_cache_lookup() argument
  291  ret = cache_check(cd, &(*item)->h, &dreq->req);  in do_cache_lookup()
  327  struct nfs_cache_defer_req *dreq;  in do_cache_lookup_wait() local
  330  dreq = nfs_cache_defer_req_alloc();  in do_cache_lookup_wait()
  331  if (!dreq)  in do_cache_lookup_wait()
  333  ret = do_cache_lookup(cd, key, item, dreq);  in do_cache_lookup_wait()
  335  ret = nfs_cache_wait_for_upcall(dreq);  in do_cache_lookup_wait()
  339  nfs_cache_defer_req_put(dreq);  in do_cache_lookup_wait()
|
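Taken together, the cache_lib.c and dns_resolve.c hits show the deferred-upcall pattern: an NFS-specific request embeds the generic deferred request, the revisit callback recovers the outer object with container_of(), signals a completion, and drops a reference, while do_cache_lookup_wait() blocks until that completion fires. A self-contained sketch of the same shape (struct and function names are simplified stand-ins, and a plain flag replaces the kernel completion):

```c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal container_of, same idea as the kernel macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Generic deferred-request object handed to the cache layer. */
struct deferred_req {
	void (*revisit)(struct deferred_req *d);
};

/* NFS-style wrapper: the generic part is embedded in the specific one. */
struct cache_defer_req {
	struct deferred_req deferred_req;
	int refcount;
	int completed;		/* stands in for the kernel completion */
};

static void cache_defer_req_put(struct cache_defer_req *dreq)
{
	if (--dreq->refcount == 0)
		free(dreq);
}

/* Invoked once the upcall has been answered and the request is revisited. */
static void dns_cache_revisit(struct deferred_req *d)
{
	struct cache_defer_req *dreq =
		container_of(d, struct cache_defer_req, deferred_req);

	dreq->completed = 1;		/* complete(&dreq->completion) in the kernel */
	cache_defer_req_put(dreq);	/* drop the cache layer's reference */
}

int main(void)
{
	struct cache_defer_req *dreq = calloc(1, sizeof(*dreq));

	if (!dreq)
		return 1;
	dreq->refcount = 2;		/* one reference for us, one for the cache layer */
	dreq->deferred_req.revisit = dns_cache_revisit;

	/* Pretend the answer arrived: the cache layer revisits the deferred request. */
	dreq->deferred_req.revisit(&dreq->deferred_req);

	if (dreq->completed)
		printf("lookup result is ready\n");
	cache_defer_req_put(dreq);	/* drop our own reference */
	return 0;
}
```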
D | cache_lib.h |
  24   extern void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq);
  25   extern int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq);
|
D | nfstrace.h |
  1592  const struct nfs_direct_req *dreq
  1595  TP_ARGS(dreq),
  1609  const struct inode *inode = dreq->inode;
  1616  __entry->offset = dreq->io_start;
  1617  __entry->count = dreq->count;
  1618  __entry->bytes_left = dreq->bytes_left;
  1619  __entry->error = dreq->error;
  1620  __entry->flags = dreq->flags;
  1638  const struct nfs_direct_req *dreq \
  1640  TP_ARGS(dreq))
|
D | write.c |
  923   cinfo->dreq = NULL;  in nfs_init_cinfo_from_inode()
  929   struct nfs_direct_req *dreq)  in nfs_init_cinfo() argument
  931   if (dreq)  in nfs_init_cinfo()
  932   nfs_init_cinfo_from_dreq(cinfo, dreq);  in nfs_init_cinfo()
  1051  if ((ret == max) && !cinfo->dreq)  in nfs_scan_commit_list()
  1762  data->dreq = cinfo->dreq;  in nfs_init_commit()
  1787  if (!cinfo->dreq)  in nfs_retry_commit()
  1894  nfs_init_cinfo(&cinfo, data->inode, data->dreq);  in nfs_commit_release_pages()
|
D | internal.h |
  542   struct nfs_direct_req *dreq);
  641   struct nfs_direct_req *dreq);
  642   extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
  766   if (!cinfo->dreq) {  in nfs_mark_page_unstable()
|
D | pagelist.c |
  70    hdr->dreq = desc->pg_dreq;  in nfs_pgheader_init()
  1362  desc->pg_dreq = hdr->dreq;  in nfs_pageio_resend()
|
/linux-6.1.9/net/dccp/ |
D | minisocks.c |
  91   struct dccp_request_sock *dreq = dccp_rsk(req);  in dccp_create_openreq_child() local
  100  newdp->dccps_service = dreq->dreq_service;  in dccp_create_openreq_child()
  101  newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;  in dccp_create_openreq_child()
  102  newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;  in dccp_create_openreq_child()
  117  newdp->dccps_iss = dreq->dreq_iss;  in dccp_create_openreq_child()
  118  newdp->dccps_gss = dreq->dreq_gss;  in dccp_create_openreq_child()
  120  newdp->dccps_isr = dreq->dreq_isr;  in dccp_create_openreq_child()
  121  newdp->dccps_gsr = dreq->dreq_gsr;  in dccp_create_openreq_child()
  126  if (dccp_feat_activate_values(newsk, &dreq->dreq_featneg)) {  in dccp_create_openreq_child()
  147  struct dccp_request_sock *dreq = dccp_rsk(req);  in dccp_check_req() local
  [all …]
|
D | options.c |
  48   int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,  in dccp_parse_options() argument
  99   if (dreq != NULL && (opt >= DCCPO_MIN_RX_CCID_SPECIFIC ||  in dccp_parse_options()
  125  rc = dccp_feat_parse_options(sk, dreq, mandatory, opt,  in dccp_parse_options()
  144  if (dreq != NULL) {  in dccp_parse_options()
  145  dreq->dreq_timestamp_echo = ntohl(opt_val);  in dccp_parse_options()
  146  dreq->dreq_timestamp_time = dccp_timestamp();  in dccp_parse_options()
  353  struct dccp_request_sock *dreq,  in dccp_insert_option_timestamp_echo() argument
  360  if (dreq != NULL) {  in dccp_insert_option_timestamp_echo()
  361  elapsed_time = dccp_timestamp() - dreq->dreq_timestamp_time;  in dccp_insert_option_timestamp_echo()
  362  tstamp_echo = htonl(dreq->dreq_timestamp_echo);  in dccp_insert_option_timestamp_echo()
  [all …]
|
D | output.c |
  403  struct dccp_request_sock *dreq;  in dccp_make_response() local
  422  dreq = dccp_rsk(req);  in dccp_make_response()
  424  dccp_inc_seqno(&dreq->dreq_gss);  in dccp_make_response()
  426  DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_gss;  in dccp_make_response()
  429  if (dccp_feat_server_ccid_dependencies(dreq))  in dccp_make_response()
  432  if (dccp_insert_options_rsk(dreq, skb))  in dccp_make_response()
  444  dccp_hdr_set_seq(dh, dreq->dreq_gss);  in dccp_make_response()
  445  dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);  in dccp_make_response()
  446  dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;  in dccp_make_response()
|
D | ipv4.c |
  596  struct dccp_request_sock *dreq;  in dccp_v4_conn_request() local
  627  dreq = dccp_rsk(req);  in dccp_v4_conn_request()
  628  if (dccp_parse_options(sk, dreq, skb))  in dccp_v4_conn_request()
  648  dreq->dreq_isr = dcb->dccpd_seq;  in dccp_v4_conn_request()
  649  dreq->dreq_gsr = dreq->dreq_isr;  in dccp_v4_conn_request()
  650  dreq->dreq_iss = dccp_v4_init_sequence(skb);  in dccp_v4_conn_request()
  651  dreq->dreq_gss = dreq->dreq_iss;  in dccp_v4_conn_request()
  652  dreq->dreq_service = service;  in dccp_v4_conn_request()
|
D | ipv6.c |
  320  struct dccp_request_sock *dreq;  in dccp_v6_conn_request() local
  358  dreq = dccp_rsk(req);  in dccp_v6_conn_request()
  359  if (dccp_parse_options(sk, dreq, skb))  in dccp_v6_conn_request()
  391  dreq->dreq_isr = dcb->dccpd_seq;  in dccp_v6_conn_request()
  392  dreq->dreq_gsr = dreq->dreq_isr;  in dccp_v6_conn_request()
  393  dreq->dreq_iss = dccp_v6_init_sequence(skb);  in dccp_v6_conn_request()
  394  dreq->dreq_gss = dreq->dreq_iss;  in dccp_v6_conn_request()
  395  dreq->dreq_service = service;  in dccp_v6_conn_request()
|
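The ipv4.c and ipv6.c hits seed the DCCP request socket in the conn_request handlers: the peer's sequence number becomes dreq_isr/dreq_gsr, a locally generated initial sequence becomes dreq_iss/dreq_gss, and the service code is recorded; minisocks.c later copies these values into the child socket. A small illustration of those assignments (the struct below is a trimmed, hypothetical subset and the helper is not a kernel function):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Trimmed, illustrative subset of struct dccp_request_sock (field names follow the excerpts). */
struct dccp_request_sock {
	uint64_t dreq_iss;	/* initial sequence number we will send */
	uint64_t dreq_gss;	/* greatest sequence number sent so far */
	uint64_t dreq_isr;	/* initial sequence number received from the peer */
	uint64_t dreq_gsr;	/* greatest sequence number received so far */
	uint32_t dreq_service;	/* service code carried by the Request packet */
};

/* Hypothetical helper mirroring the assignments in dccp_v4/v6_conn_request(). */
static void seed_request_sock(struct dccp_request_sock *dreq,
			      uint64_t peer_seq, uint64_t local_iss,
			      uint32_t service)
{
	dreq->dreq_isr = peer_seq;		/* remember what the peer sent */
	dreq->dreq_gsr = dreq->dreq_isr;
	dreq->dreq_iss = local_iss;		/* pick our own starting point */
	dreq->dreq_gss = dreq->dreq_iss;
	dreq->dreq_service = service;
}

int main(void)
{
	struct dccp_request_sock dreq;

	seed_request_sock(&dreq, 1000, 42, 0x12345678);
	printf("iss=%" PRIu64 " gss=%" PRIu64 " isr=%" PRIu64 " gsr=%" PRIu64 "\n",
	       dreq.dreq_iss, dreq.dreq_gss, dreq.dreq_isr, dreq.dreq_gsr);
	return 0;
}
```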
D | feat.c |
  633   int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,  in dccp_feat_insert_opts() argument
  636   struct list_head *fn = dreq ? &dreq->dreq_featneg : &dp->dccps_featneg;  in dccp_feat_insert_opts()
  1004  int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq)  in dccp_feat_server_ccid_dependencies() argument
  1006  struct list_head *fn = &dreq->dreq_featneg;  in dccp_feat_server_ccid_dependencies()
  1404  int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,  in dccp_feat_parse_options() argument
  1408  struct list_head *fn = dreq ? &dreq->dreq_featneg : &dp->dccps_featneg;  in dccp_feat_parse_options()
|
D | dccp.h | 455 int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
|
/linux-6.1.9/drivers/crypto/marvell/cesa/ |
D | tdma.c |
  37   void mv_cesa_dma_step(struct mv_cesa_req *dreq)  in mv_cesa_dma_step() argument
  39   struct mv_cesa_engine *engine = dreq->engine;  in mv_cesa_dma_step()
  51   writel_relaxed(dreq->chain.first->cur_dma,  in mv_cesa_dma_step()
  58   void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)  in mv_cesa_dma_cleanup() argument
  62   for (tdma = dreq->chain.first; tdma;) {  in mv_cesa_dma_cleanup()
  75   dreq->chain.first = NULL;  in mv_cesa_dma_cleanup()
  76   dreq->chain.last = NULL;  in mv_cesa_dma_cleanup()
  79   void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,  in mv_cesa_dma_prepare() argument
  84   for (tdma = dreq->chain.first; tdma; tdma = tdma->next) {  in mv_cesa_dma_prepare()
  97   struct mv_cesa_req *dreq)  in mv_cesa_tdma_chain() argument
  [all …]
|
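The tdma.c hits walk the request's descriptor chain: stepping writes the first descriptor's DMA address to the engine, and cleanup frees every descriptor before clearing the chain's first/last pointers. A userspace sketch of the cleanup walk (types are simplified stand-ins and free() replaces the driver's DMA-pool release):

```c
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the TDMA descriptor chain hanging off a request. */
struct tdma_desc {
	struct tdma_desc *next;
};

struct cesa_req {
	struct {
		struct tdma_desc *first;
		struct tdma_desc *last;
	} chain;
};

/* Same shape as mv_cesa_dma_cleanup(): walk the chain, release every
 * descriptor, then forget both ends of the chain. */
static void dma_cleanup(struct cesa_req *dreq)
{
	struct tdma_desc *tdma = dreq->chain.first;

	while (tdma) {
		struct tdma_desc *next = tdma->next;

		free(tdma);		/* dma_pool_free() in the real driver */
		tdma = next;
	}
	dreq->chain.first = NULL;
	dreq->chain.last = NULL;
}

int main(void)
{
	struct cesa_req req = { .chain = { NULL, NULL } };

	/* Build a two-descriptor chain, then tear it down again. */
	req.chain.first = calloc(1, sizeof(*req.chain.first));
	if (!req.chain.first)
		return 1;
	req.chain.first->next = calloc(1, sizeof(*req.chain.first));
	req.chain.last = req.chain.first->next;

	dma_cleanup(&req);
	printf("chain released: first=%p last=%p\n",
	       (void *)req.chain.first, (void *)req.chain.last);
	return 0;
}
```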
D | cesa.h |
  826  void mv_cesa_dma_step(struct mv_cesa_req *dreq);
  828  static inline int mv_cesa_dma_process(struct mv_cesa_req *dreq,  in mv_cesa_dma_process() argument
  840  void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
  842  void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq);
  844  struct mv_cesa_req *dreq);
|
/linux-6.1.9/net/sunrpc/ |
D | cache.c |
  585  static void __unhash_deferred_req(struct cache_deferred_req *dreq)  in __unhash_deferred_req() argument
  587  hlist_del_init(&dreq->hash);  in __unhash_deferred_req()
  588  if (!list_empty(&dreq->recent)) {  in __unhash_deferred_req()
  589  list_del_init(&dreq->recent);  in __unhash_deferred_req()
  594  static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)  in __hash_deferred_req() argument
  598  INIT_LIST_HEAD(&dreq->recent);  in __hash_deferred_req()
  599  hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);  in __hash_deferred_req()
  602  static void setup_deferral(struct cache_deferred_req *dreq,  in setup_deferral() argument
  607  dreq->item = item;  in setup_deferral()
  611  __hash_deferred_req(dreq, item);  in setup_deferral()
  [all …]
|
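The sunrpc cache.c hits park a deferred request on a hash chain keyed by the cache item it is waiting for, so that answering the item can revisit every waiter. A simplified sketch of that bucketing (the hash function, table size, and singly linked chain are assumptions, not the kernel's hlist implementation):

```c
#include <stdint.h>
#include <stdio.h>

#define DFR_HASHSIZE 16

/* Simplified deferred request: remembers which cache item it is waiting for. */
struct deferred_req {
	const void *item;
	struct deferred_req *next;	/* bucket chain (an hlist in the kernel) */
};

static struct deferred_req *defer_hash[DFR_HASHSIZE];

/* Bucket choice: hash the cache item so every waiter for that item shares a chain. */
static unsigned int defer_hash_fn(const void *item)
{
	return (unsigned int)(((uintptr_t)item / sizeof(void *)) % DFR_HASHSIZE);
}

/* Same idea as setup_deferral()/__hash_deferred_req(): record the item and park
 * the request on its bucket so it can be revisited when the item is answered. */
static void hash_deferred_req(struct deferred_req *dreq, const void *item)
{
	unsigned int h = defer_hash_fn(item);

	dreq->item = item;
	dreq->next = defer_hash[h];
	defer_hash[h] = dreq;
}

int main(void)
{
	static int cache_item;
	struct deferred_req dreq = { 0 };

	hash_deferred_req(&dreq, &cache_item);
	printf("deferred request parked in bucket %u\n", defer_hash_fn(&cache_item));
	return 0;
}
```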
/linux-6.1.9/drivers/s390/block/ |
D | dasd_diag.c |
  167  struct dasd_diag_req *dreq;  in dasd_start_diag() local
  178  dreq = cqr->data;  in dasd_start_diag()
  183  private->iob.block_count = dreq->block_count;  in dasd_start_diag()
  185  private->iob.bio_list = dreq->bio;  in dasd_start_diag()
  512  struct dasd_diag_req *dreq;  in dasd_diag_build_cp() local
  545  cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, struct_size(dreq, bio, count),  in dasd_diag_build_cp()
  550  dreq = (struct dasd_diag_req *) cqr->data;  in dasd_diag_build_cp()
  551  dreq->block_count = count;  in dasd_diag_build_cp()
  552  dbio = dreq->bio;  in dasd_diag_build_cp()
|
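dasd_diag.c sizes its request with struct_size(dreq, bio, count), i.e. the header plus a flexible array of per-block entries in a single allocation (the kernel macro also guards against arithmetic overflow). A plain C illustration of the same layout (the struct and field names are invented for the example):

```c
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-in for struct dasd_diag_req: a count followed by a flexible
 * array of per-block entries (bio pointers in the real driver). */
struct diag_req {
	unsigned int block_count;
	void *bio[];			/* flexible array member, must come last */
};

int main(void)
{
	unsigned int count = 8;
	/* Userspace equivalent of struct_size(dreq, bio, count): header plus
	 * 'count' trailing array elements in one allocation. */
	struct diag_req *dreq = calloc(1, sizeof(*dreq) + count * sizeof(dreq->bio[0]));

	if (!dreq)
		return 1;
	dreq->block_count = count;
	printf("allocated one request covering %u blocks\n", dreq->block_count);
	free(dreq);
	return 0;
}
```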
/linux-6.1.9/drivers/dma/ |
D | bcm2835-dma.c |
  72   unsigned int dreq;  member
  659  if (c->dreq != 0)  in bcm2835_dma_prep_slave_sg()
  660  info |= BCM2835_DMA_PER_MAP(c->dreq);  in bcm2835_dma_prep_slave_sg()
  733  if (c->dreq != 0)  in bcm2835_dma_prep_dma_cyclic()
  734  info |= BCM2835_DMA_PER_MAP(c->dreq);  in bcm2835_dma_prep_dma_cyclic()
  873  to_bcm2835_dma_chan(chan)->dreq = spec->args[0];  in bcm2835_dma_xlate()
|
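In bcm2835-dma.c, dreq is not a request structure at all but the peripheral DREQ line number taken from the devicetree cell in the xlate callback; when it is non-zero it is folded into the control block's info word so the transfer is paced by that peripheral. A sketch of that conditional mapping (the PER_MAP shift value is an assumption made for illustration, not taken from the driver):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed encoding for illustration: the peripheral-map field of the
 * control-block info word is shifted to bit 16 here; check the datasheet
 * or the driver header for the real layout. */
#define DMA_PER_MAP(x)	((uint32_t)(x) << 16)

struct dma_chan_cfg {
	unsigned int dreq;	/* peripheral DREQ line; 0 means unpaced */
};

/* Mirrors the pattern in the prep callbacks above: only pace the transfer
 * when a DREQ line was assigned from the devicetree cell. */
static uint32_t build_info_word(const struct dma_chan_cfg *c, uint32_t info)
{
	if (c->dreq != 0)
		info |= DMA_PER_MAP(c->dreq);
	return info;
}

int main(void)
{
	struct dma_chan_cfg chan = { .dreq = 5 };	/* e.g. spec->args[0] from the DT */
	uint32_t info = build_info_word(&chan, 0);

	printf("control-block info word: 0x%08" PRIx32 "\n", info);
	return 0;
}
```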
/linux-6.1.9/fs/nilfs2/ |
D | btree.c |
  1735  union nilfs_bmap_ptr_req *dreq,  in nilfs_btree_prepare_convert_and_insert() argument
  1749  dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key);  in nilfs_btree_prepare_convert_and_insert()
  1757  ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);  in nilfs_btree_prepare_convert_and_insert()
  1764  nreq->bpr_ptr = dreq->bpr_ptr + 1;  in nilfs_btree_prepare_convert_and_insert()
  1784  nilfs_bmap_abort_alloc_ptr(btree, dreq, dat);  in nilfs_btree_prepare_convert_and_insert()
  1795  union nilfs_bmap_ptr_req *dreq,  in nilfs_btree_commit_convert_and_insert() argument
  1815  nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);  in nilfs_btree_commit_convert_and_insert()
  1822  nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk);  in nilfs_btree_commit_convert_and_insert()
  1837  nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);  in nilfs_btree_commit_convert_and_insert()
  1844  nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr,  in nilfs_btree_commit_convert_and_insert()
  [all …]
|
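The nilfs2 btree.c hits follow a prepare/commit/abort shape: converting a direct mapping into a btree first reserves block pointers for dreq (and possibly nreq = dreq + 1), aborts the reservation on error, and only commits and inserts the key once everything has succeeded. A generic sketch of that two-phase idiom (all names and the reservation bookkeeping are illustrative, not nilfs internals):

```c
#include <stdio.h>

/* Illustrative two-phase pointer-allocation request, following the
 * prepare/commit/abort shape of the btree.c excerpts above. */
struct ptr_req {
	unsigned long ptr;	/* block pointer being reserved */
	int reserved;
};

static int prepare_alloc_ptr(struct ptr_req *req, unsigned long hint)
{
	req->ptr = hint;	/* e.g. a target found near existing data */
	req->reserved = 1;	/* reservation only; the real call can fail */
	return 0;
}

static void commit_alloc_ptr(struct ptr_req *req)
{
	req->reserved = 0;	/* reservation becomes a permanent allocation */
}

static void abort_alloc_ptr(struct ptr_req *req)
{
	req->reserved = 0;	/* give the reservation back on error */
}

int main(void)
{
	struct ptr_req dreq, nreq;

	if (prepare_alloc_ptr(&dreq, 100))
		return 1;
	/* The second block simply follows the first, as in the excerpt. */
	if (prepare_alloc_ptr(&nreq, dreq.ptr + 1)) {
		abort_alloc_ptr(&dreq);		/* unwind the earlier reservation */
		return 1;
	}
	commit_alloc_ptr(&dreq);
	commit_alloc_ptr(&nreq);
	printf("converted: root ptr=%lu, child ptr=%lu\n", dreq.ptr, nreq.ptr);
	return 0;
}
```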
/linux-6.1.9/Documentation/devicetree/bindings/dma/ |
D | st_fdma.txt | 51 -bit 2-0: Holdoff value, dreq will be masked for
|
/linux-6.1.9/include/linux/ |
D | dccp.h | 180 extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
|
D | nfs_xdr.h |
  1621  struct nfs_direct_req *dreq;  member
  1662  struct nfs_direct_req *dreq; /* O_DIRECT request */  member
  1674  struct nfs_direct_req *dreq; /* O_DIRECT request */  member
|
/linux-6.1.9/drivers/infiniband/core/ |
D | cm_trace.h | 183 DEFINE_CM_SEND_EVENT(dreq);
|