/linux-5.19.10/net/sunrpc/

xdr.c
    184  unsigned int buflen = head->iov_len;   in xdr_inline_pages()
    186  head->iov_len = offset;   in xdr_inline_pages()
    193  tail->iov_len = buflen - offset;   in xdr_inline_pages()
    428  if (base >= iov->iov_len)   in xdr_buf_iov_zero()
    430  if (len > iov->iov_len - base)   in xdr_buf_iov_zero()
    431  len = iov->iov_len - base;   in xdr_buf_iov_zero()
    488  if (buflen <= buf->head->iov_len)   in xdr_buf_pages_fill_sparse()
    490  pagelen = buflen - buf->head->iov_len;   in xdr_buf_pages_fill_sparse()
    513  unsigned int sum = head->iov_len + buf->page_len + tail->iov_len;   in xdr_buf_try_expand()
    530  tail->iov_len += free_space;   in xdr_buf_try_expand()
    [all …]

socklib.c
     97  len = xdr->head[0].iov_len;   in xdr_partial_copy_from_skb()
    155  len = xdr->tail[0].iov_len;   in xdr_partial_copy_from_skb()
    217  iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len);   in xprt_send_kvec()
    241  .iov_len = sizeof(marker)   in xprt_send_rm_and_kvec()
    245  size_t len = iov[0].iov_len + iov[1].iov_len;   in xprt_send_rm_and_kvec()
    279  want = xdr->head[0].iov_len + rmsize;   in xprt_sock_sendmsg()
    314  if (base >= xdr->tail[0].iov_len)   in xprt_sock_sendmsg()

svc.c
   1243  if (argv->iov_len < 6*4)   in svc_process_common()
   1263  reply_statp = resv->iov_base + resv->iov_len;   in svc_process_common()
   1329  statp = resv->iov_base +resv->iov_len;   in svc_process_common()
   1349  resv->iov_len = ((void*)statp) - resv->iov_base + 4;   in svc_process_common()
   1374  argv->iov_len);   in svc_process_common()
   1452  resv->iov_len = 0;   in svc_process()
   1459  rqstp->rq_res.tail[0].iov_len = 0;   in svc_process()
   1509  if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {   in bc_svc_process()
   1510  rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;   in bc_svc_process()
   1512  } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +   in bc_svc_process()
    [all …]

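The xdr.c hits above all maintain the head/tail split of an RPC buffer. A minimal userspace sketch of the head/tail bookkeeping that xdr_inline_pages() performs, assuming offset fits inside the head buffer; the kernel's xdr_buf also records a page array between head and tail, which is omitted here, and split_head_tail is a hypothetical name:

```c
#include <stddef.h>
#include <sys/uio.h>

/* Keep the first `offset` bytes of `head` and describe the remainder as
 * `tail`, mirroring the head->iov_len / tail->iov_len updates at xdr.c
 * lines 184, 186 and 193.  Assumes offset <= head->iov_len. */
static void split_head_tail(struct iovec *head, struct iovec *tail,
                            size_t offset)
{
    size_t buflen = head->iov_len;            /* original head length */

    head->iov_len  = offset;                  /* bytes that stay in the head */
    tail->iov_base = (char *)head->iov_base + offset;
    tail->iov_len  = buflen - offset;         /* everything after the split */
}
```
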
/linux-5.19.10/include/linux/sunrpc/

svc.h
    202  iov->iov_len -= sizeof(__be32);   in svc_getnl()
    208  __be32 *vp = iov->iov_base + iov->iov_len;   in svc_putnl()
    210  iov->iov_len += sizeof(__be32);   in svc_putnl()
    219  iov->iov_len -= sizeof(__be32);   in svc_getu32()
    227  iov->iov_len += sizeof(*vp);   in svc_ungetu32()
    232  __be32 *vp = iov->iov_base + iov->iov_len;   in svc_putu32()
    234  iov->iov_len += sizeof(__be32);   in svc_putu32()
    365  && cp <= (char*)vec->iov_base + vec->iov_len;   in xdr_argsize_check()
    374  vec->iov_len = cp - (char*)vec->iov_base;   in xdr_ressize_check()
    376  return vec->iov_len <= PAGE_SIZE;   in xdr_ressize_check()
    [all …]

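svc_getnl() and svc_putnl() pull a network-order 32-bit word off the front of a kvec, or append one at its end, keeping iov_base and iov_len consistent. A userspace analogue of that pattern; iov_get_nl and iov_put_nl are illustrative names, not kernel APIs:

```c
#include <arpa/inet.h>   /* ntohl(), htonl() */
#include <stdint.h>
#include <string.h>
#include <sys/uio.h>

/* Consume one big-endian u32 from the front of the iovec.
 * Caller must ensure iov->iov_len >= 4. */
static uint32_t iov_get_nl(struct iovec *iov)
{
    uint32_t val;

    memcpy(&val, iov->iov_base, sizeof(val));
    iov->iov_base = (char *)iov->iov_base + sizeof(val);
    iov->iov_len -= sizeof(val);
    return ntohl(val);
}

/* Append one big-endian u32 at the current end of the iovec.
 * Caller must have reserved the extra 4 bytes in the buffer. */
static void iov_put_nl(struct iovec *iov, uint32_t val)
{
    uint32_t be = htonl(val);

    memcpy((char *)iov->iov_base + iov->iov_len, &be, sizeof(be));
    iov->iov_len += sizeof(be);
}
```
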
/linux-5.19.10/tools/testing/selftests/powerpc/ptrace/

ptrace.h
    110  iov.iov_len = n * sizeof(unsigned long);   in ptrace_read_regs()
    130  iov.iov_len = n * sizeof(unsigned long);   in ptrace_write_regs()
    152  iov.iov_len = sizeof(unsigned long);   in show_tar_registers()
    199  iov.iov_len = sizeof(unsigned long);   in write_tar_registers()
    242  iov.iov_len = sizeof(unsigned long);   in show_tm_checkpointed_state()
    290  iov.iov_len = sizeof(unsigned long);   in write_ckpt_tar_registers()
    371  iov.iov_len = sizeof(struct fpr_regs);   in show_ckpt_fpr()
    395  iov.iov_len = sizeof(struct fpr_regs);   in write_ckpt_fpr()
    481  iov.iov_len = sizeof(struct pt_regs);   in show_ckpt_gpr()
    509  iov.iov_len = sizeof(struct pt_regs);   in write_ckpt_gpr()
    [all …]

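These selftest helpers all size an iovec to the register set they want and hand it to PTRACE_GETREGSET or PTRACE_SETREGSET; the kernel shrinks iov_len to the number of bytes it actually transferred. A generic caller using NT_PRSTATUS as a stand-in for the powerpc-specific note types; read_gp_regs is an illustrative wrapper:

```c
#include <elf.h>         /* NT_PRSTATUS */
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>    /* struct user_regs_struct */

/* Fetch a stopped tracee's general-purpose registers.
 * Returns 0 on success, -1 on error (errno set by ptrace). */
static long read_gp_regs(pid_t pid, struct user_regs_struct *regs)
{
    struct iovec iov = {
        .iov_base = regs,
        .iov_len  = sizeof(*regs),   /* room we offer; kernel trims it */
    };

    return ptrace(PTRACE_GETREGSET, pid, (void *)(uintptr_t)NT_PRSTATUS, &iov);
}
```
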
/linux-5.19.10/fs/cifs/

smb2transport.c
    272  if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) {   in smb2_calc_signature()
    274  iov[0].iov_len);   in smb2_calc_signature()
    339  label.iov_base, label.iov_len);   in generate_key()
    353  context.iov_base, context.iov_len);   in generate_key()
    496  d->label.iov_len = 12;   in generate_smb30signingkey()
    498  d->context.iov_len = 8;   in generate_smb30signingkey()
    502  d->label.iov_len = 11;   in generate_smb30signingkey()
    504  d->context.iov_len = 10;   in generate_smb30signingkey()
    508  d->label.iov_len = 11;   in generate_smb30signingkey()
    510  d->context.iov_len = 10;   in generate_smb30signingkey()
    [all …]

transport.c
    267  rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {   in smb_rqst_len()
    277  buflen += iov[i].iov_len;   in smb_rqst_len()
    354  .iov_len = 4   in __smb_send_rqst()
    373  dump_smb(iov[i].iov_base, iov[i].iov_len);   in __smb_send_rqst()
    374  size += iov[i].iov_len;   in __smb_send_rqst()
    475  iov.iov_len = sizeof(*tr_hdr);   in smb_send_rqst()
    500  iov[0].iov_len = 4;   in smb_send()
    502  iov[1].iov_len = smb_buf_length;   in smb_send()
    777  if (rqst->rq_iov[0].iov_len != 4 ||   in cifs_setup_async_request()
    899  iov[0].iov_len = get_rfc1002_length(in_buf) + 4;   in SendReceiveNoRsp()
    [all …]

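smb_rqst_len() and __smb_send_rqst() both walk the request's iov array and add up iov_len to get the on-the-wire length. The same idiom in isolation; iov_total_len is an illustrative helper, not a cifs function:

```c
#include <stddef.h>
#include <sys/uio.h>

/* Total payload described by an iovec array: the sum of every iov_len. */
static size_t iov_total_len(const struct iovec *iov, size_t nr)
{
    size_t len = 0;

    for (size_t i = 0; i < nr; i++)
        len += iov[i].iov_len;
    return len;
}
```
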
/linux-5.19.10/net/sunrpc/auth_gss/

gss_krb5_wrap.c
     55  if (buf->page_len || buf->tail[0].iov_len)   in gss_krb5_add_padding()
     59  p = iov->iov_base + iov->iov_len;   in gss_krb5_add_padding()
     60  iov->iov_len += padding;   in gss_krb5_add_padding()
     72  if (len <= buf->head[0].iov_len) {   in gss_krb5_remove_padding()
     74  if (pad > buf->head[0].iov_len)   in gss_krb5_remove_padding()
     76  buf->head[0].iov_len -= pad;   in gss_krb5_remove_padding()
     79  len -= buf->head[0].iov_len;   in gss_krb5_remove_padding()
     91  BUG_ON(len > buf->tail[0].iov_len);   in gss_krb5_remove_padding()
    343  data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;   in gss_unwrap_kerberos_v1()
    345  buf->head[0].iov_len -= (data_start - orig_start);   in gss_unwrap_kerberos_v1()
    [all …]

svcauth_gss.c
    680  if (argv->iov_len < 4)   in svc_safe_getnetobj()
    684  if (argv->iov_len < l)   in svc_safe_getnetobj()
    688  argv->iov_len -= l;   in svc_safe_getnetobj()
    697  if (resv->iov_len + 4 > PAGE_SIZE)   in svc_safe_putnetobj()
    700  p = resv->iov_base + resv->iov_len;   in svc_safe_putnetobj()
    701  resv->iov_len += round_up_to_quad(o->len);   in svc_safe_putnetobj()
    702  if (resv->iov_len > PAGE_SIZE)   in svc_safe_putnetobj()
    727  iov.iov_len = (u8 *)argv->iov_base - (u8 *)rpcstart;   in gss_verify_header()
    731  if (argv->iov_len < 4)   in gss_verify_header()
    762  p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;   in gss_write_null_verf()
    [all …]

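gss_krb5_add_padding() and gss_krb5_remove_padding() keep self-describing padding on the head kvec: every pad byte holds the pad length, so stripping it only needs the last byte. A sketch of that bookkeeping for the simple case where the data sits in one flat buffer; pad_iov and unpad_iov are illustrative names, and the real code also handles page and tail segments:

```c
#include <string.h>
#include <sys/uio.h>

/* Pad the buffer out to a multiple of blocksize; each pad byte stores the
 * pad length.  Caller must have reserved blocksize spare bytes. */
static void pad_iov(struct iovec *iov, size_t blocksize)
{
    size_t padding = blocksize - (iov->iov_len % blocksize);
    unsigned char *p = (unsigned char *)iov->iov_base + iov->iov_len;

    memset(p, (int)padding, padding);
    iov->iov_len += padding;
}

/* Strip the padding again; returns -1 if the trailing pad byte is bogus. */
static int unpad_iov(struct iovec *iov)
{
    unsigned char pad;

    if (iov->iov_len == 0)
        return -1;
    pad = ((unsigned char *)iov->iov_base)[iov->iov_len - 1];
    if (pad == 0 || pad > iov->iov_len)
        return -1;
    iov->iov_len -= pad;
    return 0;
}
```
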
/linux-5.19.10/tools/testing/selftests/user_events/

ftrace_test.c
    251  io[0].iov_len = sizeof(reg.write_index);   in TEST_F()
    253  io[1].iov_len = sizeof(field1);   in TEST_F()
    255  io[2].iov_len = sizeof(field2);   in TEST_F()
    264  io[0].iov_len = sizeof(field2);   in TEST_F()
    268  io[0].iov_len = sizeof(reg.write_index);   in TEST_F()
    294  io[0].iov_len = sizeof(reg.write_index);   in TEST_F()
    296  io[1].iov_len = l;   in TEST_F()
    328  io[0].iov_len = sizeof(reg.write_index);   in TEST_F()
    330  io[1].iov_len = sizeof(loc);   in TEST_F()
    333  io[2].iov_len = bytes;   in TEST_F()

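Each TEST_F() hit builds an iovec array whose first element is the event's write index and whose remaining elements are the payload fields, then writes it with writev(). A generic sketch of that scatter-gather write; emit_record and the field types are illustrative, and the real test uses the user_events registration struct for the index:

```c
#include <stdint.h>
#include <sys/uio.h>

/* Write one record as [index][field1][field2] without assembling a
 * contiguous buffer; each element's size lives in its iov_len. */
static ssize_t emit_record(int fd, int32_t write_index,
                           uint32_t field1, uint64_t field2)
{
    struct iovec io[3] = {
        { .iov_base = &write_index, .iov_len = sizeof(write_index) },
        { .iov_base = &field1,      .iov_len = sizeof(field1) },
        { .iov_base = &field2,      .iov_len = sizeof(field2) },
    };

    return writev(fd, io, 3);
}
```
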
/linux-5.19.10/net/rxrpc/

output.c
    239  iov[0].iov_len = sizeof(pkt->whdr) + sizeof(pkt->ack) + n;   in rxrpc_send_ack_packet()
    241  iov[1].iov_len = sizeof(pkt->ackinfo);   in rxrpc_send_ack_packet()
    242  len = iov[0].iov_len + iov[1].iov_len;   in rxrpc_send_ack_packet()
    329  iov[0].iov_len = sizeof(pkt);   in rxrpc_send_abort_packet()
    390  iov[0].iov_len = sizeof(whdr);   in rxrpc_send_data_packet()
    392  iov[1].iov_len = skb->len;   in rxrpc_send_data_packet()
    393  len = iov[0].iov_len + iov[1].iov_len;   in rxrpc_send_data_packet()
    434  if (iov[1].iov_len >= call->peer->maxdata)   in rxrpc_send_data_packet()
    570  iov[0].iov_len = sizeof(whdr);   in rxrpc_reject_packets()
    572  iov[1].iov_len = sizeof(code);   in rxrpc_reject_packets()
    [all …]

local_event.c
     60  iov[0].iov_len = sizeof(whdr);   in rxrpc_send_version_request()
     62  iov[1].iov_len = sizeof(rxrpc_version_string);   in rxrpc_send_version_request()
     64  len = iov[0].iov_len + iov[1].iov_len;   in rxrpc_send_version_request()

conn_event.c
     62  iov[0].iov_len = sizeof(pkt.whdr);   in rxrpc_conn_retransmit_call()
     64  iov[1].iov_len = 3;   in rxrpc_conn_retransmit_call()
     66  iov[2].iov_len = sizeof(ack_info);   in rxrpc_conn_retransmit_call()
     83  iov[0].iov_len += sizeof(pkt.abort_code);   in rxrpc_conn_retransmit_call()
    104  iov[0].iov_len += sizeof(pkt.ack);   in rxrpc_conn_retransmit_call()
    236  iov[0].iov_len = sizeof(whdr);   in rxrpc_abort_connection()
    238  iov[1].iov_len = sizeof(word);   in rxrpc_abort_connection()
    240  len = iov[0].iov_len + iov[1].iov_len;   in rxrpc_abort_connection()

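The rxrpc senders above all follow the same shape: iov[0] describes a wire header, iov[1] the payload, and the total passed to the socket layer is the sum of the iov_len values. A userspace equivalent using sendmsg() on a connected socket; send_hdr_and_payload is an illustrative name:

```c
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Send a fixed header followed by a payload in one call, without copying
 * them into a single contiguous buffer. */
static ssize_t send_hdr_and_payload(int fd, const void *hdr, size_t hdr_len,
                                    const void *payload, size_t payload_len)
{
    struct iovec iov[2] = {
        { .iov_base = (void *)hdr,     .iov_len = hdr_len },
        { .iov_base = (void *)payload, .iov_len = payload_len },
    };
    struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };

    /* total wire length is iov[0].iov_len + iov[1].iov_len */
    return sendmsg(fd, &msg, 0);
}
```
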
/linux-5.19.10/mm/

process_vm_access.c
    163  ssize_t iov_len;   in process_vm_rw_core() local
    171  iov_len = rvec[i].iov_len;   in process_vm_rw_core()
    172  if (iov_len > 0) {   in process_vm_rw_core()
    174  + iov_len)   in process_vm_rw_core()
    216  (unsigned long)rvec[i].iov_base, rvec[i].iov_len,   in process_vm_rw_core()

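process_vm_rw_core() validates each remote iov_len before copying between address spaces; from userspace those lengths are supplied through process_vm_readv() and process_vm_writev(). A minimal caller; read_remote is an illustrative wrapper:

```c
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>

/* Read `len` bytes starting at `remote_addr` in process `pid` into `buf`.
 * Both sides are described by iovecs; the kernel checks their iov_len. */
static ssize_t read_remote(pid_t pid, void *remote_addr, void *buf, size_t len)
{
    struct iovec local  = { .iov_base = buf,         .iov_len = len };
    struct iovec remote = { .iov_base = remote_addr, .iov_len = len };

    return process_vm_readv(pid, &local, 1, &remote, 1, 0);
}
```
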
/linux-5.19.10/fs/nfsd/

nfscache.c
    118  nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);   in nfsd_reply_cache_free_locked()
    324  size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,   in nfsd_cache_csum()
    326  size_t len = min(buf->head[0].iov_len, csum_len);   in nfsd_cache_csum()
    541  len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);   in nfsd_cache_update()
    564  cachv->iov_len = bufsize;   in nfsd_cache_update()
    591  if (vec->iov_len + data->iov_len > PAGE_SIZE) {   in nfsd_cache_append()
    593  data->iov_len);   in nfsd_cache_append()
    596  memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);   in nfsd_cache_append()
    597  vec->iov_len += data->iov_len;   in nfsd_cache_append()

/linux-5.19.10/fs/ksmbd/

auth.c
    590  iov[i].iov_len);   in ksmbd_sign_smb2_pdu()
    641  iov[i].iov_len);   in ksmbd_sign_smb3_pdu()
    703  label.iov_len);   in generate_key()
    717  context.iov_len);   in generate_key()
    787  d.label.iov_len = 12;   in ksmbd_gen_smb30_signingkey()
    789  d.context.iov_len = 8;   in ksmbd_gen_smb30_signingkey()
    801  d.label.iov_len = 14;   in ksmbd_gen_smb311_signingkey()
    812  d.context.iov_len = 64;   in ksmbd_gen_smb311_signingkey()
    867  d->label.iov_len = 11;   in ksmbd_gen_smb30_encryptionkey()
    869  d->context.iov_len = 10;   in ksmbd_gen_smb30_encryptionkey()
    [all …]

connection.c
    176  len += iov[iov_idx++].iov_len;   in ksmbd_conn_write()
    181  len += iov[iov_idx++].iov_len;   in ksmbd_conn_write()
    183  len += iov[iov_idx++].iov_len;   in ksmbd_conn_write()
    186  iov[iov_idx].iov_len = work->resp_hdr_sz;   in ksmbd_conn_write()
    188  iov[iov_idx].iov_len = get_rfc1002_len(work->response_buf) + 4;   in ksmbd_conn_write()
    190  len += iov[iov_idx++].iov_len;   in ksmbd_conn_write()

/linux-5.19.10/arch/arm64/kernel/

mte.c
    378  size_t len = kiov->iov_len;   in __access_remote_tags()
    436  kiov->iov_len = buf - kiov->iov_base;   in __access_remote_tags()
    437  if (!kiov->iov_len) {   in __access_remote_tags()
    487  get_user(kiov.iov_len, &uiov->iov_len))   in mte_ptrace_copy_tags()
    498  ret = put_user(kiov.iov_len, &uiov->iov_len);   in mte_ptrace_copy_tags()

/linux-5.19.10/drivers/usb/usbip/

vhci_tx.c
    100  iov[iovnum].iov_len = sizeof(pdu_header);   in vhci_send_cmd_submit()
    110  iov[iovnum].iov_len = sg->length;   in vhci_send_cmd_submit()
    115  iov[iovnum].iov_len =   in vhci_send_cmd_submit()
    134  iov[iovnum].iov_len = len;   in vhci_send_cmd_submit()
    216  iov.iov_len = sizeof(pdu_header);   in vhci_send_cmd_unlink()

stub_tx.c
    218  iov[iovnum].iov_len = sizeof(pdu_header);   in stub_send_ret_submit()
    232  iov[iovnum].iov_len =   in stub_send_ret_submit()
    254  iov[iovnum].iov_len = size;   in stub_send_ret_submit()
    261  iov[iovnum].iov_len = urb->actual_length;   in stub_send_ret_submit()
    280  iov[iovnum].iov_len =   in stub_send_ret_submit()
    311  iov[iovnum].iov_len = len;   in stub_send_ret_submit()
    388  iov[0].iov_len = sizeof(pdu_header);   in stub_send_ret_unlink()

/linux-5.19.10/net/sunrpc/xprtrdma/

rpc_rdma.c
    174  return (buf->head[0].iov_len + buf->tail[0].iov_len) <   in rpcrdma_nonpayload_inline()
    214  seg->mr_len = vec->iov_len;   in rpcrdma_convert_kvec()
    257  if (xdrbuf->tail[0].iov_len)   in rpcrdma_convert_iovs()
    347  pos = rqst->rq_snd_buf.head[0].iov_len;   in rpcrdma_encode_read_list()
    407  rqst->rq_rcv_buf.head[0].iov_len,   in rpcrdma_encode_write_list()
    669  dst += xdr->head[0].iov_len + xdr->page_len;   in rpcrdma_pullup_tail_iov()
    670  memmove(dst, xdr->tail[0].iov_base, xdr->tail[0].iov_len);   in rpcrdma_pullup_tail_iov()
    671  r_xprt->rx_stats.pullup_copy_count += xdr->tail[0].iov_len;   in rpcrdma_pullup_tail_iov()
    685  dst += xdr->head[0].iov_len;   in rpcrdma_pullup_pagelist()
    716  if (unlikely(xdr->tail[0].iov_len))   in rpcrdma_prepare_noch_pullup()
    [all …]

/linux-5.19.10/arch/x86/um/os-Linux/

registers.c
     36  iov.iov_len = FP_SIZE * sizeof(unsigned long);   in save_fp_registers()
     58  iov.iov_len = FP_SIZE * sizeof(unsigned long);   in restore_fp_registers()
    137  iov.iov_len = FP_SIZE * sizeof(unsigned long);   in arch_init_registers()

/linux-5.19.10/lib/

iov_iter.c
     24  len = min(n, __p->iov_len - skip); \
     31  if (skip < __p->iov_len) \
    190  copy = min(bytes, iov->iov_len - skip);   in copy_page_to_iter_iovec()
    206  copy = min(bytes, iov->iov_len);   in copy_page_to_iter_iovec()
    220  copy = min(bytes, iov->iov_len - skip);   in copy_page_to_iter_iovec()
    234  copy = min(bytes, iov->iov_len);   in copy_page_to_iter_iovec()
    244  if (skip == iov->iov_len) {   in copy_page_to_iter_iovec()
    274  copy = min(bytes, iov->iov_len - skip);   in copy_page_from_iter_iovec()
    290  copy = min(bytes, iov->iov_len);   in copy_page_from_iter_iovec()
    304  copy = min(bytes, iov->iov_len - skip);   in copy_page_from_iter_iovec()
    [all …]

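copy_page_to_iter_iovec() clamps every copy to min(bytes, iov->iov_len - skip) and moves on to the next segment once the current one is exhausted. A simplified userspace version of that walk; copy_to_iovec is an illustrative name, and the kernel operates on an iov_iter rather than a bare array:

```c
#include <stddef.h>
#include <string.h>
#include <sys/uio.h>

/* Copy up to `bytes` from `src` into an iovec array, starting `skip`
 * bytes into the first non-empty segment.  Returns bytes copied. */
static size_t copy_to_iovec(const struct iovec *iov, size_t nr, size_t skip,
                            const void *src, size_t bytes)
{
    const char *from = src;
    size_t done = 0;

    for (size_t i = 0; i < nr && done < bytes; i++) {
        if (skip >= iov[i].iov_len) {         /* segment fully skipped */
            skip -= iov[i].iov_len;
            continue;
        }
        size_t copy = bytes - done;
        if (copy > iov[i].iov_len - skip)     /* clamp to what fits here */
            copy = iov[i].iov_len - skip;
        memcpy((char *)iov[i].iov_base + skip, from + done, copy);
        done += copy;
        skip = 0;                             /* only the first segment is offset */
    }
    return done;
}
```
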
/linux-5.19.10/net/smc/

smc_clc.c
    736  vec.iov_len = SMC_CLC_RECV_BUF_LEN;   in smc_clc_wait_msg()
    797  vec.iov_len = send_len;   in smc_clc_send_decline()
    936  vec[i++].iov_len = sizeof(*pclc_base);   in smc_clc_send_proposal()
    938  vec[i++].iov_len = sizeof(*pclc_smcd);   in smc_clc_send_proposal()
    941  vec[i++].iov_len = sizeof(*pclc_prfx);   in smc_clc_send_proposal()
    944  vec[i++].iov_len = pclc_prfx->ipv6_prefixes_cnt *   in smc_clc_send_proposal()
    950  vec[i++].iov_len = sizeof(*v2_ext) +   in smc_clc_send_proposal()
    954  vec[i++].iov_len = sizeof(*smcd_v2_ext);   in smc_clc_send_proposal()
    957  vec[i++].iov_len = ini->ism_offered_cnt *   in smc_clc_send_proposal()
    963  vec[i++].iov_len = sizeof(*trl);   in smc_clc_send_proposal()
    [all …]

/linux-5.19.10/drivers/net/ethernet/google/gve/

gve_tx.c
     97  iov[0].iov_len = bytes;   in gve_tx_alloc_fifo()
    106  iov[0].iov_len -= overflow;   in gve_tx_alloc_fifo()
    108  iov[1].iov_len = overflow;   in gve_tx_alloc_fifo()
    430  u64 iov_offset, u64 iov_len)   in gve_dma_sync_for_device() argument
    432  u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;   in gve_dma_sync_for_device()
    485  info->iov[hdr_nfrags - 1].iov_len);   in gve_tx_add_skb_copy()
    498  info->iov[i].iov_len,   in gve_tx_add_skb_copy()
    503  info->iov[i].iov_len);   in gve_tx_add_skb_copy()
    506  info->iov[i].iov_len);   in gve_tx_add_skb_copy()
    507  copy_offset += info->iov[i].iov_len;   in gve_tx_add_skb_copy()
    [all …]