
Searched refs: iov (Results 1 – 25 of 211), sorted by relevance


/linux-6.6.21/tools/testing/selftests/powerpc/ptrace/
ptrace.h:108 struct iovec iov; in ptrace_read_regs() local
113 iov.iov_base = regs; in ptrace_read_regs()
114 iov.iov_len = n * sizeof(unsigned long); in ptrace_read_regs()
116 ret = ptrace(PTRACE_GETREGSET, child, type, &iov); in ptrace_read_regs()
128 struct iovec iov; in ptrace_write_regs() local
133 iov.iov_base = regs; in ptrace_write_regs()
134 iov.iov_len = n * sizeof(unsigned long); in ptrace_write_regs()
136 ret = ptrace(PTRACE_SETREGSET, child, type, &iov); in ptrace_write_regs()
146 struct iovec iov; in show_tar_registers() local
155 iov.iov_base = (u64 *) reg; in show_tar_registers()
[all …]
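
The powerpc selftest above shows the PTRACE_GETREGSET/PTRACE_SETREGSET convention: a struct iovec names the register buffer and its size, and the kernel trims iov_len to the number of bytes it actually filled in. A minimal userspace sketch of the read side, assuming an x86_64 tracee that is already ptrace-stopped (the pid handling and error paths are illustrative only):

#include <elf.h>            /* NT_PRSTATUS */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>        /* struct iovec */
#include <sys/user.h>       /* struct user_regs_struct */

/* Read the general-purpose register set of a stopped tracee. */
static int read_gp_regs(pid_t pid, struct user_regs_struct *regs)
{
        struct iovec iov = {
                .iov_base = regs,
                .iov_len  = sizeof(*regs),
        };

        if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) < 0) {
                perror("PTRACE_GETREGSET");
                return -1;
        }
        /* The kernel shrinks iov_len to the amount of data it wrote. */
        printf("regset returned %zu bytes\n", iov.iov_len);
        return 0;
}

The arm64 (NT_ARM_ZA, NT_ARM_ZT) and UML (NT_X86_XSTATE) matches further down use the same iovec shape with different regset types.
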
/linux-6.6.21/include/linux/
vringh.h:99 struct iovec *iov; member
117 struct kvec *iov; member
132 static inline void vringh_iov_init(struct vringh_iov *iov, in vringh_iov_init() argument
135 iov->used = iov->i = 0; in vringh_iov_init()
136 iov->consumed = 0; in vringh_iov_init()
137 iov->max_num = num; in vringh_iov_init()
138 iov->iov = iovec; in vringh_iov_init()
141 static inline void vringh_iov_reset(struct vringh_iov *iov) in vringh_iov_reset() argument
143 iov->iov[iov->i].iov_len += iov->consumed; in vringh_iov_reset()
144 iov->iov[iov->i].iov_base -= iov->consumed; in vringh_iov_reset()
[all …]
/linux-6.6.21/arch/powerpc/platforms/powernv/
pci-sriov.c:149 struct pnv_iov_data *iov; in pnv_pci_ioda_fixup_iov_resources() local
152 iov = kzalloc(sizeof(*iov), GFP_KERNEL); in pnv_pci_ioda_fixup_iov_resources()
153 if (!iov) in pnv_pci_ioda_fixup_iov_resources()
155 pdev->dev.archdata.iov_data = iov; in pnv_pci_ioda_fixup_iov_resources()
194 iov->m64_single_mode[i] = true; in pnv_pci_ioda_fixup_iov_resources()
209 iov->need_shift = true; in pnv_pci_ioda_fixup_iov_resources()
223 kfree(iov); in pnv_pci_ioda_fixup_iov_resources()
252 struct pnv_iov_data *iov = pnv_iov_get(pdev); in pnv_pci_iov_resource_alignment() local
260 if (!iov) in pnv_pci_iov_resource_alignment()
268 if (iov->m64_single_mode[resno - PCI_IOV_RESOURCES]) in pnv_pci_iov_resource_alignment()
[all …]
/linux-6.6.21/drivers/pci/
iov.c:86 struct pci_sriov *iov = dev->sriov; in pci_iov_set_numvfs() local
88 pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn); in pci_iov_set_numvfs()
89 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &iov->offset); in pci_iov_set_numvfs()
90 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride); in pci_iov_set_numvfs()
102 struct pci_sriov *iov = dev->sriov; in compute_max_vf_buses() local
105 for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) { in compute_max_vf_buses()
107 if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) { in compute_max_vf_buses()
113 if (busnr > iov->max_VF_buses) in compute_max_vf_buses()
114 iov->max_VF_buses = busnr; in compute_max_vf_buses()
295 struct pci_sriov *iov = dev->sriov; in pci_iov_add_virtfn() local
[all …]
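
These drivers/pci/iov.c hits are the SR-IOV core, where iov is a struct pci_sriov rather than an I/O vector. A PF driver normally reaches this code through its sriov_configure callback; a hedged sketch of such a callback (the driver name is hypothetical), assuming the usual contract of returning the enabled VF count on success:

#include <linux/errno.h>
#include <linux/pci.h>

/* Hypothetical PF driver callback, wired up via struct pci_driver. */
static int demo_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        if (num_vfs == 0) {
                if (pci_vfs_assigned(pdev))
                        return -EBUSY;          /* VFs still passed through */
                pci_disable_sriov(pdev);
                return 0;
        }

        /* Helpers matched above (pci_iov_set_numvfs(), compute_max_vf_buses())
         * run beneath pci_enable_sriov(). */
        return pci_enable_sriov(pdev, num_vfs) ?: num_vfs;
}
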
/linux-6.6.21/drivers/vhost/
vringh.c:83 void vringh_kiov_advance(struct vringh_kiov *iov, size_t len) in vringh_kiov_advance() argument
85 while (len && iov->i < iov->used) { in vringh_kiov_advance()
86 size_t partlen = min(iov->iov[iov->i].iov_len, len); in vringh_kiov_advance()
88 iov->consumed += partlen; in vringh_kiov_advance()
89 iov->iov[iov->i].iov_len -= partlen; in vringh_kiov_advance()
90 iov->iov[iov->i].iov_base += partlen; in vringh_kiov_advance()
92 if (!iov->iov[iov->i].iov_len) { in vringh_kiov_advance()
94 iov->iov[iov->i].iov_len = iov->consumed; in vringh_kiov_advance()
95 iov->iov[iov->i].iov_base -= iov->consumed; in vringh_kiov_advance()
97 iov->consumed = 0; in vringh_kiov_advance()
[all …]
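
vringh_kiov_advance() above consumes len bytes from the front of a kvec array while recording how much of the current element was eaten, so the reset helper in vringh.h can restore it. A simplified userspace analogue of the same advance logic, as used when resuming after a short writev() (names are illustrative, not kernel API):

#include <stddef.h>
#include <sys/uio.h>

/*
 * Skip `len` already-transferred bytes: shrink or drop leading iovec
 * entries and report how many entries remain to be submitted.
 */
static int iovec_advance(struct iovec **piov, int *pcnt, size_t len)
{
        struct iovec *iov = *piov;
        int cnt = *pcnt;

        while (len && cnt) {
                size_t part = len < iov->iov_len ? len : iov->iov_len;

                iov->iov_base = (char *)iov->iov_base + part;
                iov->iov_len -= part;
                len -= part;

                if (!iov->iov_len) {            /* element fully consumed */
                        iov++;
                        cnt--;
                }
        }

        *piov = iov;
        *pcnt = cnt;
        return cnt;
}
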
/linux-6.6.21/drivers/usb/usbip/
vhci_tx.c:59 struct kvec *iov; in vhci_send_cmd_submit() local
75 memset(&iov, 0, sizeof(iov)); in vhci_send_cmd_submit()
85 iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL); in vhci_send_cmd_submit()
86 if (!iov) { in vhci_send_cmd_submit()
99 iov[iovnum].iov_base = &pdu_header; in vhci_send_cmd_submit()
100 iov[iovnum].iov_len = sizeof(pdu_header); in vhci_send_cmd_submit()
109 iov[iovnum].iov_base = sg_virt(sg); in vhci_send_cmd_submit()
110 iov[iovnum].iov_len = sg->length; in vhci_send_cmd_submit()
114 iov[iovnum].iov_base = urb->transfer_buffer; in vhci_send_cmd_submit()
115 iov[iovnum].iov_len = in vhci_send_cmd_submit()
[all …]
stub_tx.c:164 struct kvec *iov = NULL; in stub_send_ret_submit() local
193 iov = kcalloc(iovnum, sizeof(struct kvec), GFP_KERNEL); in stub_send_ret_submit()
195 if (!iov) { in stub_send_ret_submit()
217 iov[iovnum].iov_base = &pdu_header; in stub_send_ret_submit()
218 iov[iovnum].iov_len = sizeof(pdu_header); in stub_send_ret_submit()
230 iov[iovnum].iov_base = in stub_send_ret_submit()
232 iov[iovnum].iov_len = in stub_send_ret_submit()
253 iov[iovnum].iov_base = sg_virt(sg); in stub_send_ret_submit()
254 iov[iovnum].iov_len = size; in stub_send_ret_submit()
260 iov[iovnum].iov_base = urb->transfer_buffer; in stub_send_ret_submit()
[all …]
vudc_tx.c:41 struct kvec iov[1]; in v_send_ret_unlink() local
50 memset(&iov, 0, sizeof(iov)); in v_send_ret_unlink()
56 iov[0].iov_base = &pdu_header; in v_send_ret_unlink()
57 iov[0].iov_len = sizeof(pdu_header); in v_send_ret_unlink()
60 ret = kernel_sendmsg(udc->ud.tcp_socket, &msg, iov, in v_send_ret_unlink()
78 struct kvec *iov = NULL; in v_send_ret_submit() local
100 iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL); in v_send_ret_submit()
101 if (!iov) { in v_send_ret_submit()
114 iov[iovnum].iov_base = &pdu_header; in v_send_ret_submit()
115 iov[iovnum].iov_len = sizeof(pdu_header); in v_send_ret_submit()
[all …]
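
The three usbip senders above (vhci_tx.c, stub_tx.c, vudc_tx.c) share one shape: allocate a kvec array sized for the PDU header plus payload, fill it in, and hand the whole array to kernel_sendmsg(). A stripped-down kernel-context sketch of that pattern; sock, hdr and buf are assumed to exist and most error handling is trimmed:

#include <linux/errno.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int send_pdu(struct socket *sock, void *hdr, size_t hdr_len,
                    void *buf, size_t buf_len)
{
        struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
        struct kvec *iov;
        int ret;

        iov = kcalloc(2, sizeof(*iov), GFP_KERNEL);
        if (!iov)
                return -ENOMEM;

        iov[0].iov_base = hdr;          /* fixed PDU header */
        iov[0].iov_len  = hdr_len;
        iov[1].iov_base = buf;          /* transfer buffer (or sg pages) */
        iov[1].iov_len  = buf_len;

        ret = kernel_sendmsg(sock, &msg, iov, 2, hdr_len + buf_len);
        kfree(iov);
        return ret;
}
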
/linux-6.6.21/drivers/crypto/cavium/nitrox/
nitrox_mbx.c:137 vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues); in nitrox_pf2vf_mbox_handler()
138 vfdev = ndev->iov.vfdev + vfno; in nitrox_pf2vf_mbox_handler()
149 queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp); in nitrox_pf2vf_mbox_handler()
160 vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues); in nitrox_pf2vf_mbox_handler()
161 vfdev = ndev->iov.vfdev + vfno; in nitrox_pf2vf_mbox_handler()
173 queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp); in nitrox_pf2vf_mbox_handler()
184 ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs, in nitrox_mbox_init()
186 if (!ndev->iov.vfdev) in nitrox_mbox_init()
189 for (i = 0; i < ndev->iov.num_vfs; i++) { in nitrox_mbox_init()
190 vfdev = ndev->iov.vfdev + i; in nitrox_mbox_init()
[all …]
/linux-6.6.21/arch/x86/um/os-Linux/
registers.c:32 struct iovec iov; in save_fp_registers() local
35 iov.iov_base = fp_regs; in save_fp_registers()
36 iov.iov_len = FP_SIZE * sizeof(unsigned long); in save_fp_registers()
37 if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) < 0) in save_fp_registers()
55 struct iovec iov; in restore_fp_registers() local
57 iov.iov_base = fp_regs; in restore_fp_registers()
58 iov.iov_len = FP_SIZE * sizeof(unsigned long); in restore_fp_registers()
59 if (ptrace(PTRACE_SETREGSET, pid, NT_X86_XSTATE, &iov) < 0) in restore_fp_registers()
130 struct iovec iov; in arch_init_registers() local
136 iov.iov_base = fp_regs; in arch_init_registers()
[all …]
/linux-6.6.21/tools/testing/selftests/arm64/fp/
zt-ptrace.c:59 struct iovec iov; in get_za() local
73 iov.iov_base = *buf; in get_za()
74 iov.iov_len = sz; in get_za()
75 if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZA, &iov)) in get_za()
93 struct iovec iov; in set_za() local
95 iov.iov_base = (void *)za; in set_za()
96 iov.iov_len = za->size; in set_za()
97 return ptrace(PTRACE_SETREGSET, pid, NT_ARM_ZA, &iov); in set_za()
102 struct iovec iov; in get_zt() local
104 iov.iov_base = zt; in get_zt()
[all …]
/linux-6.6.21/fs/smb/server/
ksmbd_work.c:32 work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec), in ksmbd_alloc_work_struct()
34 if (!work->iov) { in ksmbd_alloc_work_struct()
58 kfree(work->iov); in ksmbd_free_work_struct()
104 work->iov[++work->iov_idx].iov_base = ib; in __ksmbd_iov_pin()
105 work->iov[work->iov_idx].iov_len = ib_len; in __ksmbd_iov_pin()
126 new = krealloc(work->iov, in __ksmbd_iov_pin_rsp()
134 work->iov = new; in __ksmbd_iov_pin_rsp()
139 work->iov[work->iov_idx].iov_base = work->response_buf; in __ksmbd_iov_pin_rsp()
140 *(__be32 *)work->iov[0].iov_base = 0; in __ksmbd_iov_pin_rsp()
141 work->iov[work->iov_idx].iov_len = 4; in __ksmbd_iov_pin_rsp()
[all …]
transport_tcp.c:36 struct kvec *iov; member
103 kfree(t->iov); in free_transport()
116 static unsigned int kvec_array_init(struct kvec *new, struct kvec *iov, in kvec_array_init() argument
121 while (bytes || !iov->iov_len) { in kvec_array_init()
122 int copy = min(bytes, iov->iov_len); in kvec_array_init()
126 if (iov->iov_len == base) { in kvec_array_init()
127 iov++; in kvec_array_init()
133 memcpy(new, iov, sizeof(*iov) * nr_segs); in kvec_array_init()
150 if (t->iov && nr_segs <= t->nr_iov) in get_conn_iovec()
151 return t->iov; in get_conn_iovec()
[all …]
/linux-6.6.21/fs/smb/client/
smb2pdu.c:988 add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode) in add_posix_context() argument
992 iov[num].iov_base = create_posix_buf(mode); in add_posix_context()
995 if (iov[num].iov_base == NULL) in add_posix_context()
997 iov[num].iov_len = sizeof(struct create_posix); in add_posix_context()
1026 struct kvec iov[1]; in SMB2_negotiate() local
1099 iov[0].iov_base = (char *)req; in SMB2_negotiate()
1100 iov[0].iov_len = total_len; in SMB2_negotiate()
1103 rqst.rq_iov = iov; in SMB2_negotiate()
1424 struct kvec iov[2]; member
1490 sess_data->iov[0].iov_base = (char *)req; in SMB2_sess_alloc_buffer()
[all …]
sess.c:1345 struct kvec iov[3]; member
1362 sess_data->iov[0].iov_base = (char *)smb_buf; in sess_alloc_buffer()
1363 sess_data->iov[0].iov_len = be32_to_cpu(smb_buf->smb_buf_length) + 4; in sess_alloc_buffer()
1371 sess_data->iov[2].iov_base = kmalloc(2000, GFP_KERNEL); in sess_alloc_buffer()
1372 if (!sess_data->iov[2].iov_base) { in sess_alloc_buffer()
1381 sess_data->iov[0].iov_base = NULL; in sess_alloc_buffer()
1382 sess_data->iov[0].iov_len = 0; in sess_alloc_buffer()
1390 struct kvec *iov = sess_data->iov; in sess_free_buffer() local
1396 if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base) in sess_free_buffer()
1397 memzero_explicit(iov[0].iov_base, iov[0].iov_len); in sess_free_buffer()
[all …]
transport.c:250 struct kvec *iov; in smb_rqst_len() local
256 iov = &rqst->rq_iov[1]; in smb_rqst_len()
259 iov = rqst->rq_iov; in smb_rqst_len()
265 buflen += iov[i].iov_len; in smb_rqst_len()
276 struct kvec *iov; in __smb_send_rqst() local
341 iov = rqst[j].rq_iov; in __smb_send_rqst()
346 dump_smb(iov[i].iov_base, iov[i].iov_len); in __smb_send_rqst()
347 size += iov[i].iov_len; in __smb_send_rqst()
350 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size); in __smb_send_rqst()
423 struct kvec iov; member
[all …]
smb2file.c:26 static struct smb2_symlink_err_rsp *symlink_data(const struct kvec *iov) in symlink_data() argument
28 struct smb2_err_rsp *err = iov->iov_base; in symlink_data()
38 if (le32_to_cpu(err->ByteCount) < len || iov->iov_len < len + sizeof(*err) + 1) in symlink_data()
42 end = (struct smb2_error_context_rsp *)((u8 *)err + iov->iov_len); in symlink_data()
55 iov->iov_len >= SMB2_SYMLINK_STRUCT_SIZE) { in symlink_data()
66 int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec *iov, char **path) in smb2_parse_symlink_response() argument
73 if (!cifs_sb || !iov || !iov->iov_base || !iov->iov_len || !path) in smb2_parse_symlink_response()
76 sym = symlink_data(iov); in smb2_parse_symlink_response()
85 if (iov->iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offs + sub_len || in smb2_parse_symlink_response()
86 iov->iov_len < SMB2_SYMLINK_STRUCT_SIZE + print_offs + print_len) in smb2_parse_symlink_response()
/linux-6.6.21/net/handshake/
alert.c:40 struct kvec iov; in tls_alert_send() local
48 iov.iov_base = alert; in tls_alert_send()
49 iov.iov_len = sizeof(alert); in tls_alert_send()
62 iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, iov.iov_len); in tls_alert_send()
100 const struct kvec *iov; in tls_alert_recv() local
103 iov = msg->msg_iter.kvec; in tls_alert_recv()
104 data = iov->iov_base; in tls_alert_recv()
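
tls_alert_send() above is the single-buffer in-kernel sendmsg idiom: wrap the buffer in one kvec, attach it to the msghdr with iov_iter_kvec(), and call sock_sendmsg(). A minimal sketch assuming a connected struct socket (the function name is illustrative):

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int send_one_buffer(struct socket *sock, void *buf, size_t len)
{
        struct msghdr msg = { };
        struct kvec iov = {
                .iov_base = buf,
                .iov_len  = len,
        };

        /* Point the message's iterator at the single-element kvec. */
        iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, iov.iov_len);
        return sock_sendmsg(sock, &msg);
}
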
/linux-6.6.21/net/rxrpc/
local_event.c:41 struct kvec iov[2]; in rxrpc_send_version_request() local
68 iov[0].iov_base = &whdr; in rxrpc_send_version_request()
69 iov[0].iov_len = sizeof(whdr); in rxrpc_send_version_request()
70 iov[1].iov_base = (char *)rxrpc_version_string; in rxrpc_send_version_request()
71 iov[1].iov_len = sizeof(rxrpc_version_string); in rxrpc_send_version_request()
73 len = iov[0].iov_len + iov[1].iov_len; in rxrpc_send_version_request()
75 ret = kernel_sendmsg(local->socket, &msg, iov, 2, len); in rxrpc_send_version_request()
output.c:191 struct kvec iov[1]; in rxrpc_send_ack_packet() local
215 iov[0].iov_base = &txb->wire; in rxrpc_send_ack_packet()
216 iov[0].iov_len = sizeof(txb->wire) + sizeof(txb->ack) + n; in rxrpc_send_ack_packet()
217 len = iov[0].iov_len; in rxrpc_send_ack_packet()
234 iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len); in rxrpc_send_ack_packet()
265 struct kvec iov[1]; in rxrpc_send_abort_packet() local
302 iov[0].iov_base = &pkt; in rxrpc_send_abort_packet()
303 iov[0].iov_len = sizeof(pkt); in rxrpc_send_abort_packet()
308 iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, sizeof(pkt)); in rxrpc_send_abort_packet()
329 struct kvec iov[1]; in rxrpc_send_data_packet() local
[all …]
/linux-6.6.21/lib/
iov_iter.c:127 const struct iovec *iov = iter_iov(i); \
131 iov, (I)) \
132 i->nr_segs -= iov - iter_iov(i); \
133 i->__iov = iov; \
285 const struct iovec *iov, unsigned long nr_segs, in iov_iter_init() argument
295 .__iov = iov, in iov_iter_init()
625 const struct iovec *iov, *end; in iov_iter_iovec_advance() local
632 for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) { in iov_iter_iovec_advance()
633 if (likely(size < iov->iov_len)) in iov_iter_iovec_advance()
635 size -= iov->iov_len; in iov_iter_iovec_advance()
[all …]
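
lib/iov_iter.c implements the iterator that the kvec/iovec users above feed; the iterate_iovec bookkeeping at lines 127–133 is what lets an iterator resume mid-segment. From the caller's side the common pattern is to wrap a buffer and let copy_to_iter()/copy_from_iter() walk it; a hedged kernel-context sketch:

#include <linux/uio.h>

/* Copy `len` bytes from `src` into a caller-supplied kernel buffer
 * through an iov_iter, the way read-style kernel interfaces hand out data. */
static size_t fill_buffer(void *dst, const void *src, size_t len)
{
        struct kvec kv = { .iov_base = dst, .iov_len = len };
        struct iov_iter iter;

        iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
        return copy_to_iter(src, len, &iter);  /* returns bytes copied */
}
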
/linux-6.6.21/net/rds/
rdma.c:529 static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs) in rds_rdma_pages() argument
537 nr_pages = rds_pages_in_vec(&iov[i]); in rds_rdma_pages()
555 struct rds_iov_vector *iov) in rds_rdma_extra_size() argument
571 iov->iov = kcalloc(args->nr_local, in rds_rdma_extra_size()
574 if (!iov->iov) in rds_rdma_extra_size()
577 vec = &iov->iov[0]; in rds_rdma_extra_size()
582 iov->len = args->nr_local; in rds_rdma_extra_size()
646 iovs = vec->iov; in rds_cmsg_rdma_args()
708 struct rds_iovec *iov = &iovs[i]; in rds_cmsg_rdma_args() local
710 unsigned int nr = rds_pages_in_vec(iov); in rds_cmsg_rdma_args()
[all …]
/linux-6.6.21/drivers/net/ethernet/google/gve/
gve_tx.c:85 struct gve_tx_iovec iov[2]) in gve_tx_alloc_fifo()
105 iov[0].iov_offset = fifo->head; in gve_tx_alloc_fifo()
106 iov[0].iov_len = bytes; in gve_tx_alloc_fifo()
115 iov[0].iov_len -= overflow; in gve_tx_alloc_fifo()
116 iov[1].iov_offset = 0; /* Start of fifo*/ in gve_tx_alloc_fifo()
117 iov[1].iov_len = overflow; in gve_tx_alloc_fifo()
125 iov[nfrags - 1].iov_padding = padding; in gve_tx_alloc_fifo()
149 for (i = 0; i < ARRAY_SIZE(info->iov); i++) { in gve_tx_clear_buffer_state()
150 space_freed += info->iov[i].iov_len + info->iov[i].iov_padding; in gve_tx_clear_buffer_state()
151 info->iov[i].iov_len = 0; in gve_tx_clear_buffer_state()
[all …]
/linux-6.6.21/arch/um/drivers/
vector_kern.c:294 struct iovec *iov) in prep_msg() argument
306 iov[iov_index].iov_len = vp->header_size; in prep_msg()
307 vp->form_header(iov[iov_index].iov_base, skb, vp); in prep_msg()
310 iov[iov_index].iov_base = skb->data; in prep_msg()
312 iov[iov_index].iov_len = skb->len - skb->data_len; in prep_msg()
315 iov[iov_index].iov_len = skb->len; in prep_msg()
319 iov[iov_index].iov_base = skb_frag_address_safe(skb_frag); in prep_msg()
320 iov[iov_index].iov_len = skb_frag_size(skb_frag); in prep_msg()
483 struct iovec *iov; in destroy_queue() local
503 iov = mmsg_vector->msg_hdr.msg_iov; in destroy_queue()
[all …]
/linux-6.6.21/tools/testing/selftests/arm64/mte/
check_user_mem.c:95 struct iovec iov[1]; in check_usermem_access_fault() local
96 iov[0].iov_base = ptr + ptroff; in check_usermem_access_fault()
97 iov[0].iov_len = size; in check_usermem_access_fault()
98 syscall_len = readv(fd, iov, 1); in check_usermem_access_fault()
102 struct iovec iov[1]; in check_usermem_access_fault() local
103 iov[0].iov_base = ptr + ptroff; in check_usermem_access_fault()
104 iov[0].iov_len = size; in check_usermem_access_fault()
105 syscall_len = writev(fd, iov, 1); in check_usermem_access_fault()
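
The MTE selftest above drives readv()/writev(), which take the same struct iovec array at the syscall boundary. A self-contained userspace example of scatter-reading into two buffers (the file path is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        char hdr[16], body[64];
        struct iovec iov[2] = {
                { .iov_base = hdr,  .iov_len = sizeof(hdr)  },
                { .iov_base = body, .iov_len = sizeof(body) },
        };
        int fd = open("/etc/hostname", O_RDONLY);       /* illustrative path */
        ssize_t n;

        if (fd < 0)
                return 1;
        n = readv(fd, iov, 2);          /* fills hdr first, then body */
        printf("readv returned %zd bytes\n", n);
        close(fd);
        return n < 0;
}
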
