Searched refs:ctxt (Results 1 – 25 of 118) sorted by relevance

/linux-6.1.9/arch/x86/kvm/
emulate.c
197 int (*execute)(struct x86_emulate_ctxt *ctxt);
206 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
245 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr) in reg_read() argument
247 if (KVM_EMULATOR_BUG_ON(nr >= NR_EMULATOR_GPRS, ctxt)) in reg_read()
250 if (!(ctxt->regs_valid & (1 << nr))) { in reg_read()
251 ctxt->regs_valid |= 1 << nr; in reg_read()
252 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr); in reg_read()
254 return ctxt->_regs[nr]; in reg_read()
257 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr) in reg_write() argument
259 if (KVM_EMULATOR_BUG_ON(nr >= NR_EMULATOR_GPRS, ctxt)) in reg_write()
[all …]
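
The emulate.c hits above show the emulator's lazy GPR caching: reg_read() only calls the ->read_gpr() backend the first time a register is touched, marking it in the regs_valid bitmap so later reads hit the local _regs[] copy. A minimal standalone sketch of that pattern follows (simplified names, stubbed backend; not the kernel API):

#include <stdio.h>

#define NR_GPRS 16

struct emu_ctxt {
    unsigned long regs[NR_GPRS];  /* local cache of guest GPRs */
    unsigned int regs_valid;      /* bit n set => regs[n] is populated */
};

/* Stand-in for the ->read_gpr() callback into the hypervisor. */
static unsigned long backend_read_gpr(unsigned int nr)
{
    printf("backend read of GPR %u\n", nr);
    return 0x1000 + nr;           /* arbitrary demo value */
}

static unsigned long reg_read(struct emu_ctxt *ctxt, unsigned int nr)
{
    if (nr >= NR_GPRS)
        return 0;
    if (!(ctxt->regs_valid & (1u << nr))) {
        ctxt->regs_valid |= 1u << nr;
        ctxt->regs[nr] = backend_read_gpr(nr);  /* fetch once */
    }
    return ctxt->regs[nr];                      /* cached afterwards */
}

int main(void)
{
    struct emu_ctxt ctxt = { .regs_valid = 0 };

    reg_read(&ctxt, 3);  /* triggers exactly one backend read */
    reg_read(&ctxt, 3);  /* served from the cache, no backend call */
    return 0;
}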
kvm_emulate.h
92 void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
98 ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
105 void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
114 int (*read_std)(struct x86_emulate_ctxt *ctxt,
126 int (*read_phys)(struct x86_emulate_ctxt *ctxt, unsigned long addr,
137 int (*write_std)(struct x86_emulate_ctxt *ctxt,
147 int (*fetch)(struct x86_emulate_ctxt *ctxt,
157 int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
168 int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
181 int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
[all …]
/linux-6.1.9/arch/arm64/kvm/hyp/include/hyp/
sysreg-sr.h
19 static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt) in __sysreg_save_common_state() argument
21 ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1); in __sysreg_save_common_state()
24 static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt) in __sysreg_save_user_state() argument
26 ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0); in __sysreg_save_user_state()
27 ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0); in __sysreg_save_user_state()
30 static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt) in ctxt_has_mte() argument
32 struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu; in ctxt_has_mte()
35 vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt); in ctxt_has_mte()
40 static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) in __sysreg_save_el1_state() argument
42 ctxt_sys_reg(ctxt, CSSELR_EL1) = read_sysreg(csselr_el1); in __sysreg_save_el1_state()
[all …]
switch.h
157 &vcpu->arch.ctxt.fp_regs.fpsr); in __hyp_sve_restore_guest()
209 __fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs); in kvm_hyp_handle_fpsimd()
294 #define __ptrauth_save_key(ctxt, key) \ argument
298 ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val; \
300 ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val; \
307 struct kvm_cpu_context *ctxt; in kvm_hyp_handle_ptrauth() local
313 ctxt = this_cpu_ptr(&kvm_hyp_ctxt); in kvm_hyp_handle_ptrauth()
314 __ptrauth_save_key(ctxt, APIA); in kvm_hyp_handle_ptrauth()
315 __ptrauth_save_key(ctxt, APIB); in kvm_hyp_handle_ptrauth()
316 __ptrauth_save_key(ctxt, APDA); in kvm_hyp_handle_ptrauth()
[all …]
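
The switch.h lines show __ptrauth_save_key() forming both halves of a key register by token-pasting the key prefix into key ## KEYLO_EL1 and key ## KEYHI_EL1. A hedged userspace sketch of that paste-into-the-register-name idiom, with the system-register reads stubbed out:

#include <stdio.h>

/* Simplified register-file index, standing in for the kernel's sysreg enum. */
enum { APIAKEYLO_EL1, APIAKEYHI_EL1, APIBKEYLO_EL1, APIBKEYHI_EL1, NR_SYS_REGS };

struct cpu_ctxt { unsigned long sys_regs[NR_SYS_REGS]; };

/* Accessor in the spirit of ctxt_sys_reg(). */
#define ctxt_sys_reg(c, r) ((c)->sys_regs[r])

/* Stub for the system-register read; the real code uses read_sysreg_s(). */
static unsigned long read_sysreg_stub(int reg) { return 0x1000 + reg; }

/* Paste the key prefix into both register names, mirroring how the hyp
 * macro forms key ## KEYLO_EL1 and key ## KEYHI_EL1. */
#define ptrauth_save_key(ctxt, key)                                  \
    do {                                                             \
        ctxt_sys_reg(ctxt, key##KEYLO_EL1) =                         \
            read_sysreg_stub(key##KEYLO_EL1);                        \
        ctxt_sys_reg(ctxt, key##KEYHI_EL1) =                         \
            read_sysreg_stub(key##KEYHI_EL1);                        \
    } while (0)

int main(void)
{
    struct cpu_ctxt ctxt = { { 0 } };

    ptrauth_save_key(&ctxt, APIA);
    ptrauth_save_key(&ctxt, APIB);
    printf("APIA lo=%#lx hi=%#lx\n",
           ctxt_sys_reg(&ctxt, APIAKEYLO_EL1),
           ctxt_sys_reg(&ctxt, APIAKEYHI_EL1));
    return 0;
}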
/linux-6.1.9/net/sunrpc/xprtrdma/
svc_rdma_recvfrom.c
128 struct svc_rdma_recv_ctxt *ctxt; in svc_rdma_recv_ctxt_alloc() local
132 ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL); in svc_rdma_recv_ctxt_alloc()
133 if (!ctxt) in svc_rdma_recv_ctxt_alloc()
143 svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid); in svc_rdma_recv_ctxt_alloc()
144 pcl_init(&ctxt->rc_call_pcl); in svc_rdma_recv_ctxt_alloc()
145 pcl_init(&ctxt->rc_read_pcl); in svc_rdma_recv_ctxt_alloc()
146 pcl_init(&ctxt->rc_write_pcl); in svc_rdma_recv_ctxt_alloc()
147 pcl_init(&ctxt->rc_reply_pcl); in svc_rdma_recv_ctxt_alloc()
149 ctxt->rc_recv_wr.next = NULL; in svc_rdma_recv_ctxt_alloc()
150 ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe; in svc_rdma_recv_ctxt_alloc()
[all …]
svc_rdma_sendto.c
126 struct svc_rdma_send_ctxt *ctxt; in svc_rdma_send_ctxt_alloc() local
132 size = sizeof(*ctxt); in svc_rdma_send_ctxt_alloc()
134 ctxt = kmalloc(size, GFP_KERNEL); in svc_rdma_send_ctxt_alloc()
135 if (!ctxt) in svc_rdma_send_ctxt_alloc()
145 svc_rdma_send_cid_init(rdma, &ctxt->sc_cid); in svc_rdma_send_ctxt_alloc()
147 ctxt->sc_send_wr.next = NULL; in svc_rdma_send_ctxt_alloc()
148 ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe; in svc_rdma_send_ctxt_alloc()
149 ctxt->sc_send_wr.sg_list = ctxt->sc_sges; in svc_rdma_send_ctxt_alloc()
150 ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED; in svc_rdma_send_ctxt_alloc()
151 init_completion(&ctxt->sc_done); in svc_rdma_send_ctxt_alloc()
[all …]
svc_rdma_rw.c
56 struct svc_rdma_rw_ctxt *ctxt; in svc_rdma_get_rw_ctxt() local
63 ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node); in svc_rdma_get_rw_ctxt()
65 ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE), in svc_rdma_get_rw_ctxt()
67 if (!ctxt) in svc_rdma_get_rw_ctxt()
70 INIT_LIST_HEAD(&ctxt->rw_list); in svc_rdma_get_rw_ctxt()
73 ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl; in svc_rdma_get_rw_ctxt()
74 if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges, in svc_rdma_get_rw_ctxt()
75 ctxt->rw_sg_table.sgl, in svc_rdma_get_rw_ctxt()
78 return ctxt; in svc_rdma_get_rw_ctxt()
81 kfree(ctxt); in svc_rdma_get_rw_ctxt()
[all …]
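
svc_rdma_get_rw_ctxt() above pops a recycled context off a lock-free llist and only falls back to kmalloc() when the list is empty. A hedged, single-threaded sketch of that recycle-or-allocate pattern (plain linked list standing in for the kernel's llist):

#include <stdio.h>
#include <stdlib.h>

struct rw_ctxt {
    struct rw_ctxt *next;  /* free-list linkage */
    int scratch;           /* stands in for the real per-I/O state */
};

static struct rw_ctxt *free_list;  /* recycled contexts */

static struct rw_ctxt *get_rw_ctxt(void)
{
    struct rw_ctxt *ctxt = free_list;

    if (ctxt) {
        free_list = ctxt->next;           /* reuse a cached context */
    } else {
        ctxt = calloc(1, sizeof(*ctxt));  /* fall back to the allocator */
        if (!ctxt)
            return NULL;
    }
    ctxt->next = NULL;
    return ctxt;
}

static void put_rw_ctxt(struct rw_ctxt *ctxt)
{
    ctxt->next = free_list;  /* push back for later reuse */
    free_list = ctxt;
}

int main(void)
{
    struct rw_ctxt *a = get_rw_ctxt();  /* freshly allocated */
    put_rw_ctxt(a);
    struct rw_ctxt *b = get_rw_ctxt();  /* recycled: b == a */

    printf("recycled: %s\n", a == b ? "yes" : "no");
    free(b);
    return 0;
}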
/linux-6.1.9/drivers/net/ethernet/intel/ice/
ice_vsi_vlan_lib.c
93 struct ice_vsi_ctx *ctxt; in ice_vsi_manage_vlan_insertion() local
96 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); in ice_vsi_manage_vlan_insertion()
97 if (!ctxt) in ice_vsi_manage_vlan_insertion()
104 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL; in ice_vsi_manage_vlan_insertion()
107 ctxt->info.inner_vlan_flags |= (vsi->info.inner_vlan_flags & in ice_vsi_manage_vlan_insertion()
110 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); in ice_vsi_manage_vlan_insertion()
112 err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_manage_vlan_insertion()
119 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; in ice_vsi_manage_vlan_insertion()
121 kfree(ctxt); in ice_vsi_manage_vlan_insertion()
133 struct ice_vsi_ctx *ctxt; in ice_vsi_manage_vlan_stripping() local
[all …]
/linux-6.1.9/arch/x86/power/
cpu.c
38 static void msr_save_context(struct saved_context *ctxt) in msr_save_context() argument
40 struct saved_msr *msr = ctxt->saved_msrs.array; in msr_save_context()
41 struct saved_msr *end = msr + ctxt->saved_msrs.num; in msr_save_context()
50 static void msr_restore_context(struct saved_context *ctxt) in msr_restore_context() argument
52 struct saved_msr *msr = ctxt->saved_msrs.array; in msr_restore_context()
53 struct saved_msr *end = msr + ctxt->saved_msrs.num; in msr_restore_context()
78 static void __save_processor_state(struct saved_context *ctxt) in __save_processor_state() argument
88 store_idt(&ctxt->idt); in __save_processor_state()
96 ctxt->gdt_desc.size = GDT_SIZE - 1; in __save_processor_state()
97 ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id()); in __save_processor_state()
[all …]
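
The cpu.c hits show msr_save_context() and msr_restore_context() walking the saved_msrs array between msr and msr + num. A hedged sketch of that save/restore walk with the rdmsr/wrmsr accesses replaced by stubs:

#include <stdio.h>
#include <stdint.h>

struct saved_msr {
    uint32_t msr_no;
    uint64_t value;
    int valid;
};

struct saved_context {
    struct saved_msr *array;
    unsigned int num;
};

/* Stubs in place of rdmsr()/wrmsr(); a tiny fake MSR file for the demo. */
static uint64_t fake_msrs[4] = { 0x10, 0x20, 0x30, 0x40 };
static uint64_t rdmsr_stub(uint32_t no) { return fake_msrs[no]; }
static void wrmsr_stub(uint32_t no, uint64_t v) { fake_msrs[no] = v; }

static void msr_save_context(struct saved_context *ctxt)
{
    struct saved_msr *msr = ctxt->array;
    struct saved_msr *end = msr + ctxt->num;

    for (; msr < end; msr++) {
        msr->value = rdmsr_stub(msr->msr_no);
        msr->valid = 1;
    }
}

static void msr_restore_context(struct saved_context *ctxt)
{
    struct saved_msr *msr = ctxt->array;
    struct saved_msr *end = msr + ctxt->num;

    for (; msr < end; msr++)
        if (msr->valid)
            wrmsr_stub(msr->msr_no, msr->value);
}

int main(void)
{
    struct saved_msr list[] = { { .msr_no = 1 }, { .msr_no = 3 } };
    struct saved_context ctxt = { .array = list, .num = 2 };

    msr_save_context(&ctxt);    /* snapshot MSRs 1 and 3 */
    fake_msrs[1] = 0;           /* "lose" a value across suspend */
    msr_restore_context(&ctxt); /* write the snapshot back */
    printf("MSR 1 restored to %#llx\n", (unsigned long long)fake_msrs[1]);
    return 0;
}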
/linux-6.1.9/drivers/net/wireless/intel/iwlwifi/mvm/
phy-ctxt.c
69 static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_ctxt_cmd_hdr() argument
73 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id, in iwl_mvm_phy_ctxt_cmd_hdr()
74 ctxt->color)); in iwl_mvm_phy_ctxt_cmd_hdr()
79 struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_ctxt_set_rxchain() argument
97 if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm, ctxt)) { in iwl_mvm_phy_ctxt_set_rxchain()
128 struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_ctxt_cmd_data_v1() argument
139 iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &tail->rxchain_info, in iwl_mvm_phy_ctxt_cmd_data_v1()
149 struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_ctxt_cmd_data() argument
162 iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info, in iwl_mvm_phy_ctxt_cmd_data()
167 struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_send_rlc() argument
[all …]
/linux-6.1.9/arch/x86/kernel/
sev.c
260 static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt, in vc_fetch_insn_kernel() argument
263 return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE); in vc_fetch_insn_kernel()
266 static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt) in __vc_decode_user_insn() argument
271 insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer); in __vc_decode_user_insn()
274 ctxt->fi.vector = X86_TRAP_PF; in __vc_decode_user_insn()
275 ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER; in __vc_decode_user_insn()
276 ctxt->fi.cr2 = ctxt->regs->ip; in __vc_decode_user_insn()
280 ctxt->fi.vector = X86_TRAP_GP; in __vc_decode_user_insn()
281 ctxt->fi.error_code = 0; in __vc_decode_user_insn()
282 ctxt->fi.cr2 = 0; in __vc_decode_user_insn()
[all …]
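
__vc_decode_user_insn() above does not raise the fault itself; when the fetch fails it records the vector, error code and CR2 in ctxt->fi so the caller can inject the exception later. A hedged sketch of that record-then-report pattern:

#include <stdio.h>

enum es_result { ES_OK, ES_EXCEPTION };

struct fault_info {
    int vector;                /* e.g. #PF or #GP */
    unsigned long error_code;
    unsigned long cr2;         /* faulting address, if any */
};

struct es_em_ctxt {
    unsigned long ip;
    struct fault_info fi;
};

#define X86_TRAP_PF  14
#define X86_PF_USER  0x4
#define X86_PF_INSTR 0x10

/* Stub fetch that always "fails", standing in for the user-space copy. */
static int fetch_user_insn(struct es_em_ctxt *ctxt) { (void)ctxt; return -1; }

static enum es_result decode_user_insn(struct es_em_ctxt *ctxt)
{
    if (fetch_user_insn(ctxt) < 0) {
        /* Describe the exception; the caller injects it later. */
        ctxt->fi.vector = X86_TRAP_PF;
        ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
        ctxt->fi.cr2 = ctxt->ip;
        return ES_EXCEPTION;
    }
    return ES_OK;
}

int main(void)
{
    struct es_em_ctxt ctxt = { .ip = 0x401000 };

    if (decode_user_insn(&ctxt) == ES_EXCEPTION)
        printf("would inject vector %d at cr2=%#lx\n",
               ctxt.fi.vector, ctxt.fi.cr2);
    return 0;
}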
sev-shared.c
174 static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt, in vc_init_em_ctxt() argument
180 memset(ctxt, 0, sizeof(*ctxt)); in vc_init_em_ctxt()
181 ctxt->regs = regs; in vc_init_em_ctxt()
184 ret = vc_decode_insn(ctxt); in vc_init_em_ctxt()
189 static void vc_finish_insn(struct es_em_ctxt *ctxt) in vc_finish_insn() argument
191 ctxt->regs->ip += ctxt->insn.length; in vc_finish_insn()
194 static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt) in verify_exception_info() argument
210 ctxt->fi.vector = v; in verify_exception_info()
213 ctxt->fi.error_code = info >> 32; in verify_exception_info()
223 struct es_em_ctxt *ctxt, in sev_es_ghcb_hv_call() argument
[all …]
/linux-6.1.9/arch/arm64/include/asm/
kvm_asm.h
275 .macro get_vcpu_ptr vcpu, ctxt
276 get_host_ctxt \ctxt, \vcpu
277 ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
280 .macro get_loaded_vcpu vcpu, ctxt
281 adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
282 ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
285 .macro set_loaded_vcpu vcpu, ctxt, tmp
286 adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
287 str \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
314 .macro save_callee_saved_regs ctxt
[all …]
/linux-6.1.9/arch/arm64/kvm/hyp/nvhe/
sysreg-sr.c
21 void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt) in __sysreg_save_state_nvhe() argument
23 __sysreg_save_el1_state(ctxt); in __sysreg_save_state_nvhe()
24 __sysreg_save_common_state(ctxt); in __sysreg_save_state_nvhe()
25 __sysreg_save_user_state(ctxt); in __sysreg_save_state_nvhe()
26 __sysreg_save_el2_return_state(ctxt); in __sysreg_save_state_nvhe()
29 void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt) in __sysreg_restore_state_nvhe() argument
31 __sysreg_restore_el1_state(ctxt); in __sysreg_restore_state_nvhe()
32 __sysreg_restore_common_state(ctxt); in __sysreg_restore_state_nvhe()
33 __sysreg_restore_user_state(ctxt); in __sysreg_restore_state_nvhe()
34 __sysreg_restore_el2_return_state(ctxt); in __sysreg_restore_state_nvhe()
/linux-6.1.9/arch/arm64/kvm/hyp/vhe/
sysreg-sr.c
27 void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt) in sysreg_save_host_state_vhe() argument
29 __sysreg_save_common_state(ctxt); in sysreg_save_host_state_vhe()
33 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) in sysreg_save_guest_state_vhe() argument
35 __sysreg_save_common_state(ctxt); in sysreg_save_guest_state_vhe()
36 __sysreg_save_el2_return_state(ctxt); in sysreg_save_guest_state_vhe()
40 void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt) in sysreg_restore_host_state_vhe() argument
42 __sysreg_restore_common_state(ctxt); in sysreg_restore_host_state_vhe()
46 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) in sysreg_restore_guest_state_vhe() argument
48 __sysreg_restore_common_state(ctxt); in sysreg_restore_guest_state_vhe()
49 __sysreg_restore_el2_return_state(ctxt); in sysreg_restore_guest_state_vhe()
[all …]
/linux-6.1.9/drivers/infiniband/hw/hfi1/
trace_ctxts.h
25 __field(unsigned int, ctxt)
37 __entry->ctxt = uctxt->ctxt;
50 __entry->ctxt,
66 TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt,
69 TP_ARGS(dd, ctxt, subctxt, cinfo),
71 __field(unsigned int, ctxt)
80 __entry->ctxt = ctxt;
90 __entry->ctxt,
100 const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt);
102 TP_PROTO(unsigned int ctxt),
[all …]
trace_rx.h
29 __field(u32, ctxt)
38 __entry->ctxt = packet->rcd->ctxt;
48 __entry->ctxt,
62 __field(u32, ctxt)
67 __entry->ctxt = rcd->ctxt;
73 __entry->ctxt,
80 TP_PROTO(unsigned int ctxt, u16 subctxt, const char *type,
82 TP_ARGS(ctxt, subctxt, type, start, end),
84 __field(unsigned int, ctxt)
91 __entry->ctxt = ctxt;
[all …]
trace_tx.h
170 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
171 TP_ARGS(dd, ctxt, subctxt),
173 __field(u16, ctxt)
177 __entry->ctxt = ctxt;
182 __entry->ctxt,
188 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
190 TP_ARGS(dd, ctxt, subctxt, comp_idx),
192 __field(u16, ctxt)
197 __entry->ctxt = ctxt;
203 __entry->ctxt,
[all …]
init.c
111 if (rcd->ctxt == HFI1_CTRL_CTXT) in hfi1_create_kctxt()
179 rcd->dd->rcd[rcd->ctxt] = NULL; in hfi1_rcd_free()
229 u16 ctxt; in allocate_rcd_index() local
232 for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++) in allocate_rcd_index()
233 if (!dd->rcd[ctxt]) in allocate_rcd_index()
236 if (ctxt < dd->num_rcv_contexts) { in allocate_rcd_index()
237 rcd->ctxt = ctxt; in allocate_rcd_index()
238 dd->rcd[ctxt] = rcd; in allocate_rcd_index()
243 if (ctxt >= dd->num_rcv_contexts) in allocate_rcd_index()
246 *index = ctxt; in allocate_rcd_index()
[all …]
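
allocate_rcd_index() above scans dd->rcd[] for the first free (NULL) slot, stores the context there and records the winning index in rcd->ctxt. A hedged sketch of that first-free-slot scan:

#include <stdio.h>

#define NUM_CTXTS 8

struct recv_ctxt { unsigned int ctxt; };

static struct recv_ctxt *table[NUM_CTXTS];  /* NULL entries are free slots */

/* Returns 0 and fills *index on success, -1 when every slot is taken. */
static int allocate_ctxt_index(struct recv_ctxt *rcd, unsigned int *index)
{
    unsigned int ctxt;

    for (ctxt = 0; ctxt < NUM_CTXTS; ctxt++)
        if (!table[ctxt])
            break;

    if (ctxt >= NUM_CTXTS)
        return -1;  /* no free context slot */

    rcd->ctxt = ctxt;
    table[ctxt] = rcd;
    *index = ctxt;
    return 0;
}

int main(void)
{
    struct recv_ctxt rcd;
    unsigned int idx;

    if (!allocate_ctxt_index(&rcd, &idx))
        printf("allocated context index %u\n", idx);
    return 0;
}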
/linux-6.1.9/arch/x86/xen/
smp_pv.c
253 struct vcpu_guest_context *ctxt; in cpu_initialize_context() local
262 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); in cpu_initialize_context()
263 if (ctxt == NULL) { in cpu_initialize_context()
276 ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle; in cpu_initialize_context()
277 ctxt->flags = VGCF_IN_KERNEL; in cpu_initialize_context()
278 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */ in cpu_initialize_context()
279 ctxt->user_regs.ds = __USER_DS; in cpu_initialize_context()
280 ctxt->user_regs.es = __USER_DS; in cpu_initialize_context()
281 ctxt->user_regs.ss = __KERNEL_DS; in cpu_initialize_context()
282 ctxt->user_regs.cs = __KERNEL_CS; in cpu_initialize_context()
[all …]
pmu.c
30 #define field_offset(ctxt, field) ((void *)((uintptr_t)ctxt + \ argument
31 (uintptr_t)ctxt->field))
202 struct xen_pmu_intel_ctxt *ctxt; in xen_intel_pmu_emulate() local
212 ctxt = &xenpmu_data->pmu.c.intel; in xen_intel_pmu_emulate()
216 reg = &ctxt->global_ovf_ctrl; in xen_intel_pmu_emulate()
219 reg = &ctxt->global_status; in xen_intel_pmu_emulate()
222 reg = &ctxt->global_ctrl; in xen_intel_pmu_emulate()
225 reg = &ctxt->fixed_ctrl; in xen_intel_pmu_emulate()
230 fix_counters = field_offset(ctxt, fixed_counters); in xen_intel_pmu_emulate()
234 arch_cntr_pair = field_offset(ctxt, arch_counters); in xen_intel_pmu_emulate()
[all …]
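
The pmu.c field_offset() macro turns a field that holds a byte offset (relative to the context itself) into a usable pointer, which is how the shared Xen PMU context describes where its arrays live. A hedged sketch of that offset-to-pointer idiom:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* The counter array lives at a byte offset recorded inside the struct. */
struct pmu_ctxt {
    uint64_t global_ctrl;
    uint64_t counters_offset;  /* byte offset from the start of pmu_ctxt */
    uint64_t counters[4];
};

/* Turn "offset stored in ctxt->field" into a pointer, like field_offset(). */
#define field_offset(ctxt, field) \
    ((void *)((uintptr_t)(ctxt) + (uintptr_t)(ctxt)->field))

int main(void)
{
    struct pmu_ctxt ctxt = {
        .counters_offset = offsetof(struct pmu_ctxt, counters),
        .counters = { 5, 6, 7, 8 },
    };
    uint64_t *cnt = field_offset(&ctxt, counters_offset);

    printf("counter[2] via offset = %llu\n", (unsigned long long)cnt[2]);
    return 0;
}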
/linux-6.1.9/fs/nilfs2/
btnode.c
174 struct nilfs_btnode_chkey_ctxt *ctxt) in nilfs_btnode_prepare_change_key() argument
178 __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; in nilfs_btnode_prepare_change_key()
184 obh = ctxt->bh; in nilfs_btnode_prepare_change_key()
185 ctxt->newbh = NULL; in nilfs_btnode_prepare_change_key()
224 ctxt->newbh = nbh; in nilfs_btnode_prepare_change_key()
237 struct nilfs_btnode_chkey_ctxt *ctxt) in nilfs_btnode_commit_change_key() argument
239 struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh; in nilfs_btnode_commit_change_key()
240 __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; in nilfs_btnode_commit_change_key()
267 ctxt->bh = nbh; in nilfs_btnode_commit_change_key()
277 struct nilfs_btnode_chkey_ctxt *ctxt) in nilfs_btnode_abort_change_key() argument
[all …]
/linux-6.1.9/arch/x86/boot/compressed/
sev.c
76 static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt) in vc_decode_insn() argument
81 memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE); in vc_decode_insn()
83 ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64); in vc_decode_insn()
90 static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, in vc_write_mem() argument
98 static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, in vc_read_mem() argument
236 struct es_em_ctxt ctxt; in do_boot_stage2_vc() local
243 result = vc_init_em_ctxt(&ctxt, regs, exit_code); in do_boot_stage2_vc()
250 result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code); in do_boot_stage2_vc()
253 result = vc_handle_ioio(boot_ghcb, &ctxt); in do_boot_stage2_vc()
256 result = vc_handle_cpuid(boot_ghcb, &ctxt); in do_boot_stage2_vc()
[all …]
/linux-6.1.9/arch/arm64/kvm/hyp/include/nvhe/
trap_handler.h
14 #define cpu_reg(ctxt, r) (ctxt)->regs.regs[r] argument
15 #define DECLARE_REG(type, name, ctxt, reg) \ argument
16 type name = (type)cpu_reg(ctxt, (reg))
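
trap_handler.h defines cpu_reg() and DECLARE_REG() so hypercall handlers can pull typed arguments straight out of the saved GPR array. A hedged sketch of the same macro pair over a plain register array:

#include <stdio.h>
#include <stdint.h>

struct cpu_context {
    struct { uint64_t regs[31]; } regs;  /* saved general-purpose registers */
};

/* Same shape as the nVHE helpers: index the saved GPRs, then declare a
 * typed local initialised from a chosen register. */
#define cpu_reg(ctxt, r) ((ctxt)->regs.regs[r])
#define DECLARE_REG(type, name, ctxt, reg) \
    type name = (type)cpu_reg(ctxt, (reg))

static void handle_hypercall(struct cpu_context *ctxt)
{
    DECLARE_REG(uint64_t, func_id, ctxt, 0);  /* register 0: function ID */
    DECLARE_REG(uint64_t, arg, ctxt, 1);      /* register 1: first argument */

    printf("hypercall %#llx, arg %#llx\n",
           (unsigned long long)func_id, (unsigned long long)arg);
}

int main(void)
{
    struct cpu_context ctxt = { { { 0 } } };

    cpu_reg(&ctxt, 0) = 0x8600;  /* pretend guest-set register values */
    cpu_reg(&ctxt, 1) = 0x42;
    handle_hypercall(&ctxt);
    return 0;
}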
/linux-6.1.9/fs/ocfs2/
xattr.c
264 struct ocfs2_xattr_set_ctxt *ctxt);
269 struct ocfs2_xattr_set_ctxt *ctxt);
702 struct ocfs2_xattr_set_ctxt *ctxt) in ocfs2_xattr_extend_allocation() argument
705 handle_t *handle = ctxt->handle; in ocfs2_xattr_extend_allocation()
728 ctxt->data_ac, in ocfs2_xattr_extend_allocation()
729 ctxt->meta_ac, in ocfs2_xattr_extend_allocation()
767 struct ocfs2_xattr_set_ctxt *ctxt) in __ocfs2_remove_xattr_range() argument
771 handle_t *handle = ctxt->handle; in __ocfs2_remove_xattr_range()
783 ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac, in __ocfs2_remove_xattr_range()
784 &ctxt->dealloc); in __ocfs2_remove_xattr_range()
[all …]
