
Searched refs:afu (Results 1 – 25 of 43) sorted by relevance


/linux-5.19.10/drivers/misc/cxl/
native.c:22 static int afu_control(struct cxl_afu *afu, u64 command, u64 clear, in afu_control() argument
29 spin_lock(&afu->afu_cntl_lock); in afu_control()
32 trace_cxl_afu_ctrl(afu, command); in afu_control()
34 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); in afu_control()
35 cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command); in afu_control()
37 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); in afu_control()
40 dev_warn(&afu->dev, "WARNING: AFU control timed out!\n"); in afu_control()
45 if (!cxl_ops->link_ok(afu->adapter, afu)) { in afu_control()
46 afu->enabled = enabled; in afu_control()
54 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); in afu_control()
[all …]
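
The afu_control() hits above follow a common write-then-poll sequence on the per-slice AFU control register. A minimal sketch of that sequence, assuming the cxl_p2n_read()/cxl_p2n_write() helpers and the CXL_AFU_Cntl_An register shown in the excerpt; the result/mask parameters and the 5-second timeout are illustrative, and the real function additionally updates afu->enabled and rechecks cxl_ops->link_ok() when the wait times out.

        /* Sketch only, in the style of drivers/misc/cxl/native.c afu_control(). */
        static int afu_control_sketch(struct cxl_afu *afu, u64 command, u64 clear,
                                      u64 result, u64 mask)
        {
                unsigned long timeout = jiffies + msecs_to_jiffies(5000); /* assumed */
                u64 AFU_Cntl;
                int rc = 0;

                spin_lock(&afu->afu_cntl_lock);

                /* Read-modify-write the control register: drop the 'clear' bits, set the command. */
                AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
                cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);

                /* Poll until the hardware reports the expected state, or give up. */
                AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
                while ((AFU_Cntl & mask) != result) {
                        if (time_after_eq(jiffies, timeout)) {
                                dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
                                rc = -EBUSY;
                                break;
                        }
                        cpu_relax();
                        AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
                }

                spin_unlock(&afu->afu_cntl_lock);
                return rc;
        }
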
guest.c:20 static void pci_error_handlers(struct cxl_afu *afu, in pci_error_handlers() argument
28 if (afu->phb == NULL) in pci_error_handlers()
31 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { in pci_error_handlers()
65 dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat); in guest_handle_psl_slice_error()
70 static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu, in guest_collect_vpd() argument
120 rc = cxl_h_collect_vpd(afu->guest->handle, 0, in guest_collect_vpd()
158 return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info); in guest_get_irq_info()
178 static int afu_read_error_state(struct cxl_afu *afu, int *state_out) in afu_read_error_state() argument
183 if (!afu) in afu_read_error_state()
186 rc = cxl_h_read_error_state(afu->guest->handle, &state); in afu_read_error_state()
[all …]
pci.c:89 #define AFUD_READ(afu, off) in_be64(afu->native->afu_desc_mmio + off) argument
90 #define AFUD_READ_LE(afu, off) in_le64(afu->native->afu_desc_mmio + off) argument
94 #define AFUD_READ_INFO(afu) AFUD_READ(afu, 0x0) argument
103 #define AFUD_READ_CR(afu) AFUD_READ(afu, 0x20) argument
105 #define AFUD_READ_CR_OFF(afu) AFUD_READ(afu, 0x28) argument
106 #define AFUD_READ_PPPSA(afu) AFUD_READ(afu, 0x30) argument
110 #define AFUD_READ_PPPSA_OFF(afu) AFUD_READ(afu, 0x38) argument
111 #define AFUD_READ_EB(afu) AFUD_READ(afu, 0x40) argument
113 #define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48) argument
268 static void dump_afu_descriptor(struct cxl_afu *afu) in dump_afu_descriptor() argument
[all …]
vphb.c:31 struct cxl_afu *afu; in cxl_pci_enable_device_hook() local
35 afu = (struct cxl_afu *)phb->private_data; in cxl_pci_enable_device_hook()
37 if (!cxl_ops->link_ok(afu->adapter, afu)) { in cxl_pci_enable_device_hook()
53 return (cxl_ops->afu_check_and_enable(afu) == 0); in cxl_pci_enable_device_hook()
93 static void cxl_afu_configured_put(struct cxl_afu *afu) in cxl_afu_configured_put() argument
95 atomic_dec_if_positive(&afu->configured_state); in cxl_afu_configured_put()
98 static bool cxl_afu_configured_get(struct cxl_afu *afu) in cxl_afu_configured_get() argument
100 return atomic_inc_unless_negative(&afu->configured_state); in cxl_afu_configured_get()
104 struct cxl_afu *afu, int *_record) in cxl_pcie_config_info() argument
109 if (record > afu->crs_num) in cxl_pcie_config_info()
[all …]
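
cxl_afu_configured_get()/cxl_afu_configured_put() above gate config-space accesses on whether the AFU is still configured: the counter is driven negative on teardown, so atomic_inc_unless_negative() refuses new users while atomic_dec_if_positive() lets existing users drain without going below zero. A small, self-contained sketch of the same gate pattern (the names and the teardown helper are illustrative, not the driver's):

        /* 1 = configured and usable, -1 = deconfigured; stands in for afu->configured_state. */
        static atomic_t gate = ATOMIC_INIT(1);

        static bool gate_get(void)
        {
                /* Fails (returns false) once the gate has gone negative. */
                return atomic_inc_unless_negative(&gate);
        }

        static void gate_put(void)
        {
                /* Never drives the counter below zero. */
                atomic_dec_if_positive(&gate);
        }

        static void gate_teardown(void)
        {
                /* Refuse new users; real teardown must also wait for existing
                 * holders to call gate_put() before freeing anything (not shown). */
                atomic_set(&gate, -1);
        }
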
file.c:28 #define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice)) argument
29 #define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1) argument
30 #define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2) argument
31 #define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu)) argument
32 #define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu)) argument
33 #define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu)) argument
46 struct cxl_afu *afu; in __afu_open() local
61 if (!(afu = adapter->afu[slice])) { in __afu_open()
71 cxl_afu_get(afu); in __afu_open()
74 if (!afu->current_mode) in __afu_open()
[all …]
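
The CXL_AFU_MINOR_* macros above lay out three character-device minors per AFU slice, dedicated (D), master (M) and shared (S), packed directly after the card's own minor: D = card minor + 1 + 3*slice, M = D + 1, S = D + 2. A tiny standalone illustration of that layout, using a hypothetical card minor of 0:

        #include <stdio.h>

        int main(void)
        {
                int card_minor = 0;     /* hypothetical CXL_CARD_MINOR() value */

                for (int slice = 0; slice < 3; slice++) {
                        int d = card_minor + 1 + 3 * slice;
                        printf("slice %d: dedicated=%d master=%d shared=%d\n",
                               slice, d, d + 1, d + 2);
                }
                return 0;
        }
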
sysfs.c:204 struct cxl_afu *afu = to_afu_chardev_m(device); in mmio_size_show_master() local
206 return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size); in mmio_size_show_master()
213 struct cxl_afu *afu = to_afu_chardev_m(device); in pp_mmio_off_show() local
215 return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset); in pp_mmio_off_show()
222 struct cxl_afu *afu = to_afu_chardev_m(device); in pp_mmio_len_show() local
224 return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size); in pp_mmio_len_show()
240 struct cxl_afu *afu = to_cxl_afu(device); in mmio_size_show() local
242 if (afu->pp_size) in mmio_size_show()
243 return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size); in mmio_size_show()
244 return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size); in mmio_size_show()
[all …]
trace.h:70 __field(u8, afu)
75 __entry->card = ctx->afu->adapter->adapter_num;
76 __entry->afu = ctx->afu->slice;
82 __entry->afu,
95 __field(u8, afu)
104 __entry->card = ctx->afu->adapter->adapter_num;
105 __entry->afu = ctx->afu->slice;
115 __entry->afu,
136 __field(u8, afu)
144 __entry->card = ctx->afu->adapter->adapter_num;
[all …]
of.c:70 struct cxl_afu *afu) in read_phys_addr() argument
88 afu->guest->handle = addr; in read_phys_addr()
91 afu->guest->p2n_phys += addr; in read_phys_addr()
92 afu->guest->p2n_size = size; in read_phys_addr()
95 afu->psn_phys += addr; in read_phys_addr()
96 afu->adapter->ps_size = size; in read_phys_addr()
111 static int read_vpd(struct cxl *adapter, struct cxl_afu *afu) in read_vpd() argument
122 rc = cxl_guest_read_afu_vpd(afu, vpd, len); in read_vpd()
131 int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np) in cxl_of_read_afu_handle() argument
133 if (read_handle(afu_np, &afu->guest->handle)) in cxl_of_read_afu_handle()
[all …]
debugfs.c:80 void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir) in cxl_debugfs_add_afu_regs_psl9() argument
82 debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An)); in cxl_debugfs_add_afu_regs_psl9()
85 void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir) in cxl_debugfs_add_afu_regs_psl8() argument
87 debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An)); in cxl_debugfs_add_afu_regs_psl8()
88 debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An)); in cxl_debugfs_add_afu_regs_psl8()
90 debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An)); in cxl_debugfs_add_afu_regs_psl8()
91 debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An)); in cxl_debugfs_add_afu_regs_psl8()
92 debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An)); in cxl_debugfs_add_afu_regs_psl8()
93 debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE)); in cxl_debugfs_add_afu_regs_psl8()
96 void cxl_debugfs_afu_add(struct cxl_afu *afu) in cxl_debugfs_afu_add() argument
[all …]
main.c:37 int cxl_afu_slbia(struct cxl_afu *afu) in cxl_afu_slbia() argument
42 cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL); in cxl_afu_slbia()
43 while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) { in cxl_afu_slbia()
45 dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n"); in cxl_afu_slbia()
51 if (!cxl_ops->link_ok(afu->adapter, afu)) in cxl_afu_slbia()
66 ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe); in _cxl_slbia()
73 cxl_afu_slbia(ctx->afu); in _cxl_slbia()
79 struct cxl_afu *afu; in cxl_slbia_core() local
90 afu = adapter->afu[slice]; in cxl_slbia_core()
91 if (!afu || !afu->enabled) in cxl_slbia_core()
[all …]
cxl.h:541 struct cxl_afu *afu; member
630 int (*afu_regs_init)(struct cxl_afu *afu);
631 int (*sanitise_afu_regs)(struct cxl_afu *afu);
632 int (*register_serr_irq)(struct cxl_afu *afu);
633 void (*release_serr_irq)(struct cxl_afu *afu);
635 irqreturn_t (*fail_irq)(struct cxl_afu *afu, struct cxl_irq_info *irq_info);
636 int (*activate_dedicated_process)(struct cxl_afu *afu);
641 void (*debugfs_add_afu_regs)(struct cxl_afu *afu, struct dentry *dir);
682 struct cxl_afu *afu[CXL_MAX_SLICES]; member
765 static inline bool cxl_adapter_link_ok(struct cxl *cxl, struct cxl_afu *afu) in cxl_adapter_link_ok() argument
[all …]
context.c:35 int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master) in cxl_context_init() argument
39 ctx->afu = afu; in cxl_context_init()
92 mutex_lock(&afu->contexts_lock); in cxl_context_init()
94 i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0, in cxl_context_init()
95 ctx->afu->num_procs, GFP_NOWAIT); in cxl_context_init()
97 mutex_unlock(&afu->contexts_lock); in cxl_context_init()
103 ctx->elem = &ctx->afu->native->spa[i]; in cxl_context_init()
114 cxl_afu_get(afu); in cxl_context_init()
138 if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { in cxl_mmap_fault()
139 area = ctx->afu->psn_phys; in cxl_mmap_fault()
[all …]
api.c:103 struct cxl_afu *afu; in cxl_dev_context_init() local
107 afu = cxl_pci_to_afu(dev); in cxl_dev_context_init()
108 if (IS_ERR(afu)) in cxl_dev_context_init()
109 return ERR_CAST(afu); in cxl_dev_context_init()
118 rc = cxl_context_init(ctx, afu, false); in cxl_dev_context_init()
189 num = ctx->afu->pp_irqs; in cxl_allocate_afu_irqs()
200 cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl"); in cxl_allocate_afu_irqs()
227 cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter); in cxl_free_afu_irqs()
243 return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name); in cxl_map_afu_irq()
282 rc = cxl_adapter_context_get(ctx->afu->adapter); in cxl_start_context()
[all …]
irq.c:64 dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n", in cxl_irq_psl9()
149 dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error " in cxl_irq_psl8()
311 if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, in afu_allocate_irqs()
317 ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq; in afu_allocate_irqs()
338 dev_name(&ctx->afu->dev), in afu_allocate_irqs()
352 cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter); in afu_allocate_irqs()
384 cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx, in afu_register_hwirqs()
419 cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter); in afu_release_irqs()
424 void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr) in cxl_afu_decode_psl_serr() argument
426 dev_crit(&afu->dev, in cxl_afu_decode_psl_serr()
[all …]
/linux-5.19.10/drivers/misc/ocxl/
core.c:18 struct ocxl_afu *afu; in alloc_afu() local
20 afu = kzalloc(sizeof(struct ocxl_afu), GFP_KERNEL); in alloc_afu()
21 if (!afu) in alloc_afu()
24 kref_init(&afu->kref); in alloc_afu()
25 mutex_init(&afu->contexts_lock); in alloc_afu()
26 mutex_init(&afu->afu_control_lock); in alloc_afu()
27 idr_init(&afu->contexts_idr); in alloc_afu()
28 afu->fn = fn; in alloc_afu()
30 return afu; in alloc_afu()
35 struct ocxl_afu *afu = container_of(kref, struct ocxl_afu, kref); in free_afu() local
[all …]
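
alloc_afu()/free_afu() above tie the ocxl_afu lifetime to a kref. A minimal sketch of that reference counting, assuming ocxl_afu_get()/ocxl_afu_put() are thin wrappers over kref_get()/kref_put(), which is what the kref_init() and container_of() in the excerpt suggest; the real free_afu() also releases other resources belonging to the function (not shown):

        static void free_afu(struct kref *kref)
        {
                struct ocxl_afu *afu = container_of(kref, struct ocxl_afu, kref);

                idr_destroy(&afu->contexts_idr);
                kfree(afu);
        }

        void ocxl_afu_get(struct ocxl_afu *afu)
        {
                kref_get(&afu->kref);
        }

        void ocxl_afu_put(struct ocxl_afu *afu)
        {
                /* free_afu() runs when the last reference is dropped. */
                kref_put(&afu->kref, free_afu);
        }
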
mmio.c:7 int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset, in ocxl_global_mmio_read32() argument
10 if (offset > afu->config.global_mmio_size - 4) in ocxl_global_mmio_read32()
20 *val = readl_be((char *)afu->global_mmio_ptr + offset); in ocxl_global_mmio_read32()
24 *val = readl((char *)afu->global_mmio_ptr + offset); in ocxl_global_mmio_read32()
32 int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset, in ocxl_global_mmio_read64() argument
35 if (offset > afu->config.global_mmio_size - 8) in ocxl_global_mmio_read64()
45 *val = readq_be((char *)afu->global_mmio_ptr + offset); in ocxl_global_mmio_read64()
49 *val = readq((char *)afu->global_mmio_ptr + offset); in ocxl_global_mmio_read64()
57 int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset, in ocxl_global_mmio_write32() argument
60 if (offset > afu->config.global_mmio_size - 4) in ocxl_global_mmio_write32()
[all …]
context.c:7 int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu, in ocxl_context_alloc() argument
17 ctx->afu = afu; in ocxl_context_alloc()
18 mutex_lock(&afu->contexts_lock); in ocxl_context_alloc()
19 pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base, in ocxl_context_alloc()
20 afu->pasid_base + afu->pasid_max, GFP_KERNEL); in ocxl_context_alloc()
22 mutex_unlock(&afu->contexts_lock); in ocxl_context_alloc()
26 afu->pasid_count++; in ocxl_context_alloc()
27 mutex_unlock(&afu->contexts_lock); in ocxl_context_alloc()
44 ocxl_afu_get(afu); in ocxl_context_alloc()
85 dev = to_pci_dev(ctx->afu->fn->dev.parent); in ocxl_context_attach()
[all …]
sysfs.c:10 return info->afu; in to_afu()
17 struct ocxl_afu *afu = to_afu(device); in global_mmio_size_show() local
20 afu->config.global_mmio_size); in global_mmio_size_show()
27 struct ocxl_afu *afu = to_afu(device); in pp_mmio_size_show() local
30 afu->config.pp_mmio_stride); in pp_mmio_size_show()
37 struct ocxl_afu *afu = to_afu(device); in afu_version_show() local
40 afu->config.version_major, in afu_version_show()
41 afu->config.version_minor); in afu_version_show()
48 struct ocxl_afu *afu = to_afu(device); in contexts_show() local
51 afu->pasid_count, afu->pasid_max); in contexts_show()
[all …]
config.c:405 struct ocxl_afu_config *afu) in read_afu_name() argument
415 ptr = (u32 *) &afu->name[i]; in read_afu_name()
418 afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */ in read_afu_name()
423 struct ocxl_afu_config *afu) in read_afu_mmio() argument
434 afu->global_mmio_bar = EXTRACT_BITS(val, 0, 2); in read_afu_mmio()
435 afu->global_mmio_offset = EXTRACT_BITS(val, 16, 31) << 16; in read_afu_mmio()
440 afu->global_mmio_offset += (u64) val << 32; in read_afu_mmio()
445 afu->global_mmio_size = val; in read_afu_mmio()
453 afu->pp_mmio_bar = EXTRACT_BITS(val, 0, 2); in read_afu_mmio()
454 afu->pp_mmio_offset = EXTRACT_BITS(val, 16, 31) << 16; in read_afu_mmio()
[all …]
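
read_afu_mmio() above assembles the 64-bit global MMIO location from two 32-bit DVSEC reads: bits 0..2 of the low word select the BAR, bits 16..31 give the low part of the offset (so the offset is 64 KiB aligned), and a second read supplies bits 32..63. A self-contained sketch of that assembly; the EXTRACT_BITS() macro is redefined here to match the (lo..hi inclusive) way the driver uses it, and the register values are hypothetical:

        #include <stdint.h>
        #include <stdio.h>

        /* Assumed to mirror the driver's EXTRACT_BITS(val, lo, hi): bits lo..hi inclusive. */
        #define EXTRACT_BITS(val, lo, hi) \
                (((uint64_t)(val) >> (lo)) & ((1ULL << ((hi) - (lo) + 1)) - 1))

        int main(void)
        {
                uint32_t lo_word = 0x12340002;  /* hypothetical: BAR index in bits 0..2 */
                uint32_t hi_word = 0x00000001;  /* hypothetical: upper 32 bits of the offset */
                uint64_t offset;
                unsigned bar;

                bar = EXTRACT_BITS(lo_word, 0, 2);              /* which BAR the region lives in */
                offset = EXTRACT_BITS(lo_word, 16, 31) << 16;   /* low part, 64 KiB aligned */
                offset += (uint64_t)hi_word << 32;              /* high part */

                printf("global MMIO: BAR%u + 0x%llx\n", bar, (unsigned long long)offset);
                return 0;
        }
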
pci.c:20 struct ocxl_afu *afu, *tmp; in ocxl_probe() local
32 list_for_each_entry_safe(afu, tmp, afu_list, list) { in ocxl_probe()
34 rc = ocxl_file_register_afu(afu); in ocxl_probe()
37 afu->config.name, afu->config.idx); in ocxl_probe()
47 struct ocxl_afu *afu; in ocxl_remove() local
53 list_for_each_entry(afu, afu_list, list) { in ocxl_remove()
54 ocxl_file_unregister_afu(afu); in ocxl_remove()
/linux-5.19.10/drivers/scsi/cxlflash/
common.h:113 struct afu *afu; member
158 struct afu *parent;
204 struct afu *afu; member
230 struct afu { struct
232 int (*send_cmd)(struct afu *afu, struct afu_cmd *cmd); argument
255 static inline struct hwq *get_hwq(struct afu *afu, u32 index) in get_hwq() argument
259 return &afu->hwqs[index]; in get_hwq()
262 static inline bool afu_is_irqpoll_enabled(struct afu *afu) in afu_is_irqpoll_enabled() argument
264 return !!afu->irqpoll_weight; in afu_is_irqpoll_enabled()
267 static inline bool afu_has_cap(struct afu *afu, u64 cap) in afu_has_cap() argument
[all …]
main.c:44 struct afu *afu = cmd->parent; in process_cmd_err() local
45 struct cxlflash_cfg *cfg = afu->parent; in process_cmd_err()
156 struct afu *afu = cmd->parent; in cmd_complete() local
157 struct cxlflash_cfg *cfg = afu->parent; in cmd_complete()
159 struct hwq *hwq = get_hwq(afu, cmd->hwq_index); in cmd_complete()
193 struct cxlflash_cfg *cfg = hwq->afu->parent; in flush_pending_cmds()
237 struct cxlflash_cfg *cfg = hwq->afu->parent; in context_reset()
300 static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd) in send_cmd_ioarrin() argument
302 struct cxlflash_cfg *cfg = afu->parent; in send_cmd_ioarrin()
304 struct hwq *hwq = get_hwq(afu, cmd->hwq_index); in send_cmd_ioarrin()
[all …]
ocxl_hw.c:181 struct ocxl_hw_afu *afu = ctx->hw_afu; in afu_map_irq() local
182 struct device *dev = afu->dev; in afu_map_irq()
253 struct ocxl_hw_afu *afu = ctx->hw_afu; in afu_unmap_irq() local
254 struct device *dev = afu->dev; in afu_unmap_irq()
329 struct ocxl_hw_afu *afu = ctx->hw_afu; in start_context() local
330 struct ocxl_afu_config *acfg = &afu->acfg; in start_context()
331 void *link_token = afu->link_token; in start_context()
332 struct pci_dev *pdev = afu->pdev; in start_context()
333 struct device *dev = afu->dev; in start_context()
349 ctx->psn_phys = afu->gmmio_phys; in start_context()
[all …]
/linux-5.19.10/drivers/fpga/
dfl-afu-region.c:19 struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata); in afu_mmio_region_init() local
21 INIT_LIST_HEAD(&afu->regions); in afu_mmio_region_init()
24 #define for_each_region(region, afu) \ argument
25 list_for_each_entry((region), &(afu)->regions, node)
27 static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu, in get_region_by_index() argument
32 for_each_region(region, afu) in get_region_by_index()
53 struct dfl_afu *afu; in afu_mmio_region_add() local
67 afu = dfl_fpga_pdata_get_private(pdata); in afu_mmio_region_add()
70 if (get_region_by_index(afu, region_index)) { in afu_mmio_region_add()
77 region->offset = afu->region_cur_offset; in afu_mmio_region_add()
[all …]
/linux-5.19.10/include/misc/
ocxl.h:95 void ocxl_afu_get(struct ocxl_afu *afu);
101 void ocxl_afu_put(struct ocxl_afu *afu);
130 int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
212 struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu);
219 void ocxl_afu_set_private(struct ocxl_afu *afu, void *private);
228 void *ocxl_afu_get_private(struct ocxl_afu *afu);
240 int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset,
252 int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset,
264 int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset,
276 int ocxl_global_mmio_write64(struct ocxl_afu *afu, size_t offset,
[all …]
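
The include/misc/ocxl.h prototypes above are the in-kernel interface an AFU driver built on ocxl calls. A hedged sketch of a read-modify-write on an AFU's global MMIO space using those accessors; the surrounding driver setup is elided, the register offset and enable bit are hypothetical, and the enum ocxl_endian argument (OCXL_HOST_ENDIAN below) is assumed from the header rather than visible in this excerpt:

        /* Sketch only: not a complete driver; error handling is abbreviated. */
        static int toggle_enable_bit(struct ocxl_afu *afu, size_t reg_offset)
        {
                u32 val;
                int rc;

                /* Both accessors bounds-check reg_offset against the AFU's configured
                 * global MMIO size (see mmio.c above) and fail if it is out of range. */
                rc = ocxl_global_mmio_read32(afu, reg_offset, OCXL_HOST_ENDIAN, &val);
                if (rc)
                        return rc;

                val |= 0x1;     /* hypothetical enable bit */

                return ocxl_global_mmio_write32(afu, reg_offset, OCXL_HOST_ENDIAN, val);
        }
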
