Lines matching refs: afu

20 static void pci_error_handlers(struct cxl_afu *afu,  in pci_error_handlers()  argument
28 if (afu->phb == NULL) in pci_error_handlers()
31 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { in pci_error_handlers()
65 dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat); in guest_handle_psl_slice_error()
70 static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu, in guest_collect_vpd() argument
120 rc = cxl_h_collect_vpd(afu->guest->handle, 0, in guest_collect_vpd()
158 return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info); in guest_get_irq_info()
178 static int afu_read_error_state(struct cxl_afu *afu, int *state_out) in afu_read_error_state() argument
183 if (!afu) in afu_read_error_state()
186 rc = cxl_h_read_error_state(afu->guest->handle, &state); in afu_read_error_state()
199 struct cxl_afu *afu = data; in guest_slice_irq_err() local
203 rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr); in guest_slice_irq_err()
205 dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc); in guest_slice_irq_err()
208 afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An); in guest_slice_irq_err()
209 dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); in guest_slice_irq_err()
210 cxl_afu_decode_psl_serr(afu, serr); in guest_slice_irq_err()
211 dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error); in guest_slice_irq_err()
212 dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr); in guest_slice_irq_err()
214 rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr); in guest_slice_irq_err()
216 dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n", in guest_slice_irq_err()
268 struct cxl_afu *afu = NULL; in guest_reset() local
274 if ((afu = adapter->afu[i])) { in guest_reset()
275 pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, in guest_reset()
277 cxl_context_detach_all(afu); in guest_reset()
283 if (!rc && (afu = adapter->afu[i])) { in guest_reset()
284 pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, in guest_reset()
286 pci_error_handlers(afu, CXL_RESUME_EVENT, 0); in guest_reset()
355 static int guest_register_serr_irq(struct cxl_afu *afu) in guest_register_serr_irq() argument
357 afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", in guest_register_serr_irq()
358 dev_name(&afu->dev)); in guest_register_serr_irq()
359 if (!afu->err_irq_name) in guest_register_serr_irq()
362 if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq, in guest_register_serr_irq()
363 guest_slice_irq_err, afu, afu->err_irq_name))) { in guest_register_serr_irq()
364 kfree(afu->err_irq_name); in guest_register_serr_irq()
365 afu->err_irq_name = NULL; in guest_register_serr_irq()
372 static void guest_release_serr_irq(struct cxl_afu *afu) in guest_release_serr_irq() argument
374 cxl_unmap_irq(afu->serr_virq, afu); in guest_release_serr_irq()
375 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); in guest_release_serr_irq()
376 kfree(afu->err_irq_name); in guest_release_serr_irq()
381 return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token, in guest_ack_irq()
391 pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice); in disable_afu_irqs()
407 pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice); in enable_afu_irqs()
417 static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx, in _guest_afu_cr_readXX() argument
424 if (afu->crs_len < sz) in _guest_afu_cr_readXX()
427 if (unlikely(offset >= afu->crs_len)) in _guest_afu_cr_readXX()
434 rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset, in _guest_afu_cr_readXX()
461 static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset, in guest_afu_cr_read32() argument
467 rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val); in guest_afu_cr_read32()
473 static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset, in guest_afu_cr_read16() argument
479 rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val); in guest_afu_cr_read16()
485 static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset, in guest_afu_cr_read8() argument
491 rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val); in guest_afu_cr_read8()
497 static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset, in guest_afu_cr_read64() argument
500 return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out); in guest_afu_cr_read64()
503 static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in) in guest_afu_cr_write32() argument
509 static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in) in guest_afu_cr_write16() argument
515 static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) in guest_afu_cr_write8() argument
524 struct cxl *adapter = ctx->afu->adapter; in attach_afu_directed()
591 rc = cxl_h_attach_process(ctx->afu->guest->handle, elem, in attach_afu_directed()
594 if (ctx->master || !ctx->afu->pp_psa) { in attach_afu_directed()
595 ctx->psn_phys = ctx->afu->psn_phys; in attach_afu_directed()
596 ctx->psn_size = ctx->afu->adapter->ps_size; in attach_afu_directed()
601 if (ctx->afu->pp_psa && mmio_size && in attach_afu_directed()
602 ctx->afu->pp_size == 0) { in attach_afu_directed()
611 ctx->afu->pp_size = mmio_size; in attach_afu_directed()
631 if (ctx->afu->current_mode == CXL_MODE_DIRECTED) in guest_attach_process()
643 if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token)) in detach_afu_directed()
653 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) in guest_detach_process()
656 if (ctx->afu->current_mode == CXL_MODE_DIRECTED) in guest_detach_process()
664 struct cxl_afu *afu = to_cxl_afu(dev); in guest_release_afu() local
668 idr_destroy(&afu->contexts_idr); in guest_release_afu()
670 kfree(afu->guest); in guest_release_afu()
671 kfree(afu); in guest_release_afu()
674 ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len) in cxl_guest_read_afu_vpd() argument
676 return guest_collect_vpd(NULL, afu, buf, len); in cxl_guest_read_afu_vpd()
680 static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf, in guest_afu_read_err_buffer() argument
690 rc = cxl_h_get_afu_err(afu->guest->handle, in guest_afu_read_err_buffer()
706 static int guest_afu_check_and_enable(struct cxl_afu *afu) in guest_afu_check_and_enable() argument
735 static int activate_afu_directed(struct cxl_afu *afu) in activate_afu_directed() argument
739 dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice); in activate_afu_directed()
741 afu->current_mode = CXL_MODE_DIRECTED; in activate_afu_directed()
743 afu->num_procs = afu->max_procs_virtualised; in activate_afu_directed()
745 if ((rc = cxl_chardev_m_afu_add(afu))) in activate_afu_directed()
748 if ((rc = cxl_sysfs_afu_m_add(afu))) in activate_afu_directed()
751 if ((rc = cxl_chardev_s_afu_add(afu))) in activate_afu_directed()
756 cxl_sysfs_afu_m_remove(afu); in activate_afu_directed()
758 cxl_chardev_afu_remove(afu); in activate_afu_directed()
762 static int guest_afu_activate_mode(struct cxl_afu *afu, int mode) in guest_afu_activate_mode() argument
766 if (!(mode & afu->modes_supported)) in guest_afu_activate_mode()
770 return activate_afu_directed(afu); in guest_afu_activate_mode()
773 dev_err(&afu->dev, "Dedicated mode not supported\n"); in guest_afu_activate_mode()
778 static int deactivate_afu_directed(struct cxl_afu *afu) in deactivate_afu_directed() argument
780 dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice); in deactivate_afu_directed()
782 afu->current_mode = 0; in deactivate_afu_directed()
783 afu->num_procs = 0; in deactivate_afu_directed()
785 cxl_sysfs_afu_m_remove(afu); in deactivate_afu_directed()
786 cxl_chardev_afu_remove(afu); in deactivate_afu_directed()
788 cxl_ops->afu_reset(afu); in deactivate_afu_directed()
793 static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode) in guest_afu_deactivate_mode() argument
797 if (!(mode & afu->modes_supported)) in guest_afu_deactivate_mode()
801 return deactivate_afu_directed(afu); in guest_afu_deactivate_mode()
805 static int guest_afu_reset(struct cxl_afu *afu) in guest_afu_reset() argument
807 pr_devel("AFU(%d) reset request\n", afu->slice); in guest_afu_reset()
808 return cxl_h_reset_afu(afu->guest->handle); in guest_afu_reset()
811 static int guest_map_slice_regs(struct cxl_afu *afu) in guest_map_slice_regs() argument
813 if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) { in guest_map_slice_regs()
814 dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n", in guest_map_slice_regs()
815 afu->slice); in guest_map_slice_regs()
821 static void guest_unmap_slice_regs(struct cxl_afu *afu) in guest_unmap_slice_regs() argument
823 if (afu->p2n_mmio) in guest_unmap_slice_regs()
824 iounmap(afu->p2n_mmio); in guest_unmap_slice_regs()
827 static int afu_update_state(struct cxl_afu *afu) in afu_update_state() argument
831 rc = afu_read_error_state(afu, &cur_state); in afu_update_state()
835 if (afu->guest->previous_state == cur_state) in afu_update_state()
838 pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state); in afu_update_state()
842 afu->guest->previous_state = cur_state; in afu_update_state()
846 pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, in afu_update_state()
849 cxl_context_detach_all(afu); in afu_update_state()
850 if ((rc = cxl_ops->afu_reset(afu))) in afu_update_state()
853 rc = afu_read_error_state(afu, &cur_state); in afu_update_state()
855 pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, in afu_update_state()
857 pci_error_handlers(afu, CXL_RESUME_EVENT, 0); in afu_update_state()
859 afu->guest->previous_state = 0; in afu_update_state()
863 afu->guest->previous_state = cur_state; in afu_update_state()
867 dev_err(&afu->dev, "AFU is in permanent error state\n"); in afu_update_state()
868 pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, in afu_update_state()
870 afu->guest->previous_state = cur_state; in afu_update_state()
875 afu->slice, cur_state); in afu_update_state()
896 static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu) in guest_link_ok() argument
900 if (afu && (!afu_read_error_state(afu, &state))) { in guest_link_ok()
908 static int afu_properties_look_ok(struct cxl_afu *afu) in afu_properties_look_ok() argument
910 if (afu->pp_irqs < 0) { in afu_properties_look_ok()
911 dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n"); in afu_properties_look_ok()
915 if (afu->max_procs_virtualised < 1) { in afu_properties_look_ok()
916 dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n"); in afu_properties_look_ok()
925 struct cxl_afu *afu; in cxl_guest_init_afu() local
930 if (!(afu = cxl_alloc_afu(adapter, slice))) in cxl_guest_init_afu()
933 if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) { in cxl_guest_init_afu()
934 kfree(afu); in cxl_guest_init_afu()
938 if ((rc = dev_set_name(&afu->dev, "afu%i.%i", in cxl_guest_init_afu()
945 if ((rc = cxl_of_read_afu_handle(afu, afu_np))) in cxl_guest_init_afu()
948 if ((rc = cxl_ops->afu_reset(afu))) in cxl_guest_init_afu()
951 if ((rc = cxl_of_read_afu_properties(afu, afu_np))) in cxl_guest_init_afu()
954 if ((rc = afu_properties_look_ok(afu))) in cxl_guest_init_afu()
957 if ((rc = guest_map_slice_regs(afu))) in cxl_guest_init_afu()
960 if ((rc = guest_register_serr_irq(afu))) in cxl_guest_init_afu()
967 if ((rc = cxl_register_afu(afu))) in cxl_guest_init_afu()
970 if ((rc = cxl_sysfs_afu_add(afu))) in cxl_guest_init_afu()
981 if (afu->max_procs_virtualised == 1) in cxl_guest_init_afu()
982 afu->modes_supported = CXL_MODE_DEDICATED; in cxl_guest_init_afu()
984 afu->modes_supported = CXL_MODE_DIRECTED; in cxl_guest_init_afu()
986 if ((rc = cxl_afu_select_best_mode(afu))) in cxl_guest_init_afu()
989 adapter->afu[afu->slice] = afu; in cxl_guest_init_afu()
991 afu->enabled = true; in cxl_guest_init_afu()
997 afu->guest->parent = afu; in cxl_guest_init_afu()
998 afu->guest->handle_err = true; in cxl_guest_init_afu()
999 INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate); in cxl_guest_init_afu()
1000 schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000)); in cxl_guest_init_afu()
1002 if ((rc = cxl_pci_vphb_add(afu))) in cxl_guest_init_afu()
1003 dev_info(&afu->dev, "Can't register vPHB\n"); in cxl_guest_init_afu()
1008 cxl_sysfs_afu_remove(afu); in cxl_guest_init_afu()
1010 device_unregister(&afu->dev); in cxl_guest_init_afu()
1012 guest_release_serr_irq(afu); in cxl_guest_init_afu()
1014 guest_unmap_slice_regs(afu); in cxl_guest_init_afu()
1017 kfree(afu->guest); in cxl_guest_init_afu()
1018 kfree(afu); in cxl_guest_init_afu()
1023 void cxl_guest_remove_afu(struct cxl_afu *afu) in cxl_guest_remove_afu() argument
1025 if (!afu) in cxl_guest_remove_afu()
1029 afu->guest->handle_err = false; in cxl_guest_remove_afu()
1030 flush_delayed_work(&afu->guest->work_err); in cxl_guest_remove_afu()
1032 cxl_pci_vphb_remove(afu); in cxl_guest_remove_afu()
1033 cxl_sysfs_afu_remove(afu); in cxl_guest_remove_afu()
1035 spin_lock(&afu->adapter->afu_list_lock); in cxl_guest_remove_afu()
1036 afu->adapter->afu[afu->slice] = NULL; in cxl_guest_remove_afu()
1037 spin_unlock(&afu->adapter->afu_list_lock); in cxl_guest_remove_afu()
1039 cxl_context_detach_all(afu); in cxl_guest_remove_afu()
1040 cxl_ops->afu_deactivate_mode(afu, afu->current_mode); in cxl_guest_remove_afu()
1041 guest_release_serr_irq(afu); in cxl_guest_remove_afu()
1042 guest_unmap_slice_regs(afu); in cxl_guest_remove_afu()
1044 device_unregister(&afu->dev); in cxl_guest_remove_afu()
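
The guest_* handlers listed above (guest_afu_reset, guest_afu_activate_mode, guest_link_ok, and so on) follow the usual backend pattern of per-implementation callbacks dispatched through an ops table, as hinted by the cxl_ops->afu_reset(afu) and cxl_ops->afu_deactivate_mode(...) call sites in the listing. Below is a minimal, self-contained user-space sketch of that dispatch pattern only; toy_afu, toy_backend_ops, and the toy_guest_* functions are hypothetical reduced stand-ins, not the kernel's real cxl structures or API.

/*
 * Minimal sketch of the ops-table dispatch pattern.
 * toy_afu and toy_backend_ops are hypothetical reduced types,
 * not the kernel's cxl structures.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_afu {
	int slice;
	int current_mode;
};

struct toy_backend_ops {
	int  (*afu_reset)(struct toy_afu *afu);
	int  (*afu_activate_mode)(struct toy_afu *afu, int mode);
	bool (*link_ok)(struct toy_afu *afu);
};

/* Guest-flavoured implementations, analogous in shape to guest_afu_reset() etc. */
static int toy_guest_afu_reset(struct toy_afu *afu)
{
	printf("AFU(%d) reset request\n", afu->slice);
	return 0;
}

static int toy_guest_afu_activate_mode(struct toy_afu *afu, int mode)
{
	afu->current_mode = mode;
	printf("AFU(%d) activated mode %#x\n", afu->slice, mode);
	return 0;
}

static bool toy_guest_link_ok(struct toy_afu *afu)
{
	return afu != NULL;
}

/* The table that generic code dispatches through, like cxl_ops->afu_reset(afu). */
static const struct toy_backend_ops toy_guest_ops = {
	.afu_reset         = toy_guest_afu_reset,
	.afu_activate_mode = toy_guest_afu_activate_mode,
	.link_ok           = toy_guest_link_ok,
};

int main(void)
{
	struct toy_afu afu = { .slice = 0, .current_mode = 0 };
	const struct toy_backend_ops *ops = &toy_guest_ops;

	/* Generic code only sees the ops pointer, never the guest specifics. */
	if (ops->link_ok(&afu) && ops->afu_reset(&afu) == 0)
		ops->afu_activate_mode(&afu, 0x4 /* hypothetical mode bit */);

	return 0;
}

The design choice this illustrates: generic AFU management code calls through the table, so a bare-metal and a guest (paravirtualised) backend can provide different implementations of the same operations without the callers changing.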