Lines matching refs: sde (references to struct sdma_engine *sde in the hfi1 SDMA engine source; each entry gives the source line number, the matching line, and, where known, the enclosing function or context)
201 struct sdma_engine *sde,
204 struct sdma_engine *sde,
206 static void dump_sdma_state(struct sdma_engine *sde);
207 static void sdma_make_progress(struct sdma_engine *sde, u64 status);
208 static void sdma_desc_avail(struct sdma_engine *sde, uint avail);
209 static void sdma_flush_descq(struct sdma_engine *sde);
245 struct sdma_engine *sde, in write_sde_csr() argument
249 write_kctxt_csr(sde->dd, sde->this_idx, offset0, value); in write_sde_csr()
253 struct sdma_engine *sde, in read_sde_csr() argument
256 return read_kctxt_csr(sde->dd, sde->this_idx, offset0); in read_sde_csr()
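The two helpers at lines 245-256 fold the engine index into every register access: write_sde_csr()/read_sde_csr() pass sde->this_idx to the per-context CSR accessors, and sdma_wait_for_packet_egress() just below computes a byte offset of 8 * this_idx into an array of 64-bit registers. A minimal user-space model of that indexed-register idea, with hypothetical names and an in-memory array standing in for the real MMIO space:

#include <stdio.h>
#include <stdint.h>

#define NUM_ENGINES   16        /* hypothetical engine count */
#define REGS_PER_ENG   4        /* hypothetical per-engine register block */

/* In-memory stand-in for a per-engine CSR block (the driver uses MMIO). */
static uint64_t csr_space[NUM_ENGINES * REGS_PER_ENG];

static void write_eng_csr(unsigned idx, unsigned offset, uint64_t value)
{
        csr_space[idx * REGS_PER_ENG + offset] = value;
}

static uint64_t read_eng_csr(unsigned idx, unsigned offset)
{
        return csr_space[idx * REGS_PER_ENG + offset];
}

int main(void)
{
        write_eng_csr(3, 1, 0xabcdULL);
        printf("engine 3, reg 1 = 0x%llx\n",
               (unsigned long long)read_eng_csr(3, 1));
        return 0;
}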
263 static void sdma_wait_for_packet_egress(struct sdma_engine *sde, in sdma_wait_for_packet_egress() argument
266 u64 off = 8 * sde->this_idx; in sdma_wait_for_packet_egress()
267 struct hfi1_devdata *dd = sde->dd; in sdma_wait_for_packet_egress()
286 __func__, sde->this_idx, (u32)reg); in sdma_wait_for_packet_egress()
304 struct sdma_engine *sde = &dd->per_sdma[i]; in sdma_wait() local
306 sdma_wait_for_packet_egress(sde, 0); in sdma_wait()
310 static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt) in sdma_set_desc_cnt() argument
314 if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT)) in sdma_set_desc_cnt()
319 write_sde_csr(sde, SD(DESC_CNT), reg); in sdma_set_desc_cnt()
322 static inline void complete_tx(struct sdma_engine *sde, in complete_tx() argument
331 trace_hfi1_sdma_out_sn(sde, tx->sn); in complete_tx()
332 if (WARN_ON_ONCE(sde->head_sn != tx->sn)) in complete_tx()
333 dd_dev_err(sde->dd, "expected %llu got %llu\n", in complete_tx()
334 sde->head_sn, tx->sn); in complete_tx()
335 sde->head_sn++; in complete_tx()
337 __sdma_txclean(sde->dd, tx); in complete_tx()
362 static void sdma_flush(struct sdma_engine *sde) in sdma_flush() argument
370 sdma_flush_descq(sde); in sdma_flush()
371 spin_lock_irqsave(&sde->flushlist_lock, flags); in sdma_flush()
373 list_splice_init(&sde->flushlist, &flushlist); in sdma_flush()
374 spin_unlock_irqrestore(&sde->flushlist_lock, flags); in sdma_flush()
377 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); in sdma_flush()
382 seq = read_seqbegin(&sde->waitlock); in sdma_flush()
383 if (!list_empty(&sde->dmawait)) { in sdma_flush()
384 write_seqlock(&sde->waitlock); in sdma_flush()
385 list_for_each_entry_safe(w, nw, &sde->dmawait, list) { in sdma_flush()
391 write_sequnlock(&sde->waitlock); in sdma_flush()
393 } while (read_seqretry(&sde->waitlock, seq)); in sdma_flush()
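The loop that ends at line 393 is an optimistic-check pattern around sde->waitlock: read the sequence count, peek at the dmawait list without the write lock, take the write seqlock only when there is work to splice, and retry the whole check if the sequence changed underneath. sdma_desc_avail() further down (lines 1738-1774) uses the same shape. A minimal single-threaded sketch of a sequence-counter reader and writer, with hypothetical helpers rather than the kernel seqlock API:

#include <stdio.h>

static unsigned seq;            /* even: stable, odd: write in progress */
static int shared_value;

static unsigned read_begin(void)  { return seq; }
static int read_retry(unsigned s) { return (s & 1) || s != seq; }

static void write_update(int v)
{
        seq++;                  /* odd: concurrent readers will retry */
        shared_value = v;
        seq++;                  /* even again: new snapshot is visible */
        /* Real seqlocks also issue memory barriers; omitted here. */
}

int main(void)
{
        int snap;
        unsigned s;
        int raced = 1;

        do {
                s = read_begin();
                snap = shared_value;
                if (raced) {            /* simulate one concurrent writer */
                        write_update(42);
                        raced = 0;
                }
        } while (read_retry(s));

        printf("stable snapshot = %d\n", snap);
        return 0;
}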
409 struct sdma_engine *sde = in sdma_field_flush() local
412 write_seqlock_irqsave(&sde->head_lock, flags); in sdma_field_flush()
413 if (!__sdma_running(sde)) in sdma_field_flush()
414 sdma_flush(sde); in sdma_field_flush()
415 write_sequnlock_irqrestore(&sde->head_lock, flags); in sdma_field_flush()
420 struct sdma_engine *sde = container_of(work, struct sdma_engine, in sdma_err_halt_wait() local
427 statuscsr = read_sde_csr(sde, SD(STATUS)); in sdma_err_halt_wait()
432 dd_dev_err(sde->dd, in sdma_err_halt_wait()
434 sde->this_idx); in sdma_err_halt_wait()
444 sdma_process_event(sde, sdma_event_e15_hw_halt_done); in sdma_err_halt_wait()
447 static void sdma_err_progress_check_schedule(struct sdma_engine *sde) in sdma_err_progress_check_schedule() argument
449 if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) { in sdma_err_progress_check_schedule()
451 struct hfi1_devdata *dd = sde->dd; in sdma_err_progress_check_schedule()
456 if (curr_sdma != sde) in sdma_err_progress_check_schedule()
460 dd_dev_err(sde->dd, in sdma_err_progress_check_schedule()
462 sde->this_idx); in sdma_err_progress_check_schedule()
463 mod_timer(&sde->err_progress_check_timer, jiffies + 10); in sdma_err_progress_check_schedule()
470 struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer); in sdma_err_progress_check() local
472 dd_dev_err(sde->dd, "SDE progress check event\n"); in sdma_err_progress_check()
473 for (index = 0; index < sde->dd->num_sdma; index++) { in sdma_err_progress_check()
474 struct sdma_engine *curr_sde = &sde->dd->per_sdma[index]; in sdma_err_progress_check()
478 if (curr_sde == sde) in sdma_err_progress_check()
503 schedule_work(&sde->err_halt_worker); in sdma_err_progress_check()
508 struct sdma_engine *sde = from_tasklet(sde, t, in sdma_hw_clean_up_task() local
514 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", in sdma_hw_clean_up_task()
515 sde->this_idx, slashstrip(__FILE__), __LINE__, in sdma_hw_clean_up_task()
518 statuscsr = read_sde_csr(sde, SD(STATUS)); in sdma_hw_clean_up_task()
525 sdma_process_event(sde, sdma_event_e25_hw_clean_up_done); in sdma_hw_clean_up_task()
528 static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde) in get_txhead() argument
530 return sde->tx_ring[sde->tx_head & sde->sdma_mask]; in get_txhead()
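get_txhead() at line 530 indexes the completion ring with a free-running counter masked by sdma_mask, which works because the ring size is a power of two (see lines 1384-1385 in sdma_init). A small stand-alone sketch of that indexing scheme, with hypothetical names and sizes:

#include <stdio.h>
#include <stdint.h>

#define RING_CNT  8                     /* must be a power of two */
#define RING_MASK (RING_CNT - 1)

static const char *ring[RING_CNT];
static uint16_t head, tail;             /* free-running, never wrapped by hand */

static unsigned ring_used(void) { return (uint16_t)(tail - head); }

int main(void)
{
        for (int i = 0; i < 10; i++) {
                if (ring_used() == RING_CNT)
                        ring[head++ & RING_MASK] = NULL;   /* retire oldest */
                ring[tail++ & RING_MASK] = "txreq";
        }
        printf("head=%u tail=%u head slot=%u\n",
               (unsigned)head, (unsigned)tail, (unsigned)(head & RING_MASK));
        return 0;
}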
536 static void sdma_flush_descq(struct sdma_engine *sde) in sdma_flush_descq() argument
540 struct sdma_txreq *txp = get_txhead(sde); in sdma_flush_descq()
547 head = sde->descq_head & sde->sdma_mask; in sdma_flush_descq()
548 tail = sde->descq_tail & sde->sdma_mask; in sdma_flush_descq()
551 head = ++sde->descq_head & sde->sdma_mask; in sdma_flush_descq()
555 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL; in sdma_flush_descq()
556 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); in sdma_flush_descq()
557 trace_hfi1_sdma_progress(sde, head, tail, txp); in sdma_flush_descq()
558 txp = get_txhead(sde); in sdma_flush_descq()
563 sdma_desc_avail(sde, sdma_descq_freecnt(sde)); in sdma_flush_descq()
568 struct sdma_engine *sde = from_tasklet(sde, t, sdma_sw_clean_up_task); in sdma_sw_clean_up_task() local
571 spin_lock_irqsave(&sde->tail_lock, flags); in sdma_sw_clean_up_task()
572 write_seqlock(&sde->head_lock); in sdma_sw_clean_up_task()
593 sdma_make_progress(sde, 0); in sdma_sw_clean_up_task()
595 sdma_flush(sde); in sdma_sw_clean_up_task()
602 sde->descq_tail = 0; in sdma_sw_clean_up_task()
603 sde->descq_head = 0; in sdma_sw_clean_up_task()
604 sde->desc_avail = sdma_descq_freecnt(sde); in sdma_sw_clean_up_task()
605 *sde->head_dma = 0; in sdma_sw_clean_up_task()
607 __sdma_process_event(sde, sdma_event_e40_sw_cleaned); in sdma_sw_clean_up_task()
609 write_sequnlock(&sde->head_lock); in sdma_sw_clean_up_task()
610 spin_unlock_irqrestore(&sde->tail_lock, flags); in sdma_sw_clean_up_task()
613 static void sdma_sw_tear_down(struct sdma_engine *sde) in sdma_sw_tear_down() argument
615 struct sdma_state *ss = &sde->state; in sdma_sw_tear_down()
621 atomic_set(&sde->dd->sdma_unfreeze_count, -1); in sdma_sw_tear_down()
622 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); in sdma_sw_tear_down()
625 static void sdma_start_hw_clean_up(struct sdma_engine *sde) in sdma_start_hw_clean_up() argument
627 tasklet_hi_schedule(&sde->sdma_hw_clean_up_task); in sdma_start_hw_clean_up()
630 static void sdma_set_state(struct sdma_engine *sde, in sdma_set_state() argument
633 struct sdma_state *ss = &sde->state; in sdma_set_state()
638 sde, in sdma_set_state()
649 sdma_flush(sde); in sdma_set_state()
670 sdma_sendctrl(sde, ss->current_op); in sdma_set_state()
708 int sdma_engine_get_vl(struct sdma_engine *sde) in sdma_engine_get_vl() argument
710 struct hfi1_devdata *dd = sde->dd; in sdma_engine_get_vl()
714 if (sde->this_idx >= TXE_NUM_SDMA_ENGINES) in sdma_engine_get_vl()
723 vl = m->engine_to_vl[sde->this_idx]; in sdma_engine_get_vl()
764 rval = e->sde[selector & e->mask]; in sdma_select_engine_vl()
795 struct sdma_engine *sde[]; member
831 struct sdma_engine *sde = NULL; in sdma_select_user_engine() local
849 sde = map->sde[selector & map->mask]; in sdma_select_user_engine()
853 if (sde) in sdma_select_user_engine()
854 return sde; in sdma_select_user_engine()
865 map->sde[map->ctr + i] = map->sde[i]; in sdma_populate_sde_map()
869 struct sdma_engine *sde) in sdma_cleanup_sde_map() argument
875 if (map->sde[i] == sde) { in sdma_cleanup_sde_map()
876 memmove(&map->sde[i], &map->sde[i + 1], in sdma_cleanup_sde_map()
877 (map->ctr - i - 1) * sizeof(map->sde[0])); in sdma_cleanup_sde_map()
892 ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf, in sdma_set_cpu_to_sde_map() argument
895 struct hfi1_devdata *dd = sde->dd; in sdma_set_cpu_to_sde_map()
901 vl = sdma_engine_get_vl(sde); in sdma_set_cpu_to_sde_map()
919 dd_dev_warn(sde->dd, "Invalid CPU mask\n"); in sdma_set_cpu_to_sde_map()
931 if (cpumask_test_cpu(cpu, &sde->cpu_mask)) { in sdma_set_cpu_to_sde_map()
954 rht_node->map[vl]->sde[0] = sde; in sdma_set_cpu_to_sde_map()
962 dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n", in sdma_set_cpu_to_sde_map()
981 rht_node->map[vl]->sde[ctr - 1] = sde; in sdma_set_cpu_to_sde_map()
1009 sde); in sdma_set_cpu_to_sde_map()
1036 cpumask_copy(&sde->cpu_mask, new_mask); in sdma_set_cpu_to_sde_map()
1045 ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf) in sdma_get_cpu_to_sde_map() argument
1048 if (cpumask_empty(&sde->cpu_mask)) in sdma_get_cpu_to_sde_map()
1051 cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask); in sdma_get_cpu_to_sde_map()
1095 if (!rht_node->map[i]->sde[j]) in sdma_seqfile_dump_cpu_list()
1102 rht_node->map[i]->sde[j]->this_idx); in sdma_seqfile_dump_cpu_list()
1211 newmap->map[i]->sde[j] = in sdma_map_init()
1256 struct sdma_engine *sde; in sdma_clean() local
1273 sde = &dd->per_sdma[i]; in sdma_clean()
1275 sde->head_dma = NULL; in sdma_clean()
1276 sde->head_phys = 0; in sdma_clean()
1278 if (sde->descq) { in sdma_clean()
1281 sde->descq_cnt * sizeof(u64[2]), in sdma_clean()
1282 sde->descq, in sdma_clean()
1283 sde->descq_phys in sdma_clean()
1285 sde->descq = NULL; in sdma_clean()
1286 sde->descq_phys = 0; in sdma_clean()
1288 kvfree(sde->tx_ring); in sdma_clean()
1289 sde->tx_ring = NULL; in sdma_clean()
1322 struct sdma_engine *sde; in sdma_init() local
1378 sde = &dd->per_sdma[this_idx]; in sdma_init()
1379 sde->dd = dd; in sdma_init()
1380 sde->ppd = ppd; in sdma_init()
1381 sde->this_idx = this_idx; in sdma_init()
1382 sde->descq_cnt = descq_cnt; in sdma_init()
1383 sde->desc_avail = sdma_descq_freecnt(sde); in sdma_init()
1384 sde->sdma_shift = ilog2(descq_cnt); in sdma_init()
1385 sde->sdma_mask = (1 << sde->sdma_shift) - 1; in sdma_init()
1388 sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES + in sdma_init()
1390 sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES + in sdma_init()
1392 sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES + in sdma_init()
1395 sde->imask = sde->int_mask | sde->progress_mask | in sdma_init()
1396 sde->idle_mask; in sdma_init()
1398 spin_lock_init(&sde->tail_lock); in sdma_init()
1399 seqlock_init(&sde->head_lock); in sdma_init()
1400 spin_lock_init(&sde->senddmactrl_lock); in sdma_init()
1401 spin_lock_init(&sde->flushlist_lock); in sdma_init()
1402 seqlock_init(&sde->waitlock); in sdma_init()
1404 sde->ahg_bits = 0xfffffffe00000000ULL; in sdma_init()
1406 sdma_set_state(sde, sdma_state_s00_hw_down); in sdma_init()
1409 kref_init(&sde->state.kref); in sdma_init()
1410 init_completion(&sde->state.comp); in sdma_init()
1412 INIT_LIST_HEAD(&sde->flushlist); in sdma_init()
1413 INIT_LIST_HEAD(&sde->dmawait); in sdma_init()
1415 sde->tail_csr = in sdma_init()
1418 tasklet_setup(&sde->sdma_hw_clean_up_task, in sdma_init()
1420 tasklet_setup(&sde->sdma_sw_clean_up_task, in sdma_init()
1422 INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait); in sdma_init()
1423 INIT_WORK(&sde->flush_worker, sdma_field_flush); in sdma_init()
1425 sde->progress_check_head = 0; in sdma_init()
1427 timer_setup(&sde->err_progress_check_timer, in sdma_init()
1430 sde->descq = dma_alloc_coherent(&dd->pcidev->dev, in sdma_init()
1432 &sde->descq_phys, GFP_KERNEL); in sdma_init()
1433 if (!sde->descq) in sdma_init()
1435 sde->tx_ring = in sdma_init()
1439 if (!sde->tx_ring) in sdma_init()
1467 sde = &dd->per_sdma[this_idx]; in sdma_init()
1469 sde->head_dma = curr_head; in sdma_init()
1471 phys_offset = (unsigned long)sde->head_dma - in sdma_init()
1473 sde->head_phys = dd->sdma_heads_phys + phys_offset; in sdma_init()
1474 init_sdma_regs(sde, per_sdma_credits, idle_cnt); in sdma_init()
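Two derived values in the sdma_init() block above are worth spelling out: the ring shift and mask come from ilog2(descq_cnt) at lines 1384-1385, and the three per-engine interrupt bits built at lines 1388-1396 sit in separate banks of a status word, each bank TXE_NUM_SDMA_ENGINES bits wide, before being OR-ed into imask. A small sketch of both computations, using hypothetical constants in place of the device-provided ring size and engine count:

#include <stdio.h>
#include <stdint.h>

#define NUM_ENGINES 16                  /* stand-in for TXE_NUM_SDMA_ENGINES */

/* Integer log2 of a power-of-two count, like the kernel's ilog2(). */
static unsigned my_ilog2(unsigned v)
{
        unsigned r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned descq_cnt = 2048;      /* hypothetical ring size */
        unsigned idx = 5;               /* hypothetical engine index */

        unsigned shift = my_ilog2(descq_cnt);
        unsigned mask  = (1u << shift) - 1;

        /* One bit per engine in each of three banks of the status word. */
        uint64_t int_mask      = (uint64_t)1 << (0 * NUM_ENGINES + idx);
        uint64_t progress_mask = (uint64_t)1 << (1 * NUM_ENGINES + idx);
        uint64_t idle_mask     = (uint64_t)1 << (2 * NUM_ENGINES + idx);
        uint64_t imask = int_mask | progress_mask | idle_mask;

        printf("shift=%u mask=0x%x imask=0x%llx\n",
               shift, mask, (unsigned long long)imask);
        return 0;
}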
1513 struct sdma_engine *sde; in sdma_all_running() local
1518 sde = &dd->per_sdma[i]; in sdma_all_running()
1519 sdma_process_event(sde, sdma_event_e30_go_running); in sdma_all_running()
1531 struct sdma_engine *sde; in sdma_all_idle() local
1536 sde = &dd->per_sdma[i]; in sdma_all_idle()
1537 sdma_process_event(sde, sdma_event_e70_go_idle); in sdma_all_idle()
1552 struct sdma_engine *sde; in sdma_start() local
1556 sde = &dd->per_sdma[i]; in sdma_start()
1557 sdma_process_event(sde, sdma_event_e10_go_hw_start); in sdma_start()
1568 struct sdma_engine *sde; in sdma_exit() local
1572 sde = &dd->per_sdma[this_idx]; in sdma_exit()
1573 if (!list_empty(&sde->dmawait)) in sdma_exit()
1575 sde->this_idx); in sdma_exit()
1576 sdma_process_event(sde, sdma_event_e00_go_hw_down); in sdma_exit()
1578 del_timer_sync(&sde->err_progress_check_timer); in sdma_exit()
1585 sdma_finalput(&sde->state); in sdma_exit()
1662 static inline u16 sdma_gethead(struct sdma_engine *sde) in sdma_gethead() argument
1664 struct hfi1_devdata *dd = sde->dd; in sdma_gethead()
1669 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", in sdma_gethead()
1670 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); in sdma_gethead()
1674 use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) && in sdma_gethead()
1677 (u16)le64_to_cpu(*sde->head_dma) : in sdma_gethead()
1678 (u16)read_sde_csr(sde, SD(HEAD)); in sdma_gethead()
1686 swhead = sde->descq_head & sde->sdma_mask; in sdma_gethead()
1688 swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; in sdma_gethead()
1689 cnt = sde->descq_cnt; in sdma_gethead()
1704 sde->this_idx, in sdma_gethead()
1725 static void sdma_desc_avail(struct sdma_engine *sde, uint avail) in sdma_desc_avail() argument
1732 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in sdma_desc_avail()
1734 dd_dev_err(sde->dd, "avail: %u\n", avail); in sdma_desc_avail()
1738 seq = read_seqbegin(&sde->waitlock); in sdma_desc_avail()
1739 if (!list_empty(&sde->dmawait)) { in sdma_desc_avail()
1741 write_seqlock(&sde->waitlock); in sdma_desc_avail()
1746 &sde->dmawait, in sdma_desc_avail()
1771 write_sequnlock(&sde->waitlock); in sdma_desc_avail()
1774 } while (read_seqretry(&sde->waitlock, seq)); in sdma_desc_avail()
1786 static void sdma_make_progress(struct sdma_engine *sde, u64 status) in sdma_make_progress() argument
1793 hwhead = sdma_gethead(sde); in sdma_make_progress()
1802 txp = get_txhead(sde); in sdma_make_progress()
1803 swhead = sde->descq_head & sde->sdma_mask; in sdma_make_progress()
1804 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp); in sdma_make_progress()
1807 swhead = ++sde->descq_head & sde->sdma_mask; in sdma_make_progress()
1812 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL; in sdma_make_progress()
1813 complete_tx(sde, txp, SDMA_TXREQ_S_OK); in sdma_make_progress()
1815 txp = get_txhead(sde); in sdma_make_progress()
1817 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp); in sdma_make_progress()
1830 if ((status & sde->idle_mask) && !idle_check_done) { in sdma_make_progress()
1833 swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; in sdma_make_progress()
1835 hwhead = (u16)read_sde_csr(sde, SD(HEAD)); in sdma_make_progress()
1841 sde->last_status = status; in sdma_make_progress()
1843 sdma_desc_avail(sde, sdma_descq_freecnt(sde)); in sdma_make_progress()
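sdma_make_progress() advances the software descq_head toward the head reported by hardware, retiring each sdma_txreq whose final descriptor has been consumed, then recomputes the free-descriptor count. A compact sketch of that head-chasing loop over free-running counters, with a hypothetical completion callback (the driver's exact free-count formula is not shown in this listing):

#include <stdio.h>
#include <stdint.h>

#define DESCQ_CNT  8
#define DESCQ_MASK (DESCQ_CNT - 1)

static uint16_t descq_head, descq_tail; /* free-running counters */

static void complete_one(unsigned slot)
{
        printf("completed descriptor in slot %u\n", slot);
}

/* Advance the software head up to the slot the hardware says it consumed. */
static void make_progress(uint16_t hwhead)
{
        while ((descq_head & DESCQ_MASK) != hwhead) {
                complete_one(descq_head & DESCQ_MASK);
                descq_head++;
        }
}

int main(void)
{
        descq_tail = 5;                         /* five descriptors posted */
        make_progress(3);                       /* hardware consumed three */
        printf("in flight: %u, free: %u\n",
               (unsigned)(uint16_t)(descq_tail - descq_head),
               (unsigned)(DESCQ_CNT - (uint16_t)(descq_tail - descq_head)));
        return 0;
}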
1855 void sdma_engine_interrupt(struct sdma_engine *sde, u64 status) in sdma_engine_interrupt() argument
1857 trace_hfi1_sdma_engine_interrupt(sde, status); in sdma_engine_interrupt()
1858 write_seqlock(&sde->head_lock); in sdma_engine_interrupt()
1859 sdma_set_desc_cnt(sde, sdma_desct_intr); in sdma_engine_interrupt()
1860 if (status & sde->idle_mask) in sdma_engine_interrupt()
1861 sde->idle_int_cnt++; in sdma_engine_interrupt()
1862 else if (status & sde->progress_mask) in sdma_engine_interrupt()
1863 sde->progress_int_cnt++; in sdma_engine_interrupt()
1864 else if (status & sde->int_mask) in sdma_engine_interrupt()
1865 sde->sdma_int_cnt++; in sdma_engine_interrupt()
1866 sdma_make_progress(sde, status); in sdma_engine_interrupt()
1867 write_sequnlock(&sde->head_lock); in sdma_engine_interrupt()
1875 void sdma_engine_error(struct sdma_engine *sde, u64 status) in sdma_engine_error() argument
1880 dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n", in sdma_engine_error()
1881 sde->this_idx, in sdma_engine_error()
1883 sdma_state_names[sde->state.current_state]); in sdma_engine_error()
1885 spin_lock_irqsave(&sde->tail_lock, flags); in sdma_engine_error()
1886 write_seqlock(&sde->head_lock); in sdma_engine_error()
1888 __sdma_process_event(sde, sdma_event_e60_hw_halted); in sdma_engine_error()
1890 dd_dev_err(sde->dd, in sdma_engine_error()
1892 sde->this_idx, in sdma_engine_error()
1894 sdma_state_names[sde->state.current_state]); in sdma_engine_error()
1895 dump_sdma_state(sde); in sdma_engine_error()
1897 write_sequnlock(&sde->head_lock); in sdma_engine_error()
1898 spin_unlock_irqrestore(&sde->tail_lock, flags); in sdma_engine_error()
1901 static void sdma_sendctrl(struct sdma_engine *sde, unsigned op) in sdma_sendctrl() argument
1908 dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n", in sdma_sendctrl()
1909 sde->this_idx, in sdma_sendctrl()
1931 spin_lock_irqsave(&sde->senddmactrl_lock, flags); in sdma_sendctrl()
1933 sde->p_senddmactrl |= set_senddmactrl; in sdma_sendctrl()
1934 sde->p_senddmactrl &= ~clr_senddmactrl; in sdma_sendctrl()
1937 write_sde_csr(sde, SD(CTRL), in sdma_sendctrl()
1938 sde->p_senddmactrl | in sdma_sendctrl()
1941 write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl); in sdma_sendctrl()
1943 spin_unlock_irqrestore(&sde->senddmactrl_lock, flags); in sdma_sendctrl()
1946 sdma_dumpstate(sde); in sdma_sendctrl()
1950 static void sdma_setlengen(struct sdma_engine *sde) in sdma_setlengen() argument
1953 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", in sdma_setlengen()
1954 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); in sdma_setlengen()
1962 write_sde_csr(sde, SD(LEN_GEN), in sdma_setlengen()
1963 (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)); in sdma_setlengen()
1964 write_sde_csr(sde, SD(LEN_GEN), in sdma_setlengen()
1965 ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) | in sdma_setlengen()
1969 static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail) in sdma_update_tail() argument
1973 writeq(tail, sde->tail_csr); in sdma_update_tail()
1980 static void sdma_hw_start_up(struct sdma_engine *sde) in sdma_hw_start_up() argument
1985 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", in sdma_hw_start_up()
1986 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); in sdma_hw_start_up()
1989 sdma_setlengen(sde); in sdma_hw_start_up()
1990 sdma_update_tail(sde, 0); /* Set SendDmaTail */ in sdma_hw_start_up()
1991 *sde->head_dma = 0; in sdma_hw_start_up()
1995 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); in sdma_hw_start_up()
2003 static void set_sdma_integrity(struct sdma_engine *sde) in set_sdma_integrity() argument
2005 struct hfi1_devdata *dd = sde->dd; in set_sdma_integrity()
2007 write_sde_csr(sde, SD(CHECK_ENABLE), in set_sdma_integrity()
2012 struct sdma_engine *sde, in init_sdma_regs() argument
2018 struct hfi1_devdata *dd = sde->dd; in init_sdma_regs()
2021 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); in init_sdma_regs()
2024 write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys); in init_sdma_regs()
2025 sdma_setlengen(sde); in init_sdma_regs()
2026 sdma_update_tail(sde, 0); /* Set SendDmaTail */ in init_sdma_regs()
2027 write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt); in init_sdma_regs()
2028 write_sde_csr(sde, SD(DESC_CNT), 0); in init_sdma_regs()
2029 write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys); in init_sdma_regs()
2030 write_sde_csr(sde, SD(MEMORY), in init_sdma_regs()
2032 ((u64)(credits * sde->this_idx) << in init_sdma_regs()
2034 write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull); in init_sdma_regs()
2035 set_sdma_integrity(sde); in init_sdma_regs()
2038 write_sde_csr(sde, SD(CHECK_OPCODE), in init_sdma_regs()
2046 csr = read_csr(sde->dd, reg); \
2047 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
2051 csr = read_sde_csr(sde, reg); \
2052 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
2053 #reg, sde->this_idx, csr); \
2057 csr = read_csr(sde->dd, reg + (8 * i)); \
2058 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
2062 void sdma_dumpstate(struct sdma_engine *sde) in sdma_dumpstate() argument
2103 static void dump_sdma_state(struct sdma_engine *sde) in dump_sdma_state() argument
2112 head = sde->descq_head & sde->sdma_mask; in dump_sdma_state()
2113 tail = sde->descq_tail & sde->sdma_mask; in dump_sdma_state()
2114 cnt = sdma_descq_freecnt(sde); in dump_sdma_state()
2116 dd_dev_err(sde->dd, in dump_sdma_state()
2118 sde->this_idx, head, tail, cnt, in dump_sdma_state()
2119 !list_empty(&sde->flushlist)); in dump_sdma_state()
2125 descqp = &sde->descq[head]; in dump_sdma_state()
2139 dd_dev_err(sde->dd, in dump_sdma_state()
2142 dd_dev_err(sde->dd, in dump_sdma_state()
2146 dd_dev_err(sde->dd, in dump_sdma_state()
2158 head &= sde->sdma_mask; in dump_sdma_state()
2171 void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) in sdma_seqfile_dump_sde() argument
2180 head = sde->descq_head & sde->sdma_mask; in sdma_seqfile_dump_sde()
2181 tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; in sdma_seqfile_dump_sde()
2182 seq_printf(s, SDE_FMT, sde->this_idx, in sdma_seqfile_dump_sde()
2183 sde->cpu, in sdma_seqfile_dump_sde()
2184 sdma_state_name(sde->state.current_state), in sdma_seqfile_dump_sde()
2185 (unsigned long long)read_sde_csr(sde, SD(CTRL)), in sdma_seqfile_dump_sde()
2186 (unsigned long long)read_sde_csr(sde, SD(STATUS)), in sdma_seqfile_dump_sde()
2187 (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)), in sdma_seqfile_dump_sde()
2188 (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail, in sdma_seqfile_dump_sde()
2189 (unsigned long long)read_sde_csr(sde, SD(HEAD)), head, in sdma_seqfile_dump_sde()
2190 (unsigned long long)le64_to_cpu(*sde->head_dma), in sdma_seqfile_dump_sde()
2191 (unsigned long long)read_sde_csr(sde, SD(MEMORY)), in sdma_seqfile_dump_sde()
2192 (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)), in sdma_seqfile_dump_sde()
2193 (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)), in sdma_seqfile_dump_sde()
2194 (unsigned long long)sde->last_status, in sdma_seqfile_dump_sde()
2195 (unsigned long long)sde->ahg_bits, in sdma_seqfile_dump_sde()
2196 sde->tx_tail, in sdma_seqfile_dump_sde()
2197 sde->tx_head, in sdma_seqfile_dump_sde()
2198 sde->descq_tail, in sdma_seqfile_dump_sde()
2199 sde->descq_head, in sdma_seqfile_dump_sde()
2200 !list_empty(&sde->flushlist), in sdma_seqfile_dump_sde()
2201 sde->descq_full_count, in sdma_seqfile_dump_sde()
2202 (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID)); in sdma_seqfile_dump_sde()
2208 descqp = &sde->descq[head]; in sdma_seqfile_dump_sde()
2233 head = (head + 1) & sde->sdma_mask; in sdma_seqfile_dump_sde()
2241 static inline u64 add_gen(struct sdma_engine *sde, u64 qw1) in add_gen() argument
2243 u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3; in add_gen()
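add_gen() at line 2243 derives a 2-bit generation from the free-running tail: descq_tail >> sdma_shift counts how many times the ring has wrapped, and the low two bits of that count are stamped into the descriptor so hardware can distinguish a freshly written descriptor from a stale one left over from an earlier pass. A worked sketch with hypothetical ring parameters:

#include <stdio.h>
#include <stdint.h>

#define DESCQ_CNT   8
#define SDMA_SHIFT  3           /* ilog2(DESCQ_CNT) */

static unsigned gen_of(uint16_t tail)
{
        /* Wrap count of the ring, reduced to a 2-bit generation. */
        return (tail >> SDMA_SHIFT) & 3;
}

int main(void)
{
        /* Same slot (tail & mask == 1) on successive passes over the ring. */
        for (uint16_t tail = 1; tail <= 1 + 4 * DESCQ_CNT; tail += DESCQ_CNT)
                printf("tail=%2u slot=%u generation=%u\n",
                       (unsigned)tail,
                       (unsigned)(tail & (DESCQ_CNT - 1)),
                       gen_of(tail));
        return 0;
}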
2267 static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx) in submit_tx() argument
2274 tail = sde->descq_tail & sde->sdma_mask; in submit_tx()
2275 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); in submit_tx()
2276 sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1])); in submit_tx()
2277 trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1], in submit_tx()
2278 tail, &sde->descq[tail]); in submit_tx()
2279 tail = ++sde->descq_tail & sde->sdma_mask; in submit_tx()
2286 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); in submit_tx()
2293 qw1 = add_gen(sde, descp->qw[1]); in submit_tx()
2295 sde->descq[tail].qw[1] = cpu_to_le64(qw1); in submit_tx()
2296 trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1, in submit_tx()
2297 tail, &sde->descq[tail]); in submit_tx()
2298 tail = ++sde->descq_tail & sde->sdma_mask; in submit_tx()
2302 tx->sn = sde->tail_sn++; in submit_tx()
2303 trace_hfi1_sdma_in_sn(sde, tx->sn); in submit_tx()
2304 WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]); in submit_tx()
2306 sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx; in submit_tx()
2307 sde->desc_avail -= tx->num_desc; in submit_tx()
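submit_tx() (lines 2275-2297) copies each descriptor into the ring as two little-endian 64-bit words via cpu_to_le64(); for the last descriptor it fills qw[0] first and stores qw[1], with the generation folded in via add_gen(), as the final write, presumably so the generation only becomes valid once the rest of the descriptor is in place. A minimal user-space sketch of building such a two-quadword little-endian descriptor; the field positions here are hypothetical, not the hfi1 layout:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <endian.h>             /* htole64(): glibc/BSD-style helper */

struct hw_desc {                /* two little-endian quadwords, as seen by HW */
        uint64_t qw[2];
};

int main(void)
{
        struct hw_desc d;
        uint64_t addr = 0x1000;         /* hypothetical DMA address */
        uint64_t len  = 256;            /* hypothetical byte count */
        unsigned gen  = 2;              /* 2-bit generation for this pass */

        /* Hypothetical layout: qw[0] carries the address, qw[1] the length
         * plus generation.  qw[0] is filled first, qw[1] last. */
        d.qw[0] = htole64(addr);
        d.qw[1] = htole64(len | ((uint64_t)gen << 62));

        printf("qw0=0x%016llx qw1=0x%016llx\n",
               (unsigned long long)d.qw[0],
               (unsigned long long)d.qw[1]);
        return 0;
}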
2315 struct sdma_engine *sde, in sdma_check_progress() argument
2322 sde->desc_avail = sdma_descq_freecnt(sde); in sdma_check_progress()
2323 if (tx->num_desc <= sde->desc_avail) in sdma_check_progress()
2330 (const seqcount_t *)&sde->head_lock.seqcount); in sdma_check_progress()
2331 ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent); in sdma_check_progress()
2333 sde->desc_avail = sdma_descq_freecnt(sde); in sdma_check_progress()
2355 int sdma_send_txreq(struct sdma_engine *sde, in sdma_send_txreq() argument
2368 spin_lock_irqsave(&sde->tail_lock, flags); in sdma_send_txreq()
2370 if (unlikely(!__sdma_running(sde))) in sdma_send_txreq()
2372 if (unlikely(tx->num_desc > sde->desc_avail)) in sdma_send_txreq()
2374 tail = submit_tx(sde, tx); in sdma_send_txreq()
2377 sdma_update_tail(sde, tail); in sdma_send_txreq()
2379 spin_unlock_irqrestore(&sde->tail_lock, flags); in sdma_send_txreq()
2386 tx->sn = sde->tail_sn++; in sdma_send_txreq()
2387 trace_hfi1_sdma_in_sn(sde, tx->sn); in sdma_send_txreq()
2389 spin_lock(&sde->flushlist_lock); in sdma_send_txreq()
2390 list_add_tail(&tx->list, &sde->flushlist); in sdma_send_txreq()
2391 spin_unlock(&sde->flushlist_lock); in sdma_send_txreq()
2393 queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); in sdma_send_txreq()
2397 ret = sdma_check_progress(sde, wait, tx, pkts_sent); in sdma_send_txreq()
2402 sde->descq_full_count++; in sdma_send_txreq()
2434 int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait, in sdma_send_txlist() argument
2443 spin_lock_irqsave(&sde->tail_lock, flags); in sdma_send_txlist()
2447 if (unlikely(!__sdma_running(sde))) in sdma_send_txlist()
2449 if (unlikely(tx->num_desc > sde->desc_avail)) in sdma_send_txlist()
2456 tail = submit_tx(sde, tx); in sdma_send_txlist()
2460 sdma_update_tail(sde, tail); in sdma_send_txlist()
2472 sdma_update_tail(sde, tail); in sdma_send_txlist()
2473 spin_unlock_irqrestore(&sde->tail_lock, flags); in sdma_send_txlist()
2477 spin_lock(&sde->flushlist_lock); in sdma_send_txlist()
2483 tx->sn = sde->tail_sn++; in sdma_send_txlist()
2484 trace_hfi1_sdma_in_sn(sde, tx->sn); in sdma_send_txlist()
2486 list_add_tail(&tx->list, &sde->flushlist); in sdma_send_txlist()
2490 spin_unlock(&sde->flushlist_lock); in sdma_send_txlist()
2491 queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); in sdma_send_txlist()
2495 ret = sdma_check_progress(sde, wait, tx, submit_count > 0); in sdma_send_txlist()
2500 sde->descq_full_count++; in sdma_send_txlist()
2504 static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event) in sdma_process_event() argument
2508 spin_lock_irqsave(&sde->tail_lock, flags); in sdma_process_event()
2509 write_seqlock(&sde->head_lock); in sdma_process_event()
2511 __sdma_process_event(sde, event); in sdma_process_event()
2513 if (sde->state.current_state == sdma_state_s99_running) in sdma_process_event()
2514 sdma_desc_avail(sde, sdma_descq_freecnt(sde)); in sdma_process_event()
2516 write_sequnlock(&sde->head_lock); in sdma_process_event()
2517 spin_unlock_irqrestore(&sde->tail_lock, flags); in sdma_process_event()
2520 static void __sdma_process_event(struct sdma_engine *sde, in __sdma_process_event() argument
2523 struct sdma_state *ss = &sde->state; in __sdma_process_event()
2528 dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx, in __sdma_process_event()
2550 sdma_get(&sde->state); in __sdma_process_event()
2551 sdma_set_state(sde, in __sdma_process_event()
2559 sdma_sw_tear_down(sde); in __sdma_process_event()
2583 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2584 sdma_sw_tear_down(sde); in __sdma_process_event()
2589 sdma_set_state(sde, in __sdma_process_event()
2591 sdma_start_hw_clean_up(sde); in __sdma_process_event()
2603 schedule_work(&sde->err_halt_worker); in __sdma_process_event()
2624 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2625 sdma_sw_tear_down(sde); in __sdma_process_event()
2632 sdma_hw_start_up(sde); in __sdma_process_event()
2633 sdma_set_state(sde, ss->go_s99_running ? in __sdma_process_event()
2665 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2666 sdma_sw_tear_down(sde); in __sdma_process_event()
2675 sdma_set_state(sde, sdma_state_s99_running); in __sdma_process_event()
2683 sdma_set_state(sde, sdma_state_s50_hw_halt_wait); in __sdma_process_event()
2684 schedule_work(&sde->err_halt_worker); in __sdma_process_event()
2690 sdma_set_state(sde, sdma_state_s80_hw_freeze); in __sdma_process_event()
2691 atomic_dec(&sde->dd->sdma_unfreeze_count); in __sdma_process_event()
2692 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); in __sdma_process_event()
2706 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2718 sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait); in __sdma_process_event()
2719 sdma_start_hw_clean_up(sde); in __sdma_process_event()
2745 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2746 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2753 sdma_hw_start_up(sde); in __sdma_process_event()
2754 sdma_set_state(sde, ss->go_s99_running ? in __sdma_process_event()
2787 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2788 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2793 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); in __sdma_process_event()
2794 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2806 schedule_work(&sde->err_halt_worker); in __sdma_process_event()
2828 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2829 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2834 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); in __sdma_process_event()
2835 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2847 schedule_work(&sde->err_halt_worker); in __sdma_process_event()
2868 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2869 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2892 sdma_set_state(sde, sdma_state_s82_freeze_sw_clean); in __sdma_process_event()
2893 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2907 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2908 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2921 atomic_dec(&sde->dd->sdma_unfreeze_count); in __sdma_process_event()
2922 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); in __sdma_process_event()
2936 sdma_hw_start_up(sde); in __sdma_process_event()
2937 sdma_set_state(sde, ss->go_s99_running ? in __sdma_process_event()
2951 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2952 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2968 sdma_err_progress_check_schedule(sde); in __sdma_process_event()
2975 sdma_set_state(sde, sdma_state_s50_hw_halt_wait); in __sdma_process_event()
2976 schedule_work(&sde->err_halt_worker); in __sdma_process_event()
2979 sdma_set_state(sde, sdma_state_s60_idle_halt_wait); in __sdma_process_event()
2985 sdma_set_state(sde, sdma_state_s80_hw_freeze); in __sdma_process_event()
2986 atomic_dec(&sde->dd->sdma_unfreeze_count); in __sdma_process_event()
2987 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); in __sdma_process_event()
2999 sdma_make_progress(sde, 0); in __sdma_process_event()
3141 struct sdma_engine *sde; in sdma_update_lmc() local
3153 sde = &dd->per_sdma[i]; in sdma_update_lmc()
3154 write_sde_csr(sde, SD(CHECK_SLID), sreg); in sdma_update_lmc()
3248 int sdma_ahg_alloc(struct sdma_engine *sde) in sdma_ahg_alloc() argument
3253 if (!sde) { in sdma_ahg_alloc()
3254 trace_hfi1_ahg_allocate(sde, -EINVAL); in sdma_ahg_alloc()
3258 nr = ffz(READ_ONCE(sde->ahg_bits)); in sdma_ahg_alloc()
3260 trace_hfi1_ahg_allocate(sde, -ENOSPC); in sdma_ahg_alloc()
3263 oldbit = test_and_set_bit(nr, &sde->ahg_bits); in sdma_ahg_alloc()
3268 trace_hfi1_ahg_allocate(sde, nr); in sdma_ahg_alloc()
3279 void sdma_ahg_free(struct sdma_engine *sde, int ahg_index) in sdma_ahg_free() argument
3281 if (!sde) in sdma_ahg_free()
3283 trace_hfi1_ahg_deallocate(sde, ahg_index); in sdma_ahg_free()
3286 clear_bit(ahg_index, &sde->ahg_bits); in sdma_ahg_free()
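sdma_ahg_alloc()/sdma_ahg_free() manage the AHG (automatic header generation) slots as a 64-bit bitmap: ffz() at line 3258 finds the first zero bit, test_and_set_bit() at line 3263 claims it, and the ahg_bits seed written in sdma_init (line 1404) pre-marks bits that are never handed out. A tiny user-space model of a find-first-zero bitmap allocator, with single-threaded stand-ins for the atomic kernel bit ops:

#include <stdio.h>
#include <stdint.h>

/* Seed value from line 1404: bits already set are never allocated. */
static uint64_t bits = 0xfffffffe00000000ULL;

/* Find the first zero bit, like the kernel's ffz(). */
static int first_zero(uint64_t v)
{
        for (int i = 0; i < 64; i++)
                if (!(v & ((uint64_t)1 << i)))
                        return i;
        return -1;
}

static int alloc_slot(void)
{
        int nr = first_zero(bits);

        if (nr < 0)
                return -1;                      /* no free slot */
        bits |= (uint64_t)1 << nr;              /* claim it */
        return nr;
}

static void free_slot(int nr)
{
        bits &= ~((uint64_t)1 << nr);
}

int main(void)
{
        int a = alloc_slot();
        int b = alloc_slot();

        printf("allocated %d and %d\n", a, b);
        free_slot(a);
        printf("after free, next alloc = %d\n", alloc_slot());
        return 0;
}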
3374 struct sdma_engine *sde) in _sdma_engine_progress_schedule() argument
3376 trace_hfi1_sdma_engine_progress(sde, sde->progress_mask); in _sdma_engine_progress_schedule()
3378 write_csr(sde->dd, in _sdma_engine_progress_schedule()
3380 sde->progress_mask); in _sdma_engine_progress_schedule()