Searched refs:cvmx_get_cycle (Results 1 – 5 of 5) sorted by relevance
442 timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout; in cvmx_spi_clock_detect_cb()
460 if (cvmx_get_cycle() > timeout_time) { in cvmx_spi_clock_detect_cb()
467 timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout; in cvmx_spi_clock_detect_cb()
485 if (cvmx_get_cycle() > timeout_time) { in cvmx_spi_clock_detect_cb()
513 uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout; in cvmx_spi_training_cb()
541 timeout_time = cvmx_get_cycle() + 1000ull * MS * 600; in cvmx_spi_training_cb()
554 if (cvmx_get_cycle() > timeout_time) { in cvmx_spi_training_cb()
606 timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout; in cvmx_spi_calendar_sync_cb()
610 if (cvmx_get_cycle() > timeout_time) { in cvmx_spi_calendar_sync_cb()
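The SPI hits above all follow the same absolute-deadline idiom. Below is a minimal sketch of that idiom, assuming MS holds core clock cycles per millisecond and timeout is given in seconds (neither is confirmed by the results themselves); status_ready() is a hypothetical stand-in for the register poll each driver actually performs.

	#include <stdint.h>
	#include <stdbool.h>

	/* Provided by the OCTEON executive headers in the real tree. */
	extern uint64_t cvmx_get_cycle(void);
	/* Hypothetical status check standing in for the per-driver register read. */
	extern bool status_ready(void);

	/* Busy-wait until ready or until the cycle-count deadline passes. */
	static int wait_for_ready(uint64_t ms_cycles, int timeout_sec)
	{
		uint64_t timeout_time = cvmx_get_cycle() + 1000ull * ms_cycles * timeout_sec;

		while (!status_ready()) {
			if (cvmx_get_cycle() > timeout_time)
				return -1;	/* deadline passed, give up */
		}
		return 0;
	}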
425 static inline uint64_t cvmx_get_cycle(void) in cvmx_get_cycle() function
461 uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
469 } else if (cvmx_get_cycle() > done) { \
1328 uint64_t start_cycle = cvmx_get_cycle(); in cvmx_pow_tag_sw_wait()
1333 if (unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) { in cvmx_pow_tag_sw_wait()
634 start_cycle = cvmx_get_cycle(); in __cvmx_pcie_rc_initialize_link_gen1()
636 if (cvmx_get_cycle() - start_cycle > 2 * octeon_get_clock_rate()) { in __cvmx_pcie_rc_initialize_link_gen1()
1102 start_cycle = cvmx_get_cycle(); in __cvmx_pcie_rc_initialize_link_gen2()
1104 if (cvmx_get_cycle() - start_cycle > octeon_get_clock_rate()) in __cvmx_pcie_rc_initialize_link_gen2()
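The PCIe hits use the elapsed-cycles variant instead of an absolute deadline: octeon_get_clock_rate() returns core cycles per second, so comparing the delta against 2 * rate bounds the wait to roughly two seconds. A sketch of that variant follows; link_is_up() is a hypothetical stand-in for the link-status read the driver really does.

	#include <stdint.h>
	#include <stdbool.h>

	/* Provided by the OCTEON headers in the real tree. */
	extern uint64_t cvmx_get_cycle(void);
	extern uint64_t octeon_get_clock_rate(void);	/* core cycles per second */
	/* Hypothetical link-status check. */
	extern bool link_is_up(void);

	/* Busy-wait for link-up, bounded to about two seconds of core cycles. */
	static int wait_for_link(void)
	{
		uint64_t start_cycle = cvmx_get_cycle();

		while (!link_is_up()) {
			if (cvmx_get_cycle() - start_cycle > 2 * octeon_get_clock_rate())
				return -1;	/* link did not come up in time */
		}
		return 0;
	}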
580 u64 done = cvmx_get_cycle() + 100 * in cvmx_wait_tx_rx()
592 } else if (cvmx_get_cycle() > done) { in cvmx_wait_tx_rx()