Searched refs: cvmx_get_cycle (Results 1 – 10 of 10) sorted by relevance
  390  uint64_t start_cycle = cvmx_get_cycle();                                       in cvmx_flash_erase_block()
  424  if (cvmx_get_cycle() > start_cycle + flash_info[chip_id].erase_timeout)        in cvmx_flash_erase_block()
  446  uint64_t start_cycle = cvmx_get_cycle();                                       in cvmx_flash_erase_block()
  449  if (cvmx_get_cycle() > start_cycle + flash_info[chip_id].erase_timeout)        in cvmx_flash_erase_block()
  514  uint64_t start_cycle = cvmx_get_cycle();                                       in cvmx_flash_write_block()
  534  if (cvmx_get_cycle() > start_cycle + flash_info[chip_id].write_timeout)        in cvmx_flash_write_block()
  565  uint64_t start_cycle = cvmx_get_cycle();                                       in cvmx_flash_write_block()
  568  if (cvmx_get_cycle() > start_cycle + flash_info[chip_id].write_timeout)        in cvmx_flash_write_block()
  446  timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;                      in cvmx_spi_clock_detect_cb()
  462  if (cvmx_get_cycle() > timeout_time)                                           in cvmx_spi_clock_detect_cb()
  470  timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;                      in cvmx_spi_clock_detect_cb()
  486  if (cvmx_get_cycle() > timeout_time)                                           in cvmx_spi_clock_detect_cb()
  513  uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;             in cvmx_spi_training_cb()
  540  timeout_time = cvmx_get_cycle() + 1000ull * MS * 600; /* Wait a really long time here */  in cvmx_spi_training_cb()
  542  timeout_time = cvmx_get_cycle() + 1000ull * MS * 10;                           in cvmx_spi_training_cb()
  555  if (cvmx_get_cycle() > timeout_time)                                           in cvmx_spi_training_cb()
  603  timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;                      in cvmx_spi_calendar_sync_cb()
  607  if (cvmx_get_cycle() > timeout_time)                                           in cvmx_spi_calendar_sync_cb()
  605  static inline uint64_t cvmx_get_cycle(void)                                    in cvmx_get_cycle() [function definition]
  634  uint64_t done = cvmx_get_cycle() + cycles;                                     in cvmx_wait()
  636  while (cvmx_get_cycle() < done)                                                in cvmx_wait()
  650  uint64_t done = cvmx_get_cycle() + usec * cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000000;  in cvmx_wait_usec()
  651  while (cvmx_get_cycle() < done)                                                in cvmx_wait_usec()
176 CVMX_FUNCTION uint64_t cvmx_get_cycle(void);
  357  uint64_t done = cvmx_get_cycle() + (uint64_t)CVMX_MDIO_TIMEOUT *               in __cvmx_mdio_read_rd_dat()
  363  } while (smi_rd.s.pending && (cvmx_get_cycle() < done));                       in __cvmx_mdio_read_rd_dat()
 1368  start_cycle = cvmx_get_cycle();                                                in cvmx_helper_wait_pko_queue_drain()
 1370  while (count && (cvmx_get_cycle() < stop_cycle))                               in cvmx_helper_wait_pko_queue_drain()
 1593  start_cycle = cvmx_get_cycle();                                                in cvmx_helper_shutdown_packet_io_global()
 1596  (cvmx_get_cycle() < stop_cycle))                                               in cvmx_helper_shutdown_packet_io_global()
  422  start_cycle = cvmx_get_cycle();                                                in __cvmx_pcie_rc_initialize_link_gen1()
  425  if (cvmx_get_cycle() - start_cycle > 100*cvmx_clock_get_rate(CVMX_CLOCK_CORE)) in __cvmx_pcie_rc_initialize_link_gen1()
  859  start_cycle = cvmx_get_cycle();                                                in __cvmx_pcie_rc_initialize_link_gen2()
  862  if (cvmx_get_cycle() - start_cycle > cvmx_clock_get_rate(CVMX_CLOCK_CORE))     in __cvmx_pcie_rc_initialize_link_gen2()
185 header.s.cycle = cvmx_get_cycle(); in __cvmx_log_build_header()
 1449  uint64_t start_cycle = cvmx_get_cycle();                                       in cvmx_pow_tag_sw_wait()
 1455  if (cvmx_unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES))                in cvmx_pow_tag_sw_wait()
  258  uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
  267  } else if (cvmx_get_cycle() > done) { \