| /linux-6.15/fs/netfs/ |
| H A D | stats.c |
      55 atomic_read(&netfs_n_rh_dio_read), in netfs_stats_show()
      56 atomic_read(&netfs_n_rh_readahead), in netfs_stats_show()
      64 atomic_read(&netfs_n_wh_dio_write), in netfs_stats_show()
      68 atomic_read(&netfs_n_rh_zero), in netfs_stats_show()
      72 atomic_read(&netfs_n_rh_download), in netfs_stats_show()
      77 atomic_read(&netfs_n_rh_read), in netfs_stats_show()
      81 atomic_read(&netfs_n_wh_upload), in netfs_stats_show()
      85 atomic_read(&netfs_n_wh_write), in netfs_stats_show()
      94 atomic_read(&netfs_n_rh_rreq), in netfs_stats_show()
      95 atomic_read(&netfs_n_rh_sreq), in netfs_stats_show()
      [all …]
|
| H A D | fscache_stats.c |
      61 atomic_read(&fscache_n_cookies), in fscache_stats_show()
      62 atomic_read(&fscache_n_volumes), in fscache_stats_show()
      64 atomic_read(&fscache_n_volumes_nomem) in fscache_stats_show()
      68 atomic_read(&fscache_n_acquires), in fscache_stats_show()
      69 atomic_read(&fscache_n_acquires_ok), in fscache_stats_show()
      73 atomic_read(&fscache_n_cookies_lru), in fscache_stats_show()
      84 atomic_read(&fscache_n_updates), in fscache_stats_show()
      85 atomic_read(&fscache_n_resizes), in fscache_stats_show()
      96 atomic_read(&fscache_n_culled)); in fscache_stats_show()
      99 atomic_read(&fscache_n_read), in fscache_stats_show()
      [all …]
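
Both the netfs and fscache hits above (and the rxrpc and SPU debugfs entries further down) follow the same pattern: hot paths bump global atomic event counters with no locking, and a `*_stats_show()` routine later snapshots each counter with `atomic_read()` when the stats file is generated, accepting a slightly inconsistent point-in-time view. A minimal userspace sketch of that pattern follows; the counter names are invented for the illustration, and C11 `atomic_load`/`atomic_fetch_add` stand in for the kernel's `atomic_read()`/`atomic_inc()`.

```c
/*
 * Sketch of the lock-free stats-counter pattern: increment on the hot
 * path, snapshot with plain atomic loads when showing the stats.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint n_rh_read;   /* invented: bumped once per read op */
static atomic_uint n_wh_write;  /* invented: bumped once per write op */

static void count_read(void)  { atomic_fetch_add_explicit(&n_rh_read, 1, memory_order_relaxed); }
static void count_write(void) { atomic_fetch_add_explicit(&n_wh_write, 1, memory_order_relaxed); }

/* Roughly what a *_stats_show() does: print a point-in-time snapshot. */
static void stats_show(void)
{
	printf("reads : %u\n", atomic_load_explicit(&n_rh_read, memory_order_relaxed));
	printf("writes: %u\n", atomic_load_explicit(&n_wh_write, memory_order_relaxed));
}

int main(void)
{
	count_read();
	count_write();
	count_read();
	stats_show();	/* prints reads : 2, writes: 1 */
	return 0;
}
```

The design choice the kernel code reflects is that statistics only need to be approximately consistent, so a relaxed per-counter read is enough and no lock is taken around the whole dump.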
|
| /linux-6.15/net/rxrpc/ |
| H A D | proc.c |
      179 atomic_read(&conn->active), in rxrpc_connection_seq_show()
      254 atomic_read(&bundle->active), in rxrpc_bundle_seq_show()
      421 atomic_read(&local->active_users), in rxrpc_local_seq_show()
      483 atomic_read(&rxnet->stat_tx_data), in rxrpc_stats_show()
      489 atomic_read(&rxnet->stat_rx_data), in rxrpc_stats_show()
      494 atomic_read(&rxnet->stat_tx_ack_fill), in rxrpc_stats_show()
      495 atomic_read(&rxnet->stat_tx_ack_send), in rxrpc_stats_show()
      519 atomic_read(&rxnet->stat_rx_acks[0])); in rxrpc_stats_show()
      558 atomic_read(&rxrpc_nr_txbuf), in rxrpc_stats_show()
      559 atomic_read(&rxrpc_n_rx_skbs)); in rxrpc_stats_show()
      [all …]
|
| /linux-6.15/net/netfilter/ipvs/ |
| H A D | ip_vs_nq.c |
      45 return atomic_read(&dest->activeconns) + 1; in ip_vs_nq_dest_overhead()
      77 !atomic_read(&dest->weight)) in ip_vs_nq_schedule()
      83 if (atomic_read(&dest->activeconns) == 0) { in ip_vs_nq_schedule()
      90 ((__s64)loh * atomic_read(&dest->weight) > in ip_vs_nq_schedule()
      91 (__s64)doh * atomic_read(&least->weight))) { in ip_vs_nq_schedule()
      107 atomic_read(&least->activeconns), in ip_vs_nq_schedule()
      109 atomic_read(&least->weight), loh); in ip_vs_nq_schedule()
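
The fragments above, and the ip_vs_sed.c, ip_vs_wlc.c and ip_vs_lblc.c entries below, all share one comparison: each destination gets an "overhead" (for NQ/SED, `activeconns + 1`), and two candidates are compared by cross-multiplying with the weights in 64 bits, `loh * dest->weight > doh * least->weight`, which selects the smaller overhead-to-weight ratio without a division. NQ additionally takes an idle destination (zero active connections) immediately. The sketch below is an illustrative userspace rendering of that comparison under those assumptions; the `struct dest` and `pick()` helper are invented, not IPVS's API.

```c
/*
 * Weighted least-overhead selection via 64-bit cross-multiplication:
 *   loh/lw < doh/dw   <=>   loh*dw < doh*lw   (for positive weights),
 * so no division is needed.  Mirrors the comparison visible above.
 */
#include <stdint.h>
#include <stdio.h>

struct dest {
	const char *name;
	int32_t activeconns;
	int32_t weight;		/* <= 0 means "do not use" */
};

/* NQ/SED-style overhead: active connections plus the connection being placed. */
static int64_t overhead(const struct dest *d)
{
	return (int64_t)d->activeconns + 1;
}

static const struct dest *pick(const struct dest *tab, int n)
{
	const struct dest *least = NULL;
	int64_t loh = 0;

	for (int i = 0; i < n; i++) {
		const struct dest *d = &tab[i];
		int64_t doh;

		if (d->weight <= 0)
			continue;
		/* NQ's extra rule: an idle server is taken immediately. */
		if (d->activeconns == 0)
			return d;
		doh = overhead(d);
		if (!least || loh * d->weight > doh * least->weight) {
			least = d;
			loh = doh;
		}
	}
	return least;
}

int main(void)
{
	struct dest tab[] = {
		{ "a", 10, 100 },	/* overhead/weight = 11/100 */
		{ "b",  3,  50 },	/* overhead/weight =  4/50, preferred */
	};
	printf("picked %s\n", pick(tab, 2)->name);	/* picked b */
	return 0;
}
```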
|
| H A D | ip_vs_lblcr.c |
      173 if ((atomic_read(&least->weight) > 0) in ip_vs_dest_set_min()
      202 atomic_read(&least->activeconns), in ip_vs_dest_set_min()
      204 atomic_read(&least->weight), loh); in ip_vs_dest_set_min()
      222 if (atomic_read(&most->weight) > 0) { in ip_vs_dest_set_max()
      247 atomic_read(&most->activeconns), in ip_vs_dest_set_max()
      249 atomic_read(&most->weight), moh); in ip_vs_dest_set_max()
      583 if (atomic_read(&dest->weight) > 0) { in __ip_vs_lblcr_schedule()
      626 if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) { in is_overloaded()
      630 if (atomic_read(&d->activeconns)*2 in is_overloaded()
      631 < atomic_read(&d->weight)) { in is_overloaded()
      [all …]
|
| H A D | ip_vs_sed.c |
      49 return atomic_read(&dest->activeconns) + 1; in ip_vs_sed_dest_overhead()
      80 atomic_read(&dest->weight) > 0) { in ip_vs_sed_schedule()
      97 if ((__s64)loh * atomic_read(&dest->weight) > in ip_vs_sed_schedule()
      98 (__s64)doh * atomic_read(&least->weight)) { in ip_vs_sed_schedule()
      108 atomic_read(&least->activeconns), in ip_vs_sed_schedule()
      110 atomic_read(&least->weight), loh); in ip_vs_sed_schedule()
|
| H A D | ip_vs_wlc.c |
      52 atomic_read(&dest->weight) > 0) { in ip_vs_wlc_schedule()
      69 if ((__s64)loh * atomic_read(&dest->weight) > in ip_vs_wlc_schedule()
      70 (__s64)doh * atomic_read(&least->weight)) { in ip_vs_wlc_schedule()
      80 atomic_read(&least->activeconns), in ip_vs_wlc_schedule()
      82 atomic_read(&least->weight), loh); in ip_vs_wlc_schedule()
|
| H A D | ip_vs_fo.c |
      34 atomic_read(&dest->weight) > hw) { in ip_vs_fo_schedule()
      36 hw = atomic_read(&dest->weight); in ip_vs_fo_schedule()
      44 atomic_read(&hweight->activeconns), in ip_vs_fo_schedule()
      45 atomic_read(&hweight->weight)); in ip_vs_fo_schedule()
|
| H A D | ip_vs_ovf.c |
      36 w = atomic_read(&dest->weight); in ip_vs_ovf_schedule()
      38 atomic_read(&dest->activeconns) > w || in ip_vs_ovf_schedule()
      51 atomic_read(&h->activeconns), in ip_vs_ovf_schedule()
      52 atomic_read(&h->weight)); in ip_vs_ovf_schedule()
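
From the fragments in the two entries above, ip_vs_fo appears to track the available destination with the highest weight, and ip_vs_ovf does the same but also skips a destination once its active connection count exceeds its weight, so traffic "overflows" to the next-highest weight. The sketch below illustrates that selection under those assumptions; the types and the `ovf_pick()` helper are invented for the example, not IPVS's own interface.

```c
/*
 * Highest-weight selection with an overload cut-off, as suggested by the
 * ip_vs_fo/ip_vs_ovf hits: pick the largest weight whose active
 * connection count has not exceeded that weight.
 */
#include <stdio.h>

struct dest {
	const char *name;
	int activeconns;
	int weight;
};

static const struct dest *ovf_pick(const struct dest *tab, int n)
{
	const struct dest *h = NULL;	/* best usable destination so far */
	int hw = 0;			/* its weight */

	for (int i = 0; i < n; i++) {
		int w = tab[i].weight;

		/* Skip unusable or overloaded destinations. */
		if (w <= 0 || tab[i].activeconns > w)
			continue;
		if (w > hw) {
			h = &tab[i];
			hw = w;
		}
	}
	return h;			/* NULL if everything is overloaded */
}

int main(void)
{
	struct dest tab[] = {
		{ "big",   120, 100 },	/* overloaded: 120 conns > weight 100 */
		{ "small",  10,  50 },	/* usable */
	};
	const struct dest *d = ovf_pick(tab, 2);
	printf("picked %s\n", d ? d->name : "(none)");	/* picked small */
	return 0;
}
```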
|
| H A D | ip_vs_lblc.c |
      310 if (atomic_read(&tbl->entries) <= tbl->max_size) { in ip_vs_lblc_check_expire()
      315 goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3; in ip_vs_lblc_check_expire()
      419 if (atomic_read(&dest->weight) > 0) { in __ip_vs_lblc_schedule()
      436 if ((__s64)loh * atomic_read(&dest->weight) > in __ip_vs_lblc_schedule()
      437 (__s64)doh * atomic_read(&least->weight)) { in __ip_vs_lblc_schedule()
      447 atomic_read(&least->activeconns), in __ip_vs_lblc_schedule()
      449 atomic_read(&least->weight), loh); in __ip_vs_lblc_schedule()
      462 if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) { in is_overloaded()
      466 if (atomic_read(&d->activeconns)*2 in is_overloaded()
      467 < atomic_read(&d->weight)) { in is_overloaded()
      [all …]
|
| /linux-6.15/drivers/pinctrl/qcom/ |
| H A D | tlmm-test.c |
      178 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 0); in tlmm_test_silent()
      235 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10); in tlmm_test_rising()
      259 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10); in tlmm_test_falling()
      284 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10); in tlmm_test_low()
      309 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10); in tlmm_test_high()
      331 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10); in tlmm_test_falling_in_handler()
      353 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10); in tlmm_test_rising_in_handler()
      379 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10); in tlmm_test_thread_rising()
      528 before_edge = atomic_read(&priv->intr_count); in tlmm_test_rising_while_disabled()
      532 after_edge = atomic_read(&priv->intr_count); in tlmm_test_rising_while_disabled()
      [all …]
|
| /linux-6.15/fs/bcachefs/ |
| H A D | six.c |
      74 if ((atomic_read(&lock->state) & mask) != mask) in six_set_bitmask()
      80 if (atomic_read(&lock->state) & mask) in six_clear_bitmask()
      160 old = atomic_read(&lock->state); in __do_six_trylock()
      191 old = atomic_read(&lock->state); in __do_six_trylock()
      207 (atomic_read(&lock->state) & SIX_LOCK_HELD_write)); in __do_six_trylock()
      363 if (atomic_read(&lock->state) & SIX_LOCK_NOSPIN) in six_optimistic_spin()
      580 state = atomic_read(&lock->state); in do_six_unlock_type()
      663 u32 old = atomic_read(&lock->state), new; in six_lock_tryupgrade()
      739 EBUG_ON(!(atomic_read(&lock->state) & in six_lock_increment()
      768 u32 state = atomic_read(&lock->state); in six_lock_wakeup_all()
      [all …]
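
The six.c hits above show the whole lock state (holder counts plus flag bits such as SIX_LOCK_HELD_write and SIX_LOCK_NOSPIN) packed into one atomic word that is peeked with `atomic_read()` before any expensive update: `six_set_bitmask()` only performs the read-modify-write when some requested bit is still clear. Below is a small userspace sketch of that "check first, then atomically OR/AND" idea; the flag values and the `fetch_or`/`fetch_and` updates are illustrative assumptions, not bcachefs's actual state layout or update sequence.

```c
/*
 * Atomic state word with flag bits: read cheaply, and only do the
 * atomic read-modify-write when the bits actually need to change.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define LOCK_HELD_write	(1u << 0)	/* invented flag bits */
#define LOCK_NOSPIN	(1u << 1)

static atomic_uint_fast32_t lock_state;

static void set_bitmask(uint32_t mask)
{
	/* Skip the RMW if every requested bit is already set. */
	if ((atomic_load(&lock_state) & mask) != mask)
		atomic_fetch_or(&lock_state, mask);
}

static void clear_bitmask(uint32_t mask)
{
	/* Skip the RMW if none of the bits are set. */
	if (atomic_load(&lock_state) & mask)
		atomic_fetch_and(&lock_state, ~mask);
}

int main(void)
{
	set_bitmask(LOCK_HELD_write | LOCK_NOSPIN);
	printf("state after set:   %#lx\n", (unsigned long)atomic_load(&lock_state));
	clear_bitmask(LOCK_NOSPIN);
	printf("state after clear: %#lx\n", (unsigned long)atomic_load(&lock_state));
	return 0;
}
```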
|
| H A D | nocow_locking.c |
      17 if (l->b[i] == dev_bucket && atomic_read(&l->l[i])) in bch2_bucket_nocow_is_locked()
      57 if (!atomic_read(&l->l[i])) { in __bch2_bucket_nocow_trylock()
      65 v = atomic_read(&l->l[i]); in __bch2_bucket_nocow_trylock()
      69 v = atomic_read(&l->l[i]); in __bch2_bucket_nocow_trylock()
      102 v |= atomic_read(&l->l[i]); in bch2_nocow_locks_to_text()
      114 int v = atomic_read(&l->l[i]); in bch2_nocow_locks_to_text()
      133 BUG_ON(atomic_read(&l->l[j])); in bch2_fs_nocow_locking_exit()
|
| /linux-6.15/fs/afs/ |
| H A D | validation.c |
      130 if (atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break)) in afs_check_validity()
      140 else if (vnode->cb_scrub != atomic_read(&volume->cb_scrub)) in afs_check_validity()
      243 snap = atomic_read(&volume->cb_ro_snapshot); in afs_update_volume_creation_time()
      334 unsigned int cb_v_break = atomic_read(&volume->cb_v_break); in afs_update_volume_state()
      335 unsigned int cb_v_check = atomic_read(&volume->cb_v_check); in afs_update_volume_state()
      421 atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break)) { in afs_validate()
      428 cb_ro_snapshot = atomic_read(&volume->cb_ro_snapshot); in afs_validate()
      429 cb_scrub = atomic_read(&volume->cb_scrub); in afs_validate()
      437 atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break) || in afs_validate()
      458 cb_ro_snapshot = atomic_read(&volume->cb_ro_snapshot); in afs_validate()
      [all …]
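
The afs hits above are a generation-counter validity check: the volume keeps atomic counters (cb_v_break, cb_ro_snapshot, cb_scrub) that are bumped when cached state may have become stale, and validation simply compares the current `atomic_read()` values against the values recorded when the cache was last known good. A minimal sketch of that idea follows, assuming a single invented "break" counter; it is not afs's actual structure layout or revalidation path.

```c
/*
 * Generation-counter cache validity: remember the counter value you
 * validated against, and revalidate whenever the shared counter moved.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct volume {
	atomic_uint cb_break;		/* bumped when cached state may be stale */
};

struct vnode {
	unsigned int cb_break_seen;	/* value captured at last validation */
};

static bool still_valid(const struct vnode *vn, struct volume *vol)
{
	return vn->cb_break_seen == atomic_load(&vol->cb_break);
}

static void revalidate(struct vnode *vn, struct volume *vol)
{
	/* ...refetch status from the server here, then record the counter... */
	vn->cb_break_seen = atomic_load(&vol->cb_break);
}

int main(void)
{
	struct volume vol = { .cb_break = 0 };
	struct vnode vn = { .cb_break_seen = 0 };

	printf("valid: %d\n", still_valid(&vn, &vol));	/* 1 */
	atomic_fetch_add(&vol.cb_break, 1);		/* a "callback break" */
	printf("valid: %d\n", still_valid(&vn, &vol));	/* 0 */
	revalidate(&vn, &vol);
	printf("valid: %d\n", still_valid(&vn, &vol));	/* 1 again */
	return 0;
}
```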
|
| /linux-6.15/drivers/infiniband/hw/vmw_pvrdma/ |
| H A D | pvrdma_ring.h |
      71 const unsigned int idx = atomic_read(var); in pvrdma_idx()
      80 __u32 idx = atomic_read(var) + 1; /* Increment. */ in pvrdma_idx_ring_inc()
      89 const __u32 tail = atomic_read(&r->prod_tail); in pvrdma_idx_ring_has_space()
      90 const __u32 head = atomic_read(&r->cons_head); in pvrdma_idx_ring_has_space()
      103 const __u32 tail = atomic_read(&r->prod_tail); in pvrdma_idx_ring_has_data()
      104 const __u32 head = atomic_read(&r->cons_head); in pvrdma_idx_ring_has_data()
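
The pvrdma ring keeps a producer tail and a consumer head in atomics shared with the device; `pvrdma_idx_ring_has_space()` and `_has_data()` snapshot both with `atomic_read()` and derive fullness or emptiness from the pair. The exact index encoding is not visible in the hits above, so the sketch below uses a conventional scheme (free-running 32-bit indices over a power-of-two ring, empty when head == tail, full when tail − head == size) purely as an illustration, not pvrdma's actual formula.

```c
/*
 * Generic producer/consumer ring-index check: snapshot tail and head,
 * then decide whether there is data to consume or room to produce.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u			/* must be a power of two */

struct ring {
	atomic_uint prod_tail;		/* bumped by the producer */
	atomic_uint cons_head;		/* bumped by the consumer */
};

static bool ring_has_data(struct ring *r)
{
	uint32_t tail = atomic_load(&r->prod_tail);
	uint32_t head = atomic_load(&r->cons_head);

	return tail != head;		/* entries exist between head and tail */
}

static bool ring_has_space(struct ring *r)
{
	uint32_t tail = atomic_load(&r->prod_tail);
	uint32_t head = atomic_load(&r->cons_head);

	return (uint32_t)(tail - head) < RING_SIZE;	/* not yet full */
}

int main(void)
{
	struct ring r = { .prod_tail = 0, .cons_head = 0 };

	printf("data %d space %d\n", ring_has_data(&r), ring_has_space(&r));
	atomic_fetch_add(&r.prod_tail, RING_SIZE);	/* produce 8 entries */
	printf("data %d space %d\n", ring_has_data(&r), ring_has_space(&r));
	return 0;
}
```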
|
| /linux-6.15/drivers/crypto/bcm/ |
| H A D | util.c |
      376 atomic_read(&ipriv->session_count)); in spu_debugfs_read()
      379 atomic_read(&ipriv->stream_count)); in spu_debugfs_read()
      388 op_cnt = atomic_read(&ipriv->cipher_cnt[alg][mode]); in spu_debugfs_read()
      399 atomic_read(&ipriv->op_counts[SPU_OP_HASH])); in spu_debugfs_read()
      401 op_cnt = atomic_read(&ipriv->hash_cnt[alg]); in spu_debugfs_read()
      414 atomic_read(&ipriv->op_counts[SPU_OP_HMAC])); in spu_debugfs_read()
      416 op_cnt = atomic_read(&ipriv->hmac_cnt[alg]); in spu_debugfs_read()
      432 op_cnt = atomic_read(&ipriv->aead_cnt[alg]); in spu_debugfs_read()
      448 atomic_read(&ipriv->mb_no_spc)); in spu_debugfs_read()
      451 atomic_read(&ipriv->mb_send_fail)); in spu_debugfs_read()
      [all …]
|
| /linux-6.15/sound/core/seq/ |
| H A D | seq_lock.c |
      16 if (atomic_read(lockp) < 0) { in snd_use_lock_sync_helper()
      17 pr_warn("ALSA: seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line); in snd_use_lock_sync_helper()
      20 while (atomic_read(lockp) > 0) { in snd_use_lock_sync_helper()
      22 pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line); in snd_use_lock_sync_helper()
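
ALSA's sequencer "use locks" are plain atomic use counts: `snd_use_lock_sync_helper()` warns if the count has gone negative (an unbalanced release) and otherwise polls with `atomic_read()` until every user has dropped its reference, logging while it waits. The userspace sketch below shows the same wait-until-the-count-drains shape under stated assumptions: a pthread stands in for the other user, `nanosleep()` for the kernel's sleep between polls, and the function names are invented.

```c
/*
 * Wait for an atomic use count to drain: users increment before use and
 * decrement after; the sync side polls until the count reaches zero.
 */
#define _POSIX_C_SOURCE 199309L
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_int use_count;

static void *user_thread(void *arg)
{
	(void)arg;
	struct timespec work = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000 };

	/* use_count was already incremented by the caller ("use lock"). */
	nanosleep(&work, NULL);			/* pretend to use the object */
	atomic_fetch_sub(&use_count, 1);	/* "free lock" */
	return NULL;
}

static void use_lock_sync(void)
{
	struct timespec poll = { .tv_sec = 0, .tv_nsec = 5 * 1000 * 1000 };

	if (atomic_load(&use_count) < 0) {
		fprintf(stderr, "lock trouble [counter = %d]\n",
			atomic_load(&use_count));
		return;
	}
	while (atomic_load(&use_count) > 0)
		nanosleep(&poll, NULL);		/* wait for users to finish */
}

int main(void)
{
	pthread_t t;

	atomic_fetch_add(&use_count, 1);	/* "use lock" before handing off */
	pthread_create(&t, NULL, user_thread, NULL);
	use_lock_sync();			/* returns once the user is done */
	pthread_join(&t, NULL);
	printf("all users gone, count = %d\n", atomic_load(&use_count));
	return 0;
}
```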
|
| /linux-6.15/kernel/sched/ |
| H A D | membarrier.c |
      210 atomic_read(&mm->membarrier_state)); in ipi_sync_rq_state()
      242 membarrier_state = atomic_read(&next_mm->membarrier_state); in membarrier_update_current_mm()
      323 if (!(atomic_read(&mm->membarrier_state) & in membarrier_private_expedited()
      331 if (!(atomic_read(&mm->membarrier_state) & in membarrier_private_expedited()
      337 if (!(atomic_read(&mm->membarrier_state) & in membarrier_private_expedited()
      343 (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1)) in membarrier_private_expedited()
      438 int membarrier_state = atomic_read(&mm->membarrier_state); in sync_runqueues_membarrier_state()
      442 if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) { in sync_runqueues_membarrier_state()
      500 if (atomic_read(&mm->membarrier_state) & in membarrier_register_global_expedited()
      540 if ((atomic_read(&mm->membarrier_state) & ready_state) == ready_state) in membarrier_register_private_expedited()
      [all …]
|
| /linux-6.15/arch/mips/kernel/ |
| H A D | sync-r4k.c |
      116 while (atomic_read(&start_count) != cpus - 1) in check_counter_sync_source()
      126 while (atomic_read(&stop_count) != cpus-1) in check_counter_sync_source()
      167 if (atomic_read(&test_runs) > 0) in check_counter_sync_source()
      192 while (atomic_read(&start_count) != cpus) in synchronise_count_slave()
      210 while (atomic_read(&stop_count) != cpus) in synchronise_count_slave()
      223 if (!atomic_read(&test_runs)) { in synchronise_count_slave()
|
| /linux-6.15/arch/openrisc/kernel/ |
| H A D | sync-timer.c |
      53 while (atomic_read(&count_count_start) != 1) in synchronise_count_master()
      74 while (atomic_read(&count_count_stop) != 1) in synchronise_count_master()
      104 while (atomic_read(&count_count_start) != 2) in synchronise_count_slave()
      114 while (atomic_read(&count_count_stop) != 2) in synchronise_count_slave()
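
The MIPS sync-r4k.c and OpenRISC sync-timer.c hits above use the same rendezvous idea: each CPU increments a shared atomic counter when it reaches the synchronization point and then spins with `atomic_read()` until the counter shows that all participants have arrived, so master and slave sample their cycle counters at nearly the same moment. Below is a minimal userspace sketch of that arrival barrier, with threads standing in for CPUs; the real kernel code additionally reads and adjusts the hardware counters, which the sketch leaves out.

```c
/*
 * Arrival rendezvous on an atomic counter: announce arrival with a
 * fetch-add, then spin until everyone has arrived.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int start_count;

static void rendezvous(void)
{
	atomic_fetch_add(&start_count, 1);	/* announce arrival */
	while (atomic_load(&start_count) != NCPUS)
		;				/* spin until all have arrived */
}

static void *cpu_thread(void *arg)
{
	long id = (long)arg;

	rendezvous();
	/* All threads pass this point at (nearly) the same time. */
	printf("cpu %ld synchronised\n", id);
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, cpu_thread, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}
```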
|
| /linux-6.15/drivers/s390/scsi/ |
| H A D | zfcp_erp.c |
      139 if (atomic_read(&port->status) & in zfcp_erp_handle_failed()
      148 if (atomic_read(&adapter->status) & in zfcp_erp_handle_failed()
      176 p_status = atomic_read(&port->status); in zfcp_erp_required_act()
      184 p_status = atomic_read(&port->status); in zfcp_erp_required_act()
      189 p_status = atomic_read(&port->status); in zfcp_erp_required_act()
      192 a_status = atomic_read(&adapter->status); in zfcp_erp_required_act()
      202 a_status = atomic_read(&adapter->status); in zfcp_erp_required_act()
      239 if (!(atomic_read(&zfcp_sdev->status) & in zfcp_erp_setup_act()
      264 if (!(atomic_read(&adapter->status) & in zfcp_erp_setup_act()
      767 if (!(atomic_read(&adapter->status) & in zfcp_erp_adapter_strat_fsf_xconf()
      [all …]
|
| /linux-6.15/net/mac80211/ |
| H A D | led.h |
      16 if (!atomic_read(&local->rx_led_active)) in ieee80211_led_rx()
      25 if (!atomic_read(&local->tx_led_active)) in ieee80211_led_tx()
      74 if (atomic_read(&local->tpt_led_active)) in ieee80211_tpt_led_trig_tx()
      83 if (atomic_read(&local->tpt_led_active)) in ieee80211_tpt_led_trig_rx()
|
| /linux-6.15/fs/xfs/libxfs/ |
| H A D | xfs_group.c |
      43 ASSERT(atomic_read(&xg->xg_ref) >= 0); in xfs_group_get()
      54 ASSERT(atomic_read(&xg->xg_ref) > 0 || in xfs_group_hold()
      55 atomic_read(&xg->xg_active_ref) > 0); in xfs_group_hold()
      68 ASSERT(atomic_read(&xg->xg_ref) > 0); in xfs_group_put()
      162 XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_ref) != 0); in xfs_group_free()
      174 XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_active_ref) != 0); in xfs_group_free()
|
| /linux-6.15/fs/xfs/ |
| H A D | xfs_trans_buf.c |
      149 ASSERT(atomic_read(&bip->bli_refcount) > 0); in xfs_trans_get_buf_map()
      184 ASSERT(atomic_read(&bip->bli_refcount) > 0); in __xfs_trans_getsb()
      297 ASSERT(atomic_read(&bip->bli_refcount) > 0); in xfs_trans_read_buf_map()
      375 ASSERT(atomic_read(&bip->bli_refcount) > 0); in xfs_trans_brelse()
      425 ASSERT(atomic_read(&bip->bli_refcount) > 0); in xfs_trans_bdetach()
      469 ASSERT(atomic_read(&bip->bli_refcount) > 0); in xfs_trans_bhold()
      490 ASSERT(atomic_read(&bip->bli_refcount) > 0); in xfs_trans_bhold_release()
      518 ASSERT(atomic_read(&bip->bli_refcount) > 0); in xfs_trans_dirty_buf()
      605 ASSERT(atomic_read(&bip->bli_refcount) > 0); in xfs_trans_binval()
      659 ASSERT(atomic_read(&bip->bli_refcount) > 0); in xfs_trans_inode_buf()
      [all …]
|
| /linux-6.15/net/batman-adv/ |
| H A D | gateway_common.c |
      31 gw_mode = atomic_read(&bat_priv->gw.mode); in batadv_gw_tvlv_container_update()
      39 down = atomic_read(&bat_priv->gw.bandwidth_down); in batadv_gw_tvlv_container_update()
      40 up = atomic_read(&bat_priv->gw.bandwidth_up); in batadv_gw_tvlv_container_update()
      86 atomic_read(&bat_priv->gw.mode) == BATADV_GW_MODE_CLIENT) in batadv_gw_tvlv_ogm_handler_v1()
|