/linux-6.15/drivers/md/dm-vdo/
logical-zone.c
    67 zone->zones = zones; in initialize_zone()
    91 zone_count_t zone; in vdo_make_logical_zones() local
    104 for (zone = 0; zone < zone_count; zone++) { in vdo_make_logical_zones()
    155 if (!vdo_is_state_draining(&zone->state) || zone->notifying || in check_for_drain_complete()
    252 zone->zone_number, (unsigned long long) zone->flush_generation, in vdo_increment_logical_zone_flush_generation()
    267 struct logical_zone *zone = data_vio->logical.zone; in vdo_acquire_flush_generation_lock() local
    305 if (zone->oldest_active_generation <= zone->notification_generation) { in attempt_generation_complete_notification()
    312 zone->notification_generation = zone->oldest_active_generation; in attempt_generation_complete_notification()
    327 struct logical_zone *zone = data_vio->logical.zone; in vdo_release_flush_generation_lock() local
    340 if (!update_oldest_active_generation(zone) || zone->notifying) in vdo_release_flush_generation_lock()
    [all …]
block-map.c
    1444 a, b, zone->oldest_generation, zone->generation); in is_not_older()
    1466 (zone->oldest_generation != zone->generation)) in release_generation()
    1571 .zone = zone, in finish_page_write()
    1703 zone = data_vio->logical.zone->block_map_zone; in release_page_lock()
    2094 set_generation(zone, tree_page, zone->generation); in finish_block_map_allocation()
    2327 set_generation(zone, page, zone->generation); in vdo_write_tree_page()
    2750 VIO_PRIORITY_METADATA, zone, &zone->vio_pool); in initialize_block_map_zone()
    2756 zone->page_cache.zone = zone; in initialize_block_map_zone()
    2844 for (zone = 0; zone < map->zone_count; zone++) in vdo_free_block_map()
    2893 for (zone = 0; zone < map->zone_count; zone++) { in vdo_decode_block_map()
    [all …]
physical-zone.c
    337 vdo_int_map_free(zone->pbn_operations); in initialize_zone()
    341 zone->zone_number = zone_number; in initialize_zone()
    347 free_pbn_lock_pool(vdo_forget(zone->lock_pool)); in initialize_zone()
    348 vdo_int_map_free(zone->pbn_operations); in initialize_zone()
    399 struct physical_zone *zone = &zones->zones[index]; in vdo_free_physical_zones() local
    401 free_pbn_lock_pool(vdo_forget(zone->lock_pool)); in vdo_free_physical_zones()
    418 return ((zone == NULL) ? NULL : vdo_int_map_get(zone->pbn_operations, pbn)); in vdo_get_physical_zone_pbn_lock()
    542 struct physical_zone *zone = allocation->zone; in continue_allocating() local
    574 allocation->zone = zone->next; in continue_allocating()
    634 return_pbn_lock_to_pool(zone->lock_pool, lock); in vdo_release_physical_zone_pbn_lock()
    [all …]
/linux-6.15/drivers/block/null_blk/
zoned.c
    136 zone->capacity = zone->len; in null_init_zoned_dev()
    137 zone->wp = zone->start + zone->len; in null_init_zoned_dev()
    158 zone->wp = zone->start + zone->capacity; in null_init_zoned_dev()
    161 zone->wp = zone->start; in null_init_zoned_dev()
    388 zone->wp + nr_sectors > zone->start + zone->capacity) { in null_zone_write()
    432 if (zone->wp == zone->start + zone->capacity) { in null_zone_write()
    542 if (zone->wp > zone->start) in null_close_zone()
    548 if (zone->wp == zone->start) in null_close_zone()
    602 zone->wp = zone->start + zone->len; in null_finish_zone()
    638 zone->wp = zone->start; in null_reset_zone()
    [all …]
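The null_blk hits above all exercise one invariant: a zone spans [start, start + len), only the first capacity sectors are writable, and the write pointer wp moves from start (empty) to start + capacity (full), with reset and finish snapping it back to the extremes. Below is a minimal userspace C sketch of that arithmetic; struct zone_model and the helper names are illustrative stand-ins, not the driver's types.

/*
 * Minimal standalone model of the write-pointer arithmetic the hits show.
 * sector_t and the zone layout are simplified; this is not the driver code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

struct zone_model {
    sector_t start;    /* first sector of the zone */
    sector_t len;      /* total zone size */
    sector_t capacity; /* writable sectors, <= len */
    sector_t wp;       /* write pointer, advances on writes */
};

/* A write must start at the write pointer and must not cross capacity. */
static bool zone_write(struct zone_model *z, sector_t sector, sector_t nr)
{
    if (sector != z->wp || z->wp + nr > z->start + z->capacity)
        return false;
    z->wp += nr;
    return true;
}

/* Reset empties the zone; finish marks it full (wp pushed past the data). */
static void zone_reset(struct zone_model *z)  { z->wp = z->start; }
static void zone_finish(struct zone_model *z) { z->wp = z->start + z->len; }

int main(void)
{
    struct zone_model z = { .start = 0, .len = 256, .capacity = 192, .wp = 0 };

    printf("write ok: %d\n", zone_write(&z, 0, 64));   /* 1 */
    printf("write ok: %d\n", zone_write(&z, 64, 192)); /* 0: would cross capacity */
    zone_finish(&z);
    printf("full:  %d\n", z.wp == z.start + z.len);    /* 1 */
    zone_reset(&z);
    printf("empty: %d\n", z.wp == z.start);            /* 1 */
    return 0;
}

The same comparisons appear in the driver hits as the bounds check in null_zone_write() and the full/empty tests in null_close_zone().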
/linux-6.15/fs/pstore/
zone.c
    413 zone->name, i, zone->off, in psz_kmsg_recover_meta()
    421 zone->name, i, zone->off, in psz_kmsg_recover_meta()
    444 zone->name, i, zone->off, in psz_kmsg_recover_meta()
    453 zone->name, i, zone->off, in psz_kmsg_recover_meta()
    489 if (!zone || zone->oldbuf) in psz_recover_zone()
    516 zone->name, zone->off, zone->buffer_size); in psz_recover_zone()
    523 zone->name, zone->off, zone->buffer_size, in psz_recover_zone()
    529 zone->name, zone->off, zone->buffer_size, in psz_recover_zone()
    652 if (zone && zone->buffer && buffer_datalen(zone)) in psz_ok()
    763 zone->oldbuf = zone->buffer; in psz_kmsg_write_record()
    [all …]
/linux-6.15/mm/
page_alloc.c
    2467 struct zone *zone; in drain_pages() local
    2712 struct zone *zone; in __free_frozen_pages() local
    3239 struct zone *zone; in unreserve_highatomic_pageblock() local
    3549 struct zone *zone; in get_page_from_freelist() local
    4016 struct zone *zone; in should_compact_retry() local
    4186 struct zone *zone; in wake_all_kswapds() local
    4316 struct zone *zone; in should_reclaim_retry() local
    4790 struct zone *zone; in alloc_pages_bulk_noprof() local
    5218 struct zone *zone; in nr_free_zone_pages() local
    5263 struct zone *zone; in build_zonerefs_node() local
    [all …]
show_mem.c
    26 static inline void show_node(struct zone *zone) in show_node() argument
    38 struct zone *zone; in si_mem_available() local
    40 for_each_zone(zone) in si_mem_available()
    104 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node() local
    106 if (is_highmem(zone)) { in si_meminfo_node()
    190 struct zone *zone; in show_free_areas() local
    305 show_node(zone); in show_free_areas()
    328 zone->name, in show_free_areas()
    363 show_node(zone); in show_free_areas()
    404 struct zone *zone; in __show_mem() local
    [all …]
vmstat.c
    56 struct zone *zone; in zero_zones_numa_counters() local
    192 struct zone *zone; in fold_vm_numa_events() local
    278 struct zone *zone; in refresh_zone_stat_thresholds() local
    323 struct zone *zone; in set_pgdat_percpu_threshold() local
    709 struct zone *zone; in inc_zone_page_state() local
    814 struct zone *zone; in refresh_cpu_vm_stats() local
    902 struct zone *zone; in cpu_vm_stats_fold() local
    1518 struct zone *zone; in walk_zones_in_node() local
    1537 struct zone *zone) in frag_show_print() argument
    1741 struct zone *zone) in zoneinfo_show_print() argument
    [all …]
compaction.c
    469 struct zone *zone = cc->zone; in update_cached_migrate() local
    492 struct zone *zone = cc->zone; in update_pageblock_skip() local
    1715 struct zone *zone = cc->zone; in isolate_freepages() local
    2477 struct zone *zone; in compaction_zonelist_suitable() local
    2801 .zone = zone, in compact_zone_order()
    2861 struct zone *zone; in try_to_compact_pages() local
    2938 struct zone *zone; in compact_node() local
    2956 cc.zone = zone; in compact_node()
    3074 struct zone *zone; in kcompactd_node_suitable() local
    3104 struct zone *zone; in kcompactd_do_work() local
    [all …]
memory_hotplug.c
    494 zone->spanned_pages = zone_end_pfn(zone) - pfn; in shrink_zone_span()
    510 zone->spanned_pages = pfn - zone->zone_start_pfn + 1; in shrink_zone_span()
    521 struct zone *zone; in update_pgdat_span() local
    546 void remove_pfn_range_from_zone(struct zone *zone, in remove_pfn_range_from_zone() argument
    704 struct zone *zone, struct memory_notify *arg) in node_states_check_changes_online() argument
    816 struct zone *zone) in auto_movable_stats_account_zone() argument
    870 struct zone *zone; in auto_movable_can_online_movable() local
    926 struct zone *zone = &pgdat->node_zones[zid]; in default_kernel_zone_for_pfn() local
    1093 struct zone *zone = page_zone(page); in adjust_present_page_count() local
    1895 struct zone *zone, struct memory_notify *arg) in node_states_check_changes_offline() argument
    [all …]
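The two shrink_zone_span() hits show the span being trimmed from either end when a pfn range is unplugged: if the range touched the front of the zone, the start pfn moves up; if it touched the back, the spanned length is cut down to the last remaining pfn. The following standalone sketch models only that arithmetic; struct zone_span and the two helpers are simplified stand-ins, not the kernel routine.

#include <stdio.h>

struct zone_span {
    unsigned long zone_start_pfn;
    unsigned long spanned_pages;
};

static unsigned long zone_end_pfn(const struct zone_span *zone)
{
    return zone->zone_start_pfn + zone->spanned_pages;
}

/* Removed range touched the front: new_start is the first remaining pfn,
 * matching "spanned_pages = zone_end_pfn(zone) - pfn" in the hits. */
static void shrink_from_start(struct zone_span *zone, unsigned long new_start)
{
    zone->spanned_pages = zone_end_pfn(zone) - new_start;
    zone->zone_start_pfn = new_start;
}

/* Removed range touched the back: new_last is the last remaining pfn,
 * matching "spanned_pages = pfn - zone->zone_start_pfn + 1". */
static void shrink_from_end(struct zone_span *zone, unsigned long new_last)
{
    zone->spanned_pages = new_last - zone->zone_start_pfn + 1;
}

int main(void)
{
    struct zone_span z = { .zone_start_pfn = 1024, .spanned_pages = 4096 };

    shrink_from_start(&z, 2048);
    printf("[%lu, %lu)\n", z.zone_start_pfn, zone_end_pfn(&z)); /* [2048, 5120) */
    shrink_from_end(&z, 4095);
    printf("[%lu, %lu)\n", z.zone_start_pfn, zone_end_pfn(&z)); /* [2048, 4096) */
    return 0;
}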
mm_init.c
    68 struct zone *zone; in mminit_verify_zonelist() local
    679 struct zone *zone = &pgdat->node_zones[zid]; in __init_page_from_nid() local
    976 struct zone *zone = node->node_zones + j; in memmap_init() local
    1345 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages() local
    1586 struct zone *zone = pgdat->node_zones + z; in free_area_init_core_hotplug() local
    1603 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
    2137 struct zone *zone = arg; in deferred_init_memmap_chunk() local
    2166 struct zone *zone; in deferred_init_memmap() local
    2329 void set_zone_contiguous(struct zone *zone) in set_zone_contiguous() argument
    2359 struct zone *zone, *izone = NULL; in pfn_range_intersects_zones() local
    [all …]
page_isolation.c
    37 struct zone *zone = page_zone(page); in has_unmovable_pages() local
    72 if (zone_idx(zone) == ZONE_MOVABLE) in has_unmovable_pages()
    157 struct zone *zone = page_zone(page); in set_migratetype_isolate() local
    195 zone->nr_isolate_pageblock++; in set_migratetype_isolate()
    214 struct zone *zone; in unset_migratetype_isolate() local
    220 zone = page_zone(page); in unset_migratetype_isolate()
    270 zone->nr_isolate_pageblock--; in unset_migratetype_isolate()
    320 struct zone *zone; in isolate_single_pageblock() local
    338 zone->zone_start_pfn); in isolate_single_pageblock()
    614 struct zone *zone; in test_pages_isolated() local
    [all …]
/linux-6.15/include/linux/
memory_hotplug.h
    11 struct zone;
    101 static inline unsigned zone_span_seqbegin(struct zone *zone) in zone_span_seqbegin() argument
    109 static inline void zone_span_writelock(struct zone *zone) in zone_span_writelock() argument
    113 static inline void zone_span_writeunlock(struct zone *zone) in zone_span_writeunlock() argument
    117 static inline void zone_seqlock_init(struct zone *zone) in zone_seqlock_init() argument
    126 struct zone *zone, bool mhp_off_inaccessible);
    129 struct zone *zone, struct memory_group *group);
    214 static inline void zone_seqlock_init(struct zone *zone) {} in zone_seqlock_init() argument
    281 struct zone *zone, struct memory_group *group);
    290 struct zone *zone, struct memory_group *group) in offline_pages() argument
    [all …]
mmzone.h
    1089 return zone->zone_start_pfn + zone->spanned_pages; in zone_end_pfn()
    1102 static inline bool zone_is_empty(struct zone *zone) in zone_is_empty() argument
    1271 struct zone *zone; /* Pointer to actual zone */ member
    1537 #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) argument
    1557 static inline bool managed_zone(struct zone *zone) in managed_zone() argument
    1569 static inline int zone_to_nid(struct zone *zone) in zone_to_nid() argument
    1579 static inline int zone_to_nid(struct zone *zone) in zone_to_nid() argument
    1606 static inline int is_highmem(struct zone *zone) in is_highmem() argument
    1637 extern struct zone *next_zone(struct zone *zone);
    1657 zone = next_zone(zone))
    [all …]
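Several of these helpers are short enough that the hits show their whole bodies: zone_end_pfn() is start plus span, zone_is_empty() tests the span, and zone_idx() is pointer arithmetic against the owning node's node_zones[] array. Below is a self-contained rendition that carries only the fields those helpers touch; MAX_NR_ZONES and the two-zone example are made up for the demo.

#include <stdbool.h>
#include <stdio.h>

#define MAX_NR_ZONES 4

struct pglist_data;

struct zone {
    unsigned long zone_start_pfn;   /* first pfn spanned by the zone */
    unsigned long spanned_pages;    /* total pfns spanned, holes included */
    struct pglist_data *zone_pgdat; /* owning node */
    const char *name;
};

struct pglist_data {
    struct zone node_zones[MAX_NR_ZONES];
};

static unsigned long zone_end_pfn(const struct zone *zone)
{
    return zone->zone_start_pfn + zone->spanned_pages;  /* one past the last pfn */
}

static bool zone_is_empty(const struct zone *zone)
{
    return zone->spanned_pages == 0;
}

/* zone_idx(): index of a zone inside its node's node_zones[] array. */
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)

int main(void)
{
    struct pglist_data node = { 0 };
    struct zone *dma = &node.node_zones[0], *normal = &node.node_zones[1];

    *dma = (struct zone){ 0, 4096, &node, "DMA" };
    *normal = (struct zone){ 4096, 0, &node, "Normal" };

    printf("%s: [%lu, %lu) idx=%ld empty=%d\n", dma->name, dma->zone_start_pfn,
           zone_end_pfn(dma), (long)zone_idx(dma), zone_is_empty(dma));
    printf("%s: idx=%ld empty=%d\n", normal->name, (long)zone_idx(normal),
           zone_is_empty(normal));
    return 0;
}

The pointer-difference trick behind zone_idx() is why zones are stored as a fixed array inside each node rather than as separately allocated objects.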
vmstat.h
    142 static inline void zone_numa_event_add(long x, struct zone *zone, in zone_numa_event_add() argument
    149 static inline unsigned long zone_numa_event_state(struct zone *zone, in zone_numa_event_state() argument
    162 static inline void zone_page_state_add(long x, struct zone *zone, in zone_page_state_add() argument
    204 static inline unsigned long zone_page_state(struct zone *zone, in zone_page_state() argument
    221 static inline unsigned long zone_page_state_snapshot(struct zone *zone, in zone_page_state_snapshot() argument
    240 __count_numa_event(struct zone *zone, enum numa_stat_item item) in __count_numa_event() argument
    300 void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);
    302 int calculate_pressure_threshold(struct zone *zone);
    303 int calculate_normal_threshold(struct zone *zone);
    312 static inline void __mod_zone_page_state(struct zone *zone, in __mod_zone_page_state() argument
    [all …]
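The hits above only show signatures, so the sketch below is an assumption-laden model of the underlying pattern: each counter update is applied both to the zone's own array and to a global mirror (the kernel additionally batches through per-CPU deltas and atomics, all omitted here), and reads clamp a transiently negative sum to zero, which is what zone_page_state() guards against. Types and names below are simplified stand-ins.

#include <stdio.h>

enum zone_stat_item { NR_FREE_PAGES, NR_ZONE_STATS };

struct zone_stats {
    long vm_stat[NR_ZONE_STATS];
};

static long vm_zone_stat[NR_ZONE_STATS];   /* global mirror across all zones */

static void zone_page_state_add(long x, struct zone_stats *zone,
                                enum zone_stat_item item)
{
    zone->vm_stat[item] += x;       /* per-zone counter */
    vm_zone_stat[item] += x;        /* system-wide counter */
}

static long zone_page_state(const struct zone_stats *zone,
                            enum zone_stat_item item)
{
    long x = zone->vm_stat[item];

    return x < 0 ? 0 : x;           /* never report a negative count */
}

int main(void)
{
    struct zone_stats dma = { 0 }, normal = { 0 };

    zone_page_state_add(128, &dma, NR_FREE_PAGES);
    zone_page_state_add(512, &normal, NR_FREE_PAGES);
    zone_page_state_add(-64, &normal, NR_FREE_PAGES);

    printf("dma=%ld normal=%ld total=%ld\n",
           zone_page_state(&dma, NR_FREE_PAGES),
           zone_page_state(&normal, NR_FREE_PAGES),
           vm_zone_stat[NR_FREE_PAGES]);   /* 128 448 576 */
    return 0;
}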
/linux-6.15/tools/power/cpupower/lib/
powercap.c
    132 strcat(file, zone->sys_name); in sysfs_powercap_get64_val()
    175 strcat(path, zone->sys_name); in powercap_zone_get_enabled()
    206 strcat(file, zone->sys_name); in powercap_read_zone()
    209 if (zone->parent) in powercap_read_zone()
    210 zone->tree_depth = zone->parent->tree_depth + 1; in powercap_read_zone()
    213 zone->has_energy_uj = 1; in powercap_read_zone()
    216 zone->has_power_uw = 1; in powercap_read_zone()
    249 child_zone->parent = zone; in powercap_read_zone()
    295 if (!zone) in powercap_walk_zones()
    298 ret = f(zone); in powercap_walk_zones()
    [all …]
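These cpupower hits outline a tree of powercap zones: powercap_read_zone() derives tree_depth from the parent and links children to it, and powercap_walk_zones() recurses while applying a callback f(zone). The standalone model below mirrors only that parent/child bookkeeping and depth-first walk; the pc_zone type, the fixed child array, and its size limit are invented for the example and are not the library's structures.

#include <stdio.h>

#define MAX_CHILDREN 4

struct pc_zone {
    const char *sys_name;            /* sysfs directory name of the zone */
    struct pc_zone *parent;
    struct pc_zone *children[MAX_CHILDREN];
    int nr_children;
    int tree_depth;
};

static void pc_zone_add_child(struct pc_zone *zone, struct pc_zone *child)
{
    child->parent = zone;
    child->tree_depth = zone->tree_depth + 1;   /* depth derived from parent */
    zone->children[zone->nr_children++] = child;
}

/* Depth-first walk: apply f() to the zone, then recurse into its children. */
static int pc_walk_zones(struct pc_zone *zone, int (*f)(struct pc_zone *))
{
    int i, ret;

    if (!zone)
        return 0;
    ret = f(zone);
    if (ret)
        return ret;
    for (i = 0; i < zone->nr_children; i++) {
        ret = pc_walk_zones(zone->children[i], f);
        if (ret)
            return ret;
    }
    return 0;
}

static int print_zone(struct pc_zone *zone)
{
    printf("%*s%s (depth %d)\n", zone->tree_depth * 2, "", zone->sys_name,
           zone->tree_depth);
    return 0;
}

int main(void)
{
    struct pc_zone pkg = { .sys_name = "intel-rapl:0" };
    struct pc_zone core = { .sys_name = "intel-rapl:0:0" };

    pc_zone_add_child(&pkg, &core);
    return pc_walk_zones(&pkg, print_zone);
}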
/linux-6.15/fs/xfs/libxfs/
xfs_zones.c
    19 struct blk_zone *zone, in xfs_zone_validate_empty() argument
    37 struct blk_zone *zone, in xfs_zone_validate_wp() argument
    68 struct blk_zone *zone, in xfs_zone_validate_full() argument
    86 struct blk_zone *zone, in xfs_zone_validate_seq() argument
    92 switch (zone->cond) { in xfs_zone_validate_seq()
    105 rtg_rgno(rtg), zone->cond); in xfs_zone_validate_seq()
    116 struct blk_zone *zone, in xfs_zone_validate_conv() argument
    121 switch (zone->cond) { in xfs_zone_validate_conv()
    134 struct blk_zone *zone, in xfs_zone_validate() argument
    158 if (zone->len != zone->capacity) { in xfs_zone_validate()
    [all …]
/linux-6.15/drivers/md/dm-vdo/indexer/
index.c
    110 unsigned int zone; in enqueue_barrier_messages() local
    112 for (zone = 0; zone < index->zone_count; zone++) { in enqueue_barrier_messages()
    205 swap(zone->open_chapter, zone->writing_chapter); in swap_open_chapter()
    239 if (zone->id == i) in announce_chapter_closed()
    259 (unsigned long long) zone->newest_virtual_chapter, zone->id, in open_next_chapter()
    261 zone->open_chapter->capacity - zone->open_chapter->size); in open_next_chapter()
    268 uds_set_volume_index_zone_open_chapter(zone->index->volume_index, zone->id, in open_next_chapter()
    272 finished_zones = start_closing_chapter(zone->index, zone->id, in open_next_chapter()
    1113 if (zone == NULL) in free_index_zone()
    1118 vdo_free(zone); in free_index_zone()
    [all …]
/linux-6.15/drivers/md/
dm-zoned-metadata.c
    226 return zone->id - zone->dev->zone_offset; in dmz_dev_zone_id()
    308 if (!zone) in dmz_insert()
    322 return zone; in dmz_insert()
    1404 zmd->sb[0].zone = zone; in dmz_init_zone()
    1625 zone->id, zone->wp_block, wp); in dmz_handle_seq_write_err()
    1628 dmz_invalidate_blocks(zmd, zone, zone->wp_block, in dmz_handle_seq_write_err()
    2029 if (!zone) in dmz_get_zone_for_reclaim()
    2513 zone->id, zone->weight, in dmz_validate_blocks()
    2593 zone->id, zone->weight, n); in dmz_invalidate_blocks()
    2908 zone = dmz_get(zmd, zmd->sb[0].zone->id + i); in dmz_ctr_metadata()
    [all …]
/linux-6.15/include/net/netfilter/
nf_conntrack_zones.h
    12 return &ct->zone; in nf_ct_zone()
    21 zone->id = id; in nf_ct_zone_init()
    22 zone->flags = flags; in nf_ct_zone_init()
    23 zone->dir = dir; in nf_ct_zone_init()
    25 return zone; in nf_ct_zone_init()
    36 if (tmpl->zone.flags & NF_CT_FLAG_MARK) in nf_ct_zone_tmpl()
    43 const struct nf_conntrack_zone *zone) in nf_ct_zone_add() argument
    46 ct->zone = *zone; in nf_ct_zone_add()
    53 return zone->dir & (1 << dir); in nf_ct_zone_matches_dir()
    60 return nf_ct_zone_matches_dir(zone, dir) ? in nf_ct_zone_id()
    [all …]
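The conntrack-zone helpers shown here are tiny: nf_ct_zone_init() fills in id, flags and a direction bitmask, nf_ct_zone_matches_dir() tests one bit per direction, and nf_ct_zone_id() falls back to a default id when the zone does not apply to the packet's direction. The reduced model below reproduces that logic; the ct_zone type and the default-id constant are stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define CT_DEFAULT_ZONE_ID 0   /* assumed default for this example */

enum ct_dir { CT_DIR_ORIGINAL = 0, CT_DIR_REPLY = 1 };

struct ct_zone {
    uint16_t id;
    uint8_t flags;
    uint8_t dir;    /* bitmask of directions the zone applies to */
};

static struct ct_zone *ct_zone_init(struct ct_zone *zone, uint16_t id,
                                    uint8_t dir, uint8_t flags)
{
    zone->id = id;
    zone->flags = flags;
    zone->dir = dir;
    return zone;
}

static int ct_zone_matches_dir(const struct ct_zone *zone, enum ct_dir dir)
{
    return zone->dir & (1 << dir);
}

static uint16_t ct_zone_id(const struct ct_zone *zone, enum ct_dir dir)
{
    return ct_zone_matches_dir(zone, dir) ? zone->id : CT_DEFAULT_ZONE_ID;
}

int main(void)
{
    struct ct_zone zone;

    ct_zone_init(&zone, 42, 1 << CT_DIR_ORIGINAL, 0);
    printf("orig: %u, reply: %u\n",
           ct_zone_id(&zone, CT_DIR_ORIGINAL),   /* 42 */
           ct_zone_id(&zone, CT_DIR_REPLY));     /* 0: falls back to default */
    return 0;
}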
/linux-6.15/fs/adfs/
map.c
    179 } while (--zone > 0); in scan_map()
    202 unsigned int zone; in adfs_map_statfs() local
    209 } while (--zone > 0); in adfs_map_statfs()
    322 for (zone = 1; zone < nzones; zone++) { in adfs_map_layout()
    324 dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS; in adfs_map_layout()
    337 unsigned int zone; in adfs_map_read() local
    339 for (zone = 0; zone < nzones; zone++) { in adfs_map_read()
    340 dm[zone].dm_bh = sb_bread(sb, map_addr + zone); in adfs_map_read()
    341 if (!dm[zone].dm_bh) in adfs_map_read()
    350 unsigned int zone; in adfs_map_relse() local
    [all …]
/linux-6.15/kernel/power/
snapshot.c
    630 struct zone *zone; in create_mem_extents() local
    755 zone = bm->cur.zone; in memory_bm_find_bit()
    785 if (zone == bm->cur.zone && in memory_bm_find_bit()
    803 bm->cur.zone = zone; in memory_bm_find_bit()
    1245 static void mark_free_pages(struct zone *zone) in mark_free_pages() argument
    1300 struct zone *zone; in count_free_highmem_pages() local
    1348 struct zone *zone; in count_highmem_pages() local
    1411 struct zone *zone; in count_data_pages() local
    1526 struct zone *zone; in copy_data_pages() local
    1828 struct zone *zone; in hibernate_preallocate_memory() local
    [all …]
/linux-6.15/virt/kvm/
coalesced_mmio.c
    36 if (addr < dev->zone.addr) in coalesced_mmio_in_range()
    38 if (addr + len > dev->zone.addr + dev->zone.size) in coalesced_mmio_in_range()
    123 struct kvm_coalesced_mmio_zone *zone) in kvm_vm_ioctl_register_coalesced_mmio() argument
    128 if (zone->pio != 1 && zone->pio != 0) in kvm_vm_ioctl_register_coalesced_mmio()
    138 dev->zone = *zone; in kvm_vm_ioctl_register_coalesced_mmio()
    142 zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, in kvm_vm_ioctl_register_coalesced_mmio()
    143 zone->addr, zone->size, &dev->dev); in kvm_vm_ioctl_register_coalesced_mmio()
    159 struct kvm_coalesced_mmio_zone *zone) in kvm_vm_ioctl_unregister_coalesced_mmio() argument
    164 if (zone->pio != 1 && zone->pio != 0) in kvm_vm_ioctl_unregister_coalesced_mmio()
    170 if (zone->pio == dev->zone.pio && in kvm_vm_ioctl_unregister_coalesced_mmio()
    [all …]
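The coalesced_mmio_in_range() hits show the containment test for a registered zone: an access is coalesced only if it neither starts before zone.addr nor runs past zone.addr + zone.size. The sketch below reproduces just that check with a reduced stand-in for kvm_coalesced_mmio_zone; the example addresses are arbitrary.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mmio_zone {
    uint64_t addr;   /* guest physical start of the zone */
    uint64_t size;   /* zone length in bytes */
};

static bool mmio_in_zone(const struct mmio_zone *zone, uint64_t addr, int len)
{
    /* Reject accesses that start before the zone or run past its end. */
    if (addr < zone->addr)
        return false;
    if (addr + len > zone->addr + zone->size)
        return false;
    return true;
}

int main(void)
{
    struct mmio_zone zone = { .addr = 0xfee00000, .size = 0x1000 };

    printf("%d\n", mmio_in_zone(&zone, 0xfee00ff0, 8));   /* 1: fully inside */
    printf("%d\n", mmio_in_zone(&zone, 0xfee00ffc, 8));   /* 0: runs past end */
    printf("%d\n", mmio_in_zone(&zone, 0xfedffffc, 8));   /* 0: starts before */
    return 0;
}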
/linux-6.15/drivers/net/ethernet/mellanox/mlx4/
alloc.c
    250 struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL); in mlx4_zone_add_one() local
    252 if (NULL == zone) in mlx4_zone_add_one()
    279 *puid = zone->uid; in mlx4_zone_add_one()
    328 kfree(zone); in mlx4_zone_allocator_destroy()
    349 uid = zone->uid; in __mlx4_alloc_from_zone()
    421 mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr); in __mlx4_free_from_zone()
    432 return zone; in __mlx4_find_zone_by_uid()
    447 bitmap = zone == NULL ? NULL : zone->bitmap; in mlx4_zone_get_bitmap()
    463 if (NULL == zone) { in mlx4_zone_remove_one()
    472 kfree(zone); in mlx4_zone_remove_one()
    [all …]
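The mlx4 hits suggest a uid-keyed lookup pattern: each zone carries a uid and a bitmap, __mlx4_find_zone_by_uid() walks the allocator's zones, and mlx4_zone_get_bitmap() returns the bitmap or NULL on a miss. The sketch below imitates that pattern with a plain singly linked list and a string standing in for struct mlx4_bitmap; the list handling and all names are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

struct zone_entry {
    uint32_t uid;
    const char *bitmap;        /* stand-in for the real bitmap object */
    struct zone_entry *next;
};

static struct zone_entry *find_zone_by_uid(struct zone_entry *head, uint32_t uid)
{
    for (; head; head = head->next)
        if (head->uid == uid)
            return head;
    return NULL;
}

/* Mirrors "bitmap = zone == NULL ? NULL : zone->bitmap" from the hits. */
static const char *zone_get_bitmap(struct zone_entry *head, uint32_t uid)
{
    struct zone_entry *zone = find_zone_by_uid(head, uid);

    return zone == NULL ? NULL : zone->bitmap;
}

int main(void)
{
    struct zone_entry qp = { .uid = 1, .bitmap = "qp-bitmap", .next = NULL };
    struct zone_entry cq = { .uid = 2, .bitmap = "cq-bitmap", .next = &qp };

    printf("%s\n", zone_get_bitmap(&cq, 1));                    /* qp-bitmap */
    printf("%s\n", zone_get_bitmap(&cq, 7) ? "found" : "miss"); /* miss */
    return 0;
}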
/linux-6.15/include/trace/events/
compaction.h
    194 TP_PROTO(struct zone *zone,
    223 TP_PROTO(struct zone *zone,
    232 TP_PROTO(struct zone *zone,
    241 TP_PROTO(struct zone *zone, int order),
    243 TP_ARGS(zone, order),
    274 TP_PROTO(struct zone *zone, int order),
    276 TP_ARGS(zone, order)
    281 TP_PROTO(struct zone *zone, int order),
    283 TP_ARGS(zone, order)
    288 TP_PROTO(struct zone *zone, int order),
    [all …]