/linux-6.15/include/linux/
oom.h
    12    struct zonelist;
    30    struct zonelist *zonelist;    [member]
mmzone.h
    1289  struct zonelist {    [struct]
    1354  struct zonelist node_zonelists[MAX_ZONELISTS];    [member]
    1727  static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,    [in first_zones_zonelist(), argument]
    1731  return next_zones_zonelist(zonelist->_zonerefs,    [in first_zones_zonelist()]
    1774  struct zonelist *zonelist;    [in movable_only_nodes(), local]
    1787  zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];    [in movable_only_nodes()]
    1788  z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);    [in movable_only_nodes()]
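The mmzone.h hits above are the core of the data structure: struct zonelist holds the ordered array of zonerefs, each node's pglist_data embeds one list per MAX_ZONELISTS kind, and first_zones_zonelist() returns the first zoneref that satisfies a zone-type ceiling and an optional nodemask. A minimal sketch of that lookup pattern, modeled on movable_only_nodes() and mempolicy_slab_node(); the helper name preferred_zone_for() is invented for illustration:

    /*
     * preferred_zone_for() is a hypothetical helper, not kernel code: it looks
     * up the first zone an allocation on @nid may use, honouring the highest
     * zone type permitted by @gfp_mask and an optional @nodes mask. The lookup
     * pattern mirrors movable_only_nodes() and mempolicy_slab_node().
     */
    #include <linux/gfp.h>
    #include <linux/mmzone.h>

    static struct zone *preferred_zone_for(int nid, gfp_t gfp_mask,
                                           nodemask_t *nodes)
    {
            struct zonelist *zonelist;
            struct zoneref *z;

            /* ZONELIST_FALLBACK lists this node's zones, then remote ones. */
            zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];

            /* First zoneref whose zone type is <= what the gfp mask allows. */
            z = first_zones_zonelist(zonelist, gfp_zone(gfp_mask), nodes);

            /* NULL when no zone in the list satisfies both constraints. */
            return zonelist_zone(z);
    }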
swap.h
    410   extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp.h
    212   static inline struct zonelist *node_zonelist(int nid, gfp_t flags)    [in node_zonelist()]
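node_zonelist() is the usual way callers turn a (node, gfp flags) pair into a zonelist. A hedged sketch of the typical call; zonelist_for_request() is a made-up wrapper, and the note about __GFP_THISNODE applies to CONFIG_NUMA builds, where it selects the node-local ZONELIST_NOFALLBACK list instead of the distance-ordered ZONELIST_FALLBACK one:

    /*
     * zonelist_for_request() is a made-up wrapper showing the usual call:
     * pick the zonelist for the node the current CPU sits on. On NUMA
     * builds, __GFP_THISNODE makes node_zonelist() return the node-local
     * ZONELIST_NOFALLBACK list; otherwise ZONELIST_FALLBACK is used, which
     * also contains remote nodes ordered by distance.
     */
    #include <linux/gfp.h>
    #include <linux/mmzone.h>
    #include <linux/topology.h>

    static struct zonelist *zonelist_for_request(gfp_t gfp_mask)
    {
            int nid = numa_node_id();       /* allocate near the running CPU */

            return node_zonelist(nid, gfp_mask);
    }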
/linux-6.15/Documentation/translations/zh_CN/mm/
numa.rst  (Chinese translation of Documentation/mm/numa.rst; matched lines rendered in English)
    57    ...one or more of] an ordered "zonelist" is built. The zonelist specifies, when a selected zone/node cannot satisfy the allocation request, ...
    63    ...represent a relatively scarce resource. Linux chose a default Node ordered zonelist. This means that when using the NUMA-dist...
/linux-6.15/Documentation/mm/
numa.rst
    74    an ordered "zonelist". A zonelist specifies the zones/nodes to visit when a
    84    a default Node ordered zonelist. This means it tries to fallback to other zones
    89    Linux will attempt to allocate from the first node in the appropriate zonelist
    92    nodes' zones in the selected zonelist looking for the first zone in the list
    120   zonelist--will not be the node itself. Rather, it will be the node that the
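These documentation lines describe the allocator's traversal: try the preferred node's zones first, then fall back through the remaining zonelist entries. A rough sketch of that walk using for_each_zone_zonelist_nodemask(); the helper name and the trivial "has free pages" test are illustrative placeholders for the real watermark logic in get_page_from_freelist():

    /*
     * pick_zone_with_free_pages() is an illustrative helper, not the real
     * allocator: it visits the preferred node's zones first and then each
     * fallback node's zones in zonelist order, restricted to @nodemask.
     * The bare NR_FREE_PAGES test stands in for the real watermark checks.
     */
    #include <linux/gfp.h>
    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    static struct zone *pick_zone_with_free_pages(struct zonelist *zonelist,
                                                  gfp_t gfp_mask,
                                                  nodemask_t *nodemask)
    {
            struct zoneref *z;
            struct zone *zone;

            for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                            gfp_zone(gfp_mask), nodemask) {
                    if (zone_page_state(zone, NR_FREE_PAGES) > 0)
                            return zone;
            }
            return NULL;    /* every candidate zone on every node is exhausted */
    }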
/linux-6.15/mm/
page_alloc.c
    3236  struct zonelist *zonelist = ac->zonelist;    [in unreserve_highatomic_pageblock(), local]
    3786  .zonelist = ac->zonelist,    [in __alloc_pages_may_oom()]
    4028  for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,    [in should_compact_retry()]
    4340  for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,    [in should_reclaim_retry()]
    4491  struct zoneref *z = first_zones_zonelist(ac->zonelist,    [in __alloc_pages_slowpath()]
    4726  ac->zonelist = node_zonelist(preferred_nid, gfp_mask);    [in prepare_alloc_pages()]
    5223  struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);    [in nr_free_zone_pages(), local]
    5225  for_each_zone_zonelist(zone, z, zonelist, offset) {    [in nr_free_zone_pages()]
    6888  struct zonelist *zonelist;    [in alloc_contig_pages_noprof(), local]
    6892  zonelist = node_zonelist(nid, gfp_mask);    [in alloc_contig_pages_noprof()]
    [all …]
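page_alloc.c both builds the allocation context's zonelist (prepare_alloc_pages()) and walks zonelists for statistics, as nr_free_zone_pages() does. A sketch in that style, summing what each zone in the local GFP_KERNEL zonelist could still provide above its high watermark; estimate_allocatable_pages() is an invented name:

    /*
     * estimate_allocatable_pages() is an invented name; the loop follows the
     * shape of nr_free_zone_pages(): walk the local node's GFP_KERNEL
     * zonelist and total what each zone could still hand out above its high
     * watermark.
     */
    #include <linux/gfp.h>
    #include <linux/mmzone.h>
    #include <linux/topology.h>

    static unsigned long estimate_allocatable_pages(void)
    {
            struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
            struct zoneref *z;
            struct zone *zone;
            unsigned long sum = 0;

            for_each_zone_zonelist(zone, z, zonelist, gfp_zone(GFP_KERNEL)) {
                    unsigned long managed = zone_managed_pages(zone);
                    unsigned long high = high_wmark_pages(zone);

                    if (managed > high)
                            sum += managed - high;
            }
            return sum;
    }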
vmscan.c
    6195  static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)    [in shrink_zones(), argument]
    6216  for_each_zone_zonelist_nodemask(zone, z, zonelist,    [in shrink_zones()]
    6318  static unsigned long do_try_to_free_pages(struct zonelist *zonelist,    [in do_try_to_free_pages(), argument]
    6336  shrink_zones(zonelist, sc);    [in do_try_to_free_pages()]
    6467  static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,    [in throttle_direct_reclaim(), argument]
    6505  for_each_zone_zonelist_nodemask(zone, z, zonelist,    [in throttle_direct_reclaim()]
    6547  unsigned long try_to_free_pages(struct zonelist *zonelist, int order,    [in try_to_free_pages(), argument]
    6582  nr_reclaimed = do_try_to_free_pages(zonelist, &sc);    [in try_to_free_pages()]
    6658  struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);    [in try_to_free_mem_cgroup_pages(), local]
    6664  nr_reclaimed = do_try_to_free_pages(zonelist, &sc);    [in try_to_free_mem_cgroup_pages()]
    [all …]
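vmscan.c is the consumer on the reclaim side: try_to_free_pages() takes the zonelist of the allocation that failed and hands it down to do_try_to_free_pages()/shrink_zones(). A hedged sketch of a caller, loosely following how the allocator slow path enters direct reclaim; the wrapper name is made up and error handling is omitted:

    /*
     * reclaim_for_failed_alloc() is a made-up wrapper; it shows how direct
     * reclaim is entered with the same zonelist the failed allocation used,
     * which is roughly what the allocator slow path does via
     * try_to_free_pages().
     */
    #include <linux/gfp.h>
    #include <linux/mmzone.h>
    #include <linux/swap.h>
    #include <linux/topology.h>

    static unsigned long reclaim_for_failed_alloc(gfp_t gfp_mask, int order,
                                                  nodemask_t *nodemask)
    {
            /* Reclaim targets the zones the allocation was allowed to use. */
            struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp_mask);

            return try_to_free_pages(zonelist, order, gfp_mask, nodemask);
    }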
oom_kill.c
    271   if (!oc->zonelist)    [in constrained_alloc()]
    295   for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,    [in constrained_alloc()]
mm_init.c
    70    struct zonelist *zonelist;    [in mminit_verify_zonelist(), local]
    78    zonelist = &pgdat->node_zonelists[listid];    [in mminit_verify_zonelist()]
    89    for_each_zone_zonelist(zone, z, zonelist, zoneid)    [in mminit_verify_zonelist()]
internal.h
    555   struct zonelist *zonelist;    [member]
mempolicy.c
    1986  struct zonelist *zonelist;    [in mempolicy_slab_node(), local]
    1988  zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];    [in mempolicy_slab_node()]
    1989  z = first_zones_zonelist(zonelist, highest_zoneidx,    [in mempolicy_slab_node()]
compaction.c
    2484  for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,    [in compaction_zonelist_suitable()]
    2870  for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,    [in try_to_compact_pages()]
hugetlb.c
    1326  struct zonelist *zonelist;    [in dequeue_hugetlb_folio_nodemask(), local]
    1335  zonelist = node_zonelist(nid, gfp_mask);    [in dequeue_hugetlb_folio_nodemask()]
    1339  for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {    [in dequeue_hugetlb_folio_nodemask()]
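hugetlb.c shows the common "walk the zonelist but act per node" pattern: dequeue_hugetlb_folio_nodemask() skips repeated zonerefs on the same node and tries each permitted node once. A generic sketch of that pattern; first_suitable_node() and the node_ok() predicate are illustrative stand-ins for the real per-node dequeue attempt:

    /*
     * first_suitable_node() and node_ok() are illustrative stand-ins for
     * dequeue_hugetlb_folio_nodemask()'s per-node dequeue attempt. Because
     * a node's zones sit next to each other in the zonelist, the loop
     * remembers the last node seen and tests each permitted node only once.
     */
    #include <linux/gfp.h>
    #include <linux/mmzone.h>
    #include <linux/numa.h>

    static int first_suitable_node(int nid, gfp_t gfp_mask, nodemask_t *nmask,
                                   bool (*node_ok)(int node))
    {
            struct zonelist *zonelist = node_zonelist(nid, gfp_mask);
            struct zoneref *z;
            struct zone *zone;
            int last_nid = NUMA_NO_NODE;

            for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                            gfp_zone(gfp_mask), nmask) {
                    int node = zone_to_nid(zone);

                    if (node == last_nid)   /* already tried this node */
                            continue;
                    last_nid = node;

                    if (node_ok(node))      /* e.g. "has a queued huge folio" */
                            return node;
            }
            return NUMA_NO_NODE;
    }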
slub.c
    2911  struct zonelist *zonelist;    [in get_any_partial(), local]
    2942  zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);    [in get_any_partial()]
    2943  for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {    [in get_any_partial()]
memcontrol.c
    1647  .zonelist = NULL,    [in mem_cgroup_out_of_memory()]
/linux-6.15/drivers/tty/
sysrq.c
    389   .zonelist = node_zonelist(first_memory_node, gfp_mask),    [in moom_callback()]
/linux-6.15/Documentation/admin-guide/sysctl/
vm.rst
    711   In non-NUMA case, a zonelist for GFP_KERNEL is ordered as following.
    717   Assume 2 node NUMA and below is zonelist of Node(0)'s GFP_KERNEL::
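These vm.rst lines document the numa_zonelist_order sysctl, which controls how fallback entries are arranged. A debug-style sketch, in the spirit of mminit_verify_zonelist() from mm/mm_init.c above, that would print the effective GFP_KERNEL fallback order for a node; dump_gfp_kernel_zonelist() is not a real kernel function:

    /*
     * dump_gfp_kernel_zonelist() is not kernel code; it is a debug-style
     * sketch, similar in spirit to mminit_verify_zonelist(), printing the
     * fallback order that numa_zonelist_order influences.
     */
    #include <linux/gfp.h>
    #include <linux/mmzone.h>
    #include <linux/printk.h>

    static void dump_gfp_kernel_zonelist(int nid)
    {
            struct zonelist *zonelist = node_zonelist(nid, GFP_KERNEL);
            struct zoneref *z;
            struct zone *zone;

            pr_info("Node %d GFP_KERNEL fallback order:", nid);
            for_each_zone_zonelist(zone, z, zonelist, gfp_zone(GFP_KERNEL))
                    pr_cont(" Node(%d)/%s", zone_to_nid(zone), zone->name);
            pr_cont("\n");
    }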
/linux-6.15/Documentation/admin-guide/mm/
numa_memory_policy.rst
    234   node zonelist.
/linux-6.15/Documentation/admin-guide/
kernel-parameters.txt
    4407  numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.