// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>
#include <linux/module.h>

#include <asm/tlbflush.h>

#include "internal.h"
#include "shuffle.h"

enum {
	MEMMAP_ON_MEMORY_DISABLE = 0,
	MEMMAP_ON_MEMORY_ENABLE,
	MEMMAP_ON_MEMORY_FORCE,
};

static int memmap_mode __read_mostly = MEMMAP_ON_MEMORY_DISABLE;

static inline unsigned long memory_block_memmap_size(void)
{
	return PHYS_PFN(memory_block_size_bytes()) * sizeof(struct page);
}
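
/*
 * Worked example (illustrative, config-dependent): a 128 MiB memory block
 * covers 32768 4 KiB pages, so with a 64 byte struct page its memmap
 * needs 32768 * 64 bytes == 2 MiB per memory block.
 */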

static inline unsigned long memory_block_memmap_on_memory_pages(void)
{
	unsigned long nr_pages = PFN_UP(memory_block_memmap_size());

	/*
	 * In "forced" memmap_on_memory mode, we add extra pages to align the
	 * vmemmap size to cover full pageblocks. That way, we can add memory
	 * even if the vmemmap size is not properly aligned; however, we might
	 * waste memory.
	 */
	if (memmap_mode == MEMMAP_ON_MEMORY_FORCE)
		return pageblock_align(nr_pages);
	return nr_pages;
}
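
/*
 * Example of the "force" rounding (illustrative): if the memmap needs 513
 * pages and pageblock_nr_pages is 512, pageblock_align() rounds the
 * reservation up to 1024 pages, wasting 511 pages per memory block.
 */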

#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY
/*
 * memory_hotplug.memmap_on_memory parameter
 */
static int set_memmap_mode(const char *val, const struct kernel_param *kp)
{
	int ret, mode;
	bool enabled;

	if (sysfs_streq(val, "force") || sysfs_streq(val, "FORCE")) {
		mode = MEMMAP_ON_MEMORY_FORCE;
	} else {
		ret = kstrtobool(val, &enabled);
		if (ret < 0)
			return ret;
		if (enabled)
			mode = MEMMAP_ON_MEMORY_ENABLE;
		else
			mode = MEMMAP_ON_MEMORY_DISABLE;
	}
	*((int *)kp->arg) = mode;
	if (mode == MEMMAP_ON_MEMORY_FORCE) {
		unsigned long memmap_pages = memory_block_memmap_on_memory_pages();

		pr_info_once("Memory hotplug will waste %ld pages in each memory block\n",
			     memmap_pages - PFN_UP(memory_block_memmap_size()));
	}
	return 0;
}

static int get_memmap_mode(char *buffer, const struct kernel_param *kp)
{
	int mode = *((int *)kp->arg);

	if (mode == MEMMAP_ON_MEMORY_FORCE)
		return sprintf(buffer, "force\n");
	return sprintf(buffer, "%c\n", mode ? 'Y' : 'N');
}

static const struct kernel_param_ops memmap_mode_ops = {
	.set = set_memmap_mode,
	.get = get_memmap_mode,
};
module_param_cb(memmap_on_memory, &memmap_mode_ops, &memmap_mode, 0444);
MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug\n"
		 "With value \"force\" it could result in memory wastage due "
		 "to memmap size limitations (Y/N/force)");
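
/*
 * Usage (sketch): the parameter is read-only at runtime (0444) and is
 * typically set on the kernel command line, e.g.
 * "memory_hotplug.memmap_on_memory=force".
 */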

static inline bool mhp_memmap_on_memory(void)
{
	return memmap_mode != MEMMAP_ON_MEMORY_DISABLE;
}
#else
static inline bool mhp_memmap_on_memory(void)
{
	return false;
}
#endif

enum {
	ONLINE_POLICY_CONTIG_ZONES = 0,
	ONLINE_POLICY_AUTO_MOVABLE,
};

static const char * const online_policy_to_str[] = {
	[ONLINE_POLICY_CONTIG_ZONES] = "contig-zones",
	[ONLINE_POLICY_AUTO_MOVABLE] = "auto-movable",
};

static int set_online_policy(const char *val, const struct kernel_param *kp)
{
	int ret = sysfs_match_string(online_policy_to_str, val);

	if (ret < 0)
		return ret;
	*((int *)kp->arg) = ret;
	return 0;
}

static int get_online_policy(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", online_policy_to_str[*((int *)kp->arg)]);
}

/*
 * memory_hotplug.online_policy: configure online behavior when onlining without
 * specifying a zone (MMOP_ONLINE)
 *
 * "contig-zones": keep zone contiguous
 * "auto-movable": online memory to ZONE_MOVABLE if the configuration
 *		   (auto_movable_ratio, auto_movable_numa_aware) allows for it
 */
static int online_policy __read_mostly = ONLINE_POLICY_CONTIG_ZONES;
static const struct kernel_param_ops online_policy_ops = {
	.set = set_online_policy,
	.get = get_online_policy,
};
module_param_cb(online_policy, &online_policy_ops, &online_policy, 0644);
MODULE_PARM_DESC(online_policy,
		"Set the online policy (\"contig-zones\", \"auto-movable\") "
		"Default: \"contig-zones\"");
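
/*
 * Usage (sketch): with 0644 permissions the policy can be switched at
 * runtime, e.g.:
 *	echo auto-movable > /sys/module/memory_hotplug/parameters/online_policy
 */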

/*
 * memory_hotplug.auto_movable_ratio: specify maximum MOVABLE:KERNEL ratio
 *
 * The ratio represents an upper limit and the kernel might decide to not
 * online some memory to ZONE_MOVABLE -- e.g., because hotplugged KERNEL memory
 * doesn't allow for more MOVABLE memory.
 */
static unsigned int auto_movable_ratio __read_mostly = 301;
module_param(auto_movable_ratio, uint, 0644);
MODULE_PARM_DESC(auto_movable_ratio,
		"Set the maximum ratio of MOVABLE:KERNEL memory in the system "
		"in percent for \"auto-movable\" online policy. Default: 301");
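
/*
 * Worked example (illustrative): with the default ratio of 301, a system
 * with 4 GiB of early KERNEL memory may online at most
 * 4 GiB * 301 / 100 (roughly 12 GiB) of memory to ZONE_MOVABLE under the
 * "auto-movable" policy.
 */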

/*
 * memory_hotplug.auto_movable_numa_aware: consider numa node stats
 */
#ifdef CONFIG_NUMA
static bool auto_movable_numa_aware __read_mostly = true;
module_param(auto_movable_numa_aware, bool, 0644);
MODULE_PARM_DESC(auto_movable_numa_aware,
		"Consider numa node stats in addition to global stats in "
		"\"auto-movable\" online policy. Default: true");
#endif /* CONFIG_NUMA */

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() to register a callback and
 * restore_online_page_callback() to restore the generic callback.
 */

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}

bool movable_node_enabled = false;

static int mhp_default_online_type = -1;
int mhp_get_default_online_type(void)
{
	if (mhp_default_online_type >= 0)
		return mhp_default_online_type;

	if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE))
		mhp_default_online_type = MMOP_OFFLINE;
	else if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO))
		mhp_default_online_type = MMOP_ONLINE;
	else if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL))
		mhp_default_online_type = MMOP_ONLINE_KERNEL;
	else if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE))
		mhp_default_online_type = MMOP_ONLINE_MOVABLE;
	else
		mhp_default_online_type = MMOP_OFFLINE;

	return mhp_default_online_type;
}

void mhp_set_default_online_type(int online_type)
{
	mhp_default_online_type = online_type;
}

static int __init setup_memhp_default_state(char *str)
{
	const int online_type = mhp_online_type_from_str(str);

	if (online_type >= 0)
		mhp_default_online_type = online_type;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);
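
/*
 * Usage (sketch): the default online type can also be set on the kernel
 * command line, e.g. "memhp_default_state=online" or
 * "memhp_default_state=online_movable".
 */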
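
/*
 * Note: writers also block CPU hotplug. Onlining/offlining memory touches
 * per-CPU state (e.g., zonelists, per-cpu pagesets), so cpus_read_lock()
 * is taken before taking mem_hotplug_lock for writing.
 */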
void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}

u64 max_mem_size = U64_MAX;

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size,
						 const char *resource_name)
{
	struct resource *res;
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	if (strcmp(resource_name, "System RAM"))
		flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED;

	if (!mhp_range_allowed(start, size, true))
		return ERR_PTR(-E2BIG);

	/*
	 * Make sure value parsed from 'mem=' only restricts memory adding
	 * while booting, so that memory hotplug won't be impacted. Please
	 * refer to document of 'mem=' in kernel-parameters.txt for more
	 * details.
	 */
	if (start + size > max_mem_size && system_state < SYSTEM_RUNNING)
		return ERR_PTR(-E2BIG);

	/*
	 * Request ownership of the new memory range. This might be
	 * a child of an existing resource that was present but
	 * not marked as busy.
	 */
	res = __request_region(&iomem_resource, start, size,
			       resource_name, flags);

	if (!res) {
		pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
			 start, start + size);
		return ERR_PTR(-EEXIST);
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}

static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)
{
	/*
	 * Disallow all operations smaller than a sub-section and only
	 * allow operations smaller than a section for
	 * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
	 * enforces a larger memory_block_size_bytes() granularity for
	 * memory that will be marked online, so this check should only
	 * fire for direct arch_{add,remove}_memory() users outside of
	 * add_memory_resource().
	 */
	unsigned long min_align;

	if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		min_align = PAGES_PER_SUBSECTION;
	else
		min_align = PAGES_PER_SECTION;
	if (!IS_ALIGNED(pfn | nr_pages, min_align))
		return -EINVAL;
	return 0;
}
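
/*
 * Example (illustrative, x86-64 with 4 KiB pages): PAGES_PER_SUBSECTION
 * is 512 (2 MiB) and PAGES_PER_SECTION is 32768 (128 MiB). With
 * SPARSEMEM_VMEMMAP, both pfn and nr_pages must therefore be 2 MiB
 * aligned; the single IS_ALIGNED(pfn | nr_pages, min_align) test covers
 * both values at once.
 */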

/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers that rely on fully initialized page->flags (and friends)
 * should use this rather than pfn_valid && pfn_to_page.
 */
struct page *pfn_to_online_page(unsigned long pfn)
{
	unsigned long nr = pfn_to_section_nr(pfn);
	struct dev_pagemap *pgmap;
	struct mem_section *ms;

	if (nr >= NR_MEM_SECTIONS)
		return NULL;

	ms = __nr_to_section(nr);
	if (!online_section(ms))
		return NULL;

	/*
	 * Save some code text when online_section() +
	 * pfn_section_valid() are sufficient.
	 */
	if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))
		return NULL;

	if (!pfn_section_valid(ms, pfn))
		return NULL;

	if (!online_device_section(ms))
		return pfn_to_page(pfn);

	/*
	 * Slowpath: when ZONE_DEVICE collides with
	 * ZONE_{NORMAL,MOVABLE} within the same section some pfns in
	 * the section may be 'offline' but 'valid'. Only
	 * get_dev_pagemap() can determine sub-section online status.
	 */
	pgmap = get_dev_pagemap(pfn, NULL);
	put_dev_pagemap(pgmap);

	/* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
	if (pgmap)
		return NULL;

	return pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(pfn_to_online_page);
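
/*
 * Typical caller pattern (a sketch, not a kernel API contract):
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		...operate on the online, fully initialized page...
 *	}
 */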

int __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
		struct mhp_params *params)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;
	int err;
	struct vmem_altmap *altmap = params->altmap;

	if (WARN_ON_ONCE(!pgprot_val(params->pgprot)))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));

	if (altmap) {
		/*
		 * Validate altmap is within bounds of the total request
		 */
		if (altmap->base_pfn != pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			return -EINVAL;
		}
		altmap->alloc = 0;
	}

	if (check_pfn_span(pfn, nr_pages)) {
		WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
		return -EINVAL;
	}

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap,
					 params->pgmap);
		if (err)
			break;
		cond_resched();
	}
	vmemmap_populate_print_last();
	return err;
}

/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
					       unsigned long start_pfn,
					       unsigned long end_pfn)
{
	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_to_online_page(start_pfn)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
					      unsigned long start_pfn,
					      unsigned long end_pfn)
{
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_to_online_page(pfn)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long pfn;
	int nid = zone_to_nid(zone);

	if (zone->zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, we find the second smallest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn(zone));
		if (pfn) {
			zone->spanned_pages = zone_end_pfn(zone) - pfn;
			zone->zone_start_pfn = pfn;
		} else {
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	} else if (zone_end_pfn(zone) == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, we find the second biggest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
		else {
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	}
}
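
/*
 * Example (illustrative): removing [0x10000, 0x18000) from a zone that
 * spans [0x10000, 0x30000) moves zone_start_pfn up to the first pfn of
 * the next online subsection and shrinks spanned_pages accordingly; if
 * no online subsection remains, the zone span is reset to empty.
 */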

static void update_pgdat_span(struct pglist_data *pgdat)
{
	unsigned long node_start_pfn = 0, node_end_pfn = 0;
	struct zone *zone;

	for (zone = pgdat->node_zones;
	     zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
		unsigned long end_pfn = zone_end_pfn(zone);

		/* No need to lock the zones, they can't change. */
		if (!zone->spanned_pages)
			continue;
		if (!node_end_pfn) {
			node_start_pfn = zone->zone_start_pfn;
			node_end_pfn = end_pfn;
			continue;
		}

		if (end_pfn > node_end_pfn)
			node_end_pfn = end_pfn;
		if (zone->zone_start_pfn < node_start_pfn)
			node_start_pfn = zone->zone_start_pfn;
	}

	pgdat->node_start_pfn = node_start_pfn;
	pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}

void remove_pfn_range_from_zone(struct zone *zone,
				unsigned long start_pfn,
				unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long pfn, cur_nr_pages;

	/* Poison struct pages because they are now uninitialized again. */
	for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
		cond_resched();

		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages =
			min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
		page_init_poison(pfn_to_page(pfn),
				 sizeof(struct page) * cur_nr_pages);
	}

	/*
	 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
	 * we will not try to shrink the zones - which is okay as
	 * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
	 */
	if (zone_is_zone_device(zone))
		return;

	clear_zone_contiguous(zone);

	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	update_pgdat_span(pgdat);

	set_zone_contiguous(zone);
}

/**
 * __remove_pages() - remove sections of pages
 * @pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 * @altmap: alternative device page map or %NULL if default memmap is used
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
void __remove_pages(unsigned long pfn, unsigned long nr_pages,
		    struct vmem_altmap *altmap)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;

	if (check_pfn_span(pfn, nr_pages)) {
		WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
		return;
	}

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		cond_resched();
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		sparse_remove_section(pfn, cur_nr_pages, altmap);
	}
}

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
void generic_online_page(struct page *page, unsigned int order)
{
	__free_pages_core(page, order, MEMINIT_HOTPLUG);
}
EXPORT_SYMBOL_GPL(generic_online_page);

static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;

	/*
	 * Online the pages in MAX_PAGE_ORDER aligned chunks. The callback might
	 * decide to not expose all pages to the buddy (e.g., expose them
	 * later). We account all pages as being online and belonging to this
	 * zone ("present").
	 * When using memmap_on_memory, the range might not be aligned to
	 * MAX_ORDER_NR_PAGES - 1, but pageblock aligned. __ffs() will detect
	 * this and the first chunk to online will be pageblock_nr_pages.
	 */
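	/*
	 * Illustrative walk-through (assuming MAX_PAGE_ORDER == 10):
	 * starting at pfn 0x8200, __ffs(0x8200) == 9, so an order-9 chunk
	 * is freed first; the next iteration starts at pfn 0x8400 and frees
	 * order-10 chunks from there on, the order capped at MAX_PAGE_ORDER
	 * as alignment grows.
	 */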
671a08a2ae3SOscar Salvador for (pfn = start_pfn; pfn < end_pfn;) {
672dd467f92SDavid Hildenbrand struct page *page = pfn_to_page(pfn);
67359f876fbSKirill A. Shutemov int order;
67459f876fbSKirill A. Shutemov
67559f876fbSKirill A. Shutemov /*
67659f876fbSKirill A. Shutemov * Free to online pages in the largest chunks alignment allows.
67759f876fbSKirill A. Shutemov *
67859f876fbSKirill A. Shutemov * __ffs() behaviour is undefined for 0. start == 0 is
6795e0a760bSKirill A. Shutemov * MAX_PAGE_ORDER-aligned, Set order to MAX_PAGE_ORDER for
6805e0a760bSKirill A. Shutemov * the case.
68159f876fbSKirill A. Shutemov */
68259f876fbSKirill A. Shutemov if (pfn)
6835e0a760bSKirill A. Shutemov order = min_t(int, MAX_PAGE_ORDER, __ffs(pfn));
68459f876fbSKirill A. Shutemov else
6855e0a760bSKirill A. Shutemov order = MAX_PAGE_ORDER;
686a08a2ae3SOscar Salvador
687dd467f92SDavid Hildenbrand /*
688dd467f92SDavid Hildenbrand * Exposing the page to the buddy by freeing can cause
689dd467f92SDavid Hildenbrand * issues with debug_pagealloc enabled: some archs don't
690dd467f92SDavid Hildenbrand * like double-unmappings. So treat them like any pages that
691dd467f92SDavid Hildenbrand * were allocated from the buddy.
692dd467f92SDavid Hildenbrand */
693dd467f92SDavid Hildenbrand debug_pagealloc_map_pages(page, 1 << order);
694dd467f92SDavid Hildenbrand (*online_page_callback)(page, order);
695a08a2ae3SOscar Salvador pfn += (1UL << order);
696a08a2ae3SOscar Salvador }
6972d070eabSMichal Hocko
698b2c2ab20SDavid Hildenbrand /* mark all involved sections as online */
699b2c2ab20SDavid Hildenbrand online_mem_sections(start_pfn, end_pfn);
70075884fb1SKAMEZAWA Hiroyuki }
70175884fb1SKAMEZAWA Hiroyuki
702d9713679SLai Jiangshan /* check which state of node_states will be changed when online memory */
node_states_check_changes_online(unsigned long nr_pages,struct zone * zone,struct memory_notify * arg)703d9713679SLai Jiangshan static void node_states_check_changes_online(unsigned long nr_pages,
704d9713679SLai Jiangshan struct zone *zone, struct memory_notify *arg)
705d9713679SLai Jiangshan {
706d9713679SLai Jiangshan int nid = zone_to_nid(zone);
707d9713679SLai Jiangshan
70898fa15f3SAnshuman Khandual arg->status_change_nid = NUMA_NO_NODE;
70998fa15f3SAnshuman Khandual arg->status_change_nid_normal = NUMA_NO_NODE;
7106715ddf9SLai Jiangshan
7116715ddf9SLai Jiangshan if (!node_state(nid, N_MEMORY))
712d9713679SLai Jiangshan arg->status_change_nid = nid;
7138efe33f4SOscar Salvador if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
7148efe33f4SOscar Salvador arg->status_change_nid_normal = nid;
715d9713679SLai Jiangshan }
716d9713679SLai Jiangshan
node_states_set_node(int node,struct memory_notify * arg)717d9713679SLai Jiangshan static void node_states_set_node(int node, struct memory_notify *arg)
718d9713679SLai Jiangshan {
719d9713679SLai Jiangshan if (arg->status_change_nid_normal >= 0)
720d9713679SLai Jiangshan node_set_state(node, N_NORMAL_MEMORY);
721d9713679SLai Jiangshan
72283d83612SOscar Salvador if (arg->status_change_nid >= 0)
7236715ddf9SLai Jiangshan node_set_state(node, N_MEMORY);
724d9713679SLai Jiangshan }
725d9713679SLai Jiangshan
resize_zone_range(struct zone * zone,unsigned long start_pfn,unsigned long nr_pages)726f1dd2cd1SMichal Hocko static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
727f1dd2cd1SMichal Hocko unsigned long nr_pages)
728f1dd2cd1SMichal Hocko {
729f1dd2cd1SMichal Hocko unsigned long old_end_pfn = zone_end_pfn(zone);
730f1dd2cd1SMichal Hocko
731f1dd2cd1SMichal Hocko if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
732f1dd2cd1SMichal Hocko zone->zone_start_pfn = start_pfn;
733f1dd2cd1SMichal Hocko
734f1dd2cd1SMichal Hocko zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
735f1dd2cd1SMichal Hocko }
736f1dd2cd1SMichal Hocko
resize_pgdat_range(struct pglist_data * pgdat,unsigned long start_pfn,unsigned long nr_pages)737f1dd2cd1SMichal Hocko static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
738f1dd2cd1SMichal Hocko unsigned long nr_pages)
739f1dd2cd1SMichal Hocko {
740f1dd2cd1SMichal Hocko unsigned long old_end_pfn = pgdat_end_pfn(pgdat);
741f1dd2cd1SMichal Hocko
742f1dd2cd1SMichal Hocko if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
743f1dd2cd1SMichal Hocko pgdat->node_start_pfn = start_pfn;
744f1dd2cd1SMichal Hocko
745f1dd2cd1SMichal Hocko pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
746f1dd2cd1SMichal Hocko
7473fccb74cSDavid Hildenbrand }
7481f90a347SDan Williams
749ed7802ddSMuchun Song #ifdef CONFIG_ZONE_DEVICE
section_taint_zone_device(unsigned long pfn)7501f90a347SDan Williams static void section_taint_zone_device(unsigned long pfn)
7511f90a347SDan Williams {
7521f90a347SDan Williams struct mem_section *ms = __pfn_to_section(pfn);
7531f90a347SDan Williams
7541f90a347SDan Williams ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE;
7551f90a347SDan Williams }
756ed7802ddSMuchun Song #else
section_taint_zone_device(unsigned long pfn)757ed7802ddSMuchun Song static inline void section_taint_zone_device(unsigned long pfn)
758ed7802ddSMuchun Song {
759ed7802ddSMuchun Song }
760ed7802ddSMuchun Song #endif
7611f90a347SDan Williams
7623fccb74cSDavid Hildenbrand /*
7633fccb74cSDavid Hildenbrand * Associate the pfn range with the given zone, initializing the memmaps
7643fccb74cSDavid Hildenbrand * and resizing the pgdat/zone data to span the added pages. After this
765503b158fSDavid Hildenbrand * call, all affected pages are PageOffline().
766d882c006SDavid Hildenbrand *
767d882c006SDavid Hildenbrand * All aligned pageblocks are initialized to the specified migratetype
768d882c006SDavid Hildenbrand * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
769d882c006SDavid Hildenbrand * zone stats (e.g., nr_isolate_pageblock) are touched.
7703fccb74cSDavid Hildenbrand */
move_pfn_range_to_zone(struct zone * zone,unsigned long start_pfn,unsigned long nr_pages,struct vmem_altmap * altmap,int migratetype)771f732e242SWei Yang void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
772d882c006SDavid Hildenbrand unsigned long nr_pages,
773d882c006SDavid Hildenbrand struct vmem_altmap *altmap, int migratetype)
774f1dd2cd1SMichal Hocko {
775f1dd2cd1SMichal Hocko struct pglist_data *pgdat = zone->zone_pgdat;
776f1dd2cd1SMichal Hocko int nid = pgdat->node_id;
777f1dd2cd1SMichal Hocko
778f1dd2cd1SMichal Hocko clear_zone_contiguous(zone);
779f1dd2cd1SMichal Hocko
780fa004ab7SWei Yang if (zone_is_empty(zone))
781fa004ab7SWei Yang init_currently_empty_zone(zone, start_pfn, nr_pages);
782f1dd2cd1SMichal Hocko resize_zone_range(zone, start_pfn, nr_pages);
783f1dd2cd1SMichal Hocko resize_pgdat_range(pgdat, start_pfn, nr_pages);
784f1dd2cd1SMichal Hocko
785f1dd2cd1SMichal Hocko /*
7861f90a347SDan Williams * Subsection population requires care in pfn_to_online_page().
7871f90a347SDan Williams * Set the taint to enable the slow path detection of
7881f90a347SDan Williams * ZONE_DEVICE pages in an otherwise ZONE_{NORMAL,MOVABLE}
7891f90a347SDan Williams * section.
7901f90a347SDan Williams */
7911f90a347SDan Williams if (zone_is_zone_device(zone)) {
7921f90a347SDan Williams if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
7931f90a347SDan Williams section_taint_zone_device(start_pfn);
7941f90a347SDan Williams if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
7951f90a347SDan Williams section_taint_zone_device(start_pfn + nr_pages);
7961f90a347SDan Williams }
7971f90a347SDan Williams
7981f90a347SDan Williams /*
799f1dd2cd1SMichal Hocko * TODO now we have a visible range of pages which are not associated
800f1dd2cd1SMichal Hocko * with their zone properly. Not nice but set_pfnblock_flags_mask
801f1dd2cd1SMichal Hocko * expects the zone spans the pfn range. All the pages in the range
802f1dd2cd1SMichal Hocko * are reserved so nobody should be touching them so we should be safe
803f1dd2cd1SMichal Hocko */
804ab28cb6eSBaoquan He memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
805d882c006SDavid Hildenbrand MEMINIT_HOTPLUG, altmap, migratetype);
806f1dd2cd1SMichal Hocko
807f1dd2cd1SMichal Hocko set_zone_contiguous(zone);
808f1dd2cd1SMichal Hocko }
809f1dd2cd1SMichal Hocko
810e83a437fSDavid Hildenbrand struct auto_movable_stats {
811e83a437fSDavid Hildenbrand unsigned long kernel_early_pages;
812e83a437fSDavid Hildenbrand unsigned long movable_pages;
813e83a437fSDavid Hildenbrand };
814e83a437fSDavid Hildenbrand
auto_movable_stats_account_zone(struct auto_movable_stats * stats,struct zone * zone)815e83a437fSDavid Hildenbrand static void auto_movable_stats_account_zone(struct auto_movable_stats *stats,
816e83a437fSDavid Hildenbrand struct zone *zone)
817e83a437fSDavid Hildenbrand {
818e83a437fSDavid Hildenbrand if (zone_idx(zone) == ZONE_MOVABLE) {
819e83a437fSDavid Hildenbrand stats->movable_pages += zone->present_pages;
820e83a437fSDavid Hildenbrand } else {
821e83a437fSDavid Hildenbrand stats->kernel_early_pages += zone->present_early_pages;
822e83a437fSDavid Hildenbrand #ifdef CONFIG_CMA
823e83a437fSDavid Hildenbrand /*
824e83a437fSDavid Hildenbrand * CMA pages (never on hotplugged memory) behave like
825e83a437fSDavid Hildenbrand * ZONE_MOVABLE.
826e83a437fSDavid Hildenbrand */
827e83a437fSDavid Hildenbrand stats->movable_pages += zone->cma_pages;
828e83a437fSDavid Hildenbrand stats->kernel_early_pages -= zone->cma_pages;
829e83a437fSDavid Hildenbrand #endif /* CONFIG_CMA */
830e83a437fSDavid Hildenbrand }
831e83a437fSDavid Hildenbrand }
8323fcebf90SDavid Hildenbrand struct auto_movable_group_stats {
8333fcebf90SDavid Hildenbrand unsigned long movable_pages;
8343fcebf90SDavid Hildenbrand unsigned long req_kernel_early_pages;
8353fcebf90SDavid Hildenbrand };
836e83a437fSDavid Hildenbrand
auto_movable_stats_account_group(struct memory_group * group,void * arg)8373fcebf90SDavid Hildenbrand static int auto_movable_stats_account_group(struct memory_group *group,
8383fcebf90SDavid Hildenbrand void *arg)
839e83a437fSDavid Hildenbrand {
8403fcebf90SDavid Hildenbrand const int ratio = READ_ONCE(auto_movable_ratio);
8413fcebf90SDavid Hildenbrand struct auto_movable_group_stats *stats = arg;
8423fcebf90SDavid Hildenbrand long pages;
8433fcebf90SDavid Hildenbrand
8443fcebf90SDavid Hildenbrand /*
8453fcebf90SDavid Hildenbrand * We don't support modifying the config while the auto-movable online
8463fcebf90SDavid Hildenbrand * policy is already enabled. Just avoid the division by zero below.
8473fcebf90SDavid Hildenbrand */
8483fcebf90SDavid Hildenbrand if (!ratio)
8493fcebf90SDavid Hildenbrand return 0;
8503fcebf90SDavid Hildenbrand
8513fcebf90SDavid Hildenbrand /*
8523fcebf90SDavid Hildenbrand * Calculate how many early kernel pages this group requires to
8533fcebf90SDavid Hildenbrand * satisfy the configured zone ratio.
8543fcebf90SDavid Hildenbrand */
8553fcebf90SDavid Hildenbrand pages = group->present_movable_pages * 100 / ratio;
8563fcebf90SDavid Hildenbrand pages -= group->present_kernel_pages;
8573fcebf90SDavid Hildenbrand
8583fcebf90SDavid Hildenbrand if (pages > 0)
8593fcebf90SDavid Hildenbrand stats->req_kernel_early_pages += pages;
8603fcebf90SDavid Hildenbrand stats->movable_pages += group->present_movable_pages;
8613fcebf90SDavid Hildenbrand return 0;
8623fcebf90SDavid Hildenbrand }
8633fcebf90SDavid Hildenbrand
auto_movable_can_online_movable(int nid,struct memory_group * group,unsigned long nr_pages)8643fcebf90SDavid Hildenbrand static bool auto_movable_can_online_movable(int nid, struct memory_group *group,
8653fcebf90SDavid Hildenbrand unsigned long nr_pages)
8663fcebf90SDavid Hildenbrand {
867e83a437fSDavid Hildenbrand unsigned long kernel_early_pages, movable_pages;
8683fcebf90SDavid Hildenbrand struct auto_movable_group_stats group_stats = {};
8693fcebf90SDavid Hildenbrand struct auto_movable_stats stats = {};
870e83a437fSDavid Hildenbrand struct zone *zone;
871e83a437fSDavid Hildenbrand int i;
872e83a437fSDavid Hildenbrand
873e83a437fSDavid Hildenbrand /* Walk all relevant zones and collect MOVABLE vs. KERNEL stats. */
874e83a437fSDavid Hildenbrand if (nid == NUMA_NO_NODE) {
875e83a437fSDavid Hildenbrand /* TODO: cache values */
876e83a437fSDavid Hildenbrand for_each_populated_zone(zone)
877e83a437fSDavid Hildenbrand auto_movable_stats_account_zone(&stats, zone);
878e83a437fSDavid Hildenbrand } else {
879e83a437fSDavid Hildenbrand for (i = 0; i < MAX_NR_ZONES; i++) {
8805958d359SAnastasia Belova pg_data_t *pgdat = NODE_DATA(nid);
8815958d359SAnastasia Belova
882e83a437fSDavid Hildenbrand zone = pgdat->node_zones + i;
883e83a437fSDavid Hildenbrand if (populated_zone(zone))
884e83a437fSDavid Hildenbrand auto_movable_stats_account_zone(&stats, zone);
885e83a437fSDavid Hildenbrand }
886e83a437fSDavid Hildenbrand }
887e83a437fSDavid Hildenbrand
888e83a437fSDavid Hildenbrand kernel_early_pages = stats.kernel_early_pages;
889e83a437fSDavid Hildenbrand movable_pages = stats.movable_pages;
890e83a437fSDavid Hildenbrand
891e83a437fSDavid Hildenbrand /*
8923fcebf90SDavid Hildenbrand * Kernel memory inside dynamic memory group allows for more MOVABLE
8933fcebf90SDavid Hildenbrand * memory within the same group. Remove the effect of all but the
8943fcebf90SDavid Hildenbrand * current group from the stats.
8953fcebf90SDavid Hildenbrand */
8963fcebf90SDavid Hildenbrand walk_dynamic_memory_groups(nid, auto_movable_stats_account_group,
8973fcebf90SDavid Hildenbrand group, &group_stats);
8983fcebf90SDavid Hildenbrand if (kernel_early_pages <= group_stats.req_kernel_early_pages)
8993fcebf90SDavid Hildenbrand return false;
9003fcebf90SDavid Hildenbrand kernel_early_pages -= group_stats.req_kernel_early_pages;
9013fcebf90SDavid Hildenbrand movable_pages -= group_stats.movable_pages;
9023fcebf90SDavid Hildenbrand
9033fcebf90SDavid Hildenbrand if (group && group->is_dynamic)
9043fcebf90SDavid Hildenbrand kernel_early_pages += group->present_kernel_pages;
9053fcebf90SDavid Hildenbrand
9063fcebf90SDavid Hildenbrand /*
907e83a437fSDavid Hildenbrand * Test if we could online the given number of pages to ZONE_MOVABLE
908e83a437fSDavid Hildenbrand * and still stay in the configured ratio.
909e83a437fSDavid Hildenbrand */
910e83a437fSDavid Hildenbrand movable_pages += nr_pages;
911e83a437fSDavid Hildenbrand return movable_pages <= (auto_movable_ratio * kernel_early_pages) / 100;
912e83a437fSDavid Hildenbrand }
913e83a437fSDavid Hildenbrand
914f1dd2cd1SMichal Hocko /*
915c246a213SMichal Hocko * Returns a default kernel memory zone for the given pfn range.
916c246a213SMichal Hocko * If no kernel zone covers this pfn range it will automatically go
917c246a213SMichal Hocko * to the ZONE_NORMAL.
918c246a213SMichal Hocko */
default_kernel_zone_for_pfn(int nid,unsigned long start_pfn,unsigned long nr_pages)919c6f03e29SMichal Hocko static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
920c246a213SMichal Hocko unsigned long nr_pages)
921c246a213SMichal Hocko {
922c246a213SMichal Hocko struct pglist_data *pgdat = NODE_DATA(nid);
923c246a213SMichal Hocko int zid;
924c246a213SMichal Hocko
925d6aad201SMiaohe Lin for (zid = 0; zid < ZONE_NORMAL; zid++) {
926c246a213SMichal Hocko struct zone *zone = &pgdat->node_zones[zid];
927c246a213SMichal Hocko
928c246a213SMichal Hocko if (zone_intersects(zone, start_pfn, nr_pages))
929c246a213SMichal Hocko return zone;
930c246a213SMichal Hocko }
931c246a213SMichal Hocko
932c246a213SMichal Hocko return &pgdat->node_zones[ZONE_NORMAL];
933c246a213SMichal Hocko }
934c246a213SMichal Hocko
935e83a437fSDavid Hildenbrand /*
936e83a437fSDavid Hildenbrand * Determine to which zone to online memory dynamically based on user
937e83a437fSDavid Hildenbrand * configuration and system stats. We care about the following ratio:
938e83a437fSDavid Hildenbrand *
939e83a437fSDavid Hildenbrand * MOVABLE : KERNEL
940e83a437fSDavid Hildenbrand *
941e83a437fSDavid Hildenbrand * Whereby MOVABLE is memory in ZONE_MOVABLE and KERNEL is memory in
942e83a437fSDavid Hildenbrand * one of the kernel zones. CMA pages inside one of the kernel zones really
943e83a437fSDavid Hildenbrand * behaves like ZONE_MOVABLE, so we treat them accordingly.
944e83a437fSDavid Hildenbrand *
945e83a437fSDavid Hildenbrand * We don't allow for hotplugged memory in a KERNEL zone to increase the
946e83a437fSDavid Hildenbrand * amount of MOVABLE memory we can have, so we end up with:
947e83a437fSDavid Hildenbrand *
948e83a437fSDavid Hildenbrand * MOVABLE : KERNEL_EARLY
949e83a437fSDavid Hildenbrand *
950e83a437fSDavid Hildenbrand * Whereby KERNEL_EARLY is memory in one of the kernel zones, available sinze
951e83a437fSDavid Hildenbrand * boot. We base our calculation on KERNEL_EARLY internally, because:
952e83a437fSDavid Hildenbrand *
953e83a437fSDavid Hildenbrand * a) Hotplugged memory in one of the kernel zones can sometimes still get
954e83a437fSDavid Hildenbrand * hotunplugged, especially when hot(un)plugging individual memory blocks.
955e83a437fSDavid Hildenbrand * There is no coordination across memory devices, therefore "automatic"
956e83a437fSDavid Hildenbrand * hotunplugging, as implemented in hypervisors, could result in zone
957e83a437fSDavid Hildenbrand * imbalances.
958e83a437fSDavid Hildenbrand * b) Early/boot memory in one of the kernel zones can usually not get
959e83a437fSDavid Hildenbrand * hotunplugged again (e.g., no firmware interface to unplug, fragmented
960e83a437fSDavid Hildenbrand * with unmovable allocations). While there are corner cases where it might
961e83a437fSDavid Hildenbrand * still work, it is barely relevant in practice.
962e83a437fSDavid Hildenbrand *
9633fcebf90SDavid Hildenbrand * Exceptions are dynamic memory groups, which allow for more MOVABLE
9643fcebf90SDavid Hildenbrand * memory within the same memory group -- because in that case, there is
9653fcebf90SDavid Hildenbrand * coordination within the single memory device managed by a single driver.
9663fcebf90SDavid Hildenbrand *
967e83a437fSDavid Hildenbrand * We rely on "present pages" instead of "managed pages", as the latter is
968e83a437fSDavid Hildenbrand * highly unreliable and dynamic in virtualized environments, and does not
969e83a437fSDavid Hildenbrand * consider boot time allocations. For example, memory ballooning adjusts the
970e83a437fSDavid Hildenbrand * managed pages when inflating/deflating the balloon, and balloon compaction
971e83a437fSDavid Hildenbrand * can even migrate inflated pages between zones.
972e83a437fSDavid Hildenbrand *
973e83a437fSDavid Hildenbrand * Using "present pages" is better but some things to keep in mind are:
974e83a437fSDavid Hildenbrand *
975e83a437fSDavid Hildenbrand * a) Some memblock allocations, such as for the crashkernel area, are
976e83a437fSDavid Hildenbrand * effectively unused by the kernel, yet they account to "present pages".
977e83a437fSDavid Hildenbrand * Fortunately, these allocations are comparatively small in relevant setups
978e83a437fSDavid Hildenbrand * (e.g., fraction of system memory).
979e83a437fSDavid Hildenbrand * b) Some hotplugged memory blocks in virtualized environments, esecially
980e83a437fSDavid Hildenbrand * hotplugged by virtio-mem, look like they are completely present, however,
981e83a437fSDavid Hildenbrand * only parts of the memory block are actually currently usable.
982e83a437fSDavid Hildenbrand * "present pages" is an upper limit that can get reached at runtime. As
983e83a437fSDavid Hildenbrand * we base our calculations on KERNEL_EARLY, this is not an issue.
984e83a437fSDavid Hildenbrand */
985445fcf7cSDavid Hildenbrand static struct zone *auto_movable_zone_for_pfn(int nid,
986445fcf7cSDavid Hildenbrand struct memory_group *group,
987445fcf7cSDavid Hildenbrand unsigned long pfn,
988e83a437fSDavid Hildenbrand unsigned long nr_pages)
989e83a437fSDavid Hildenbrand {
990445fcf7cSDavid Hildenbrand unsigned long online_pages = 0, max_pages, end_pfn;
991445fcf7cSDavid Hildenbrand struct page *page;
992445fcf7cSDavid Hildenbrand
993e83a437fSDavid Hildenbrand if (!auto_movable_ratio)
994e83a437fSDavid Hildenbrand goto kernel_zone;
995e83a437fSDavid Hildenbrand
996445fcf7cSDavid Hildenbrand if (group && !group->is_dynamic) {
997445fcf7cSDavid Hildenbrand max_pages = group->s.max_pages;
998445fcf7cSDavid Hildenbrand online_pages = group->present_movable_pages;
999445fcf7cSDavid Hildenbrand
1000445fcf7cSDavid Hildenbrand /* If anything is !MOVABLE, online the rest !MOVABLE. */
1001445fcf7cSDavid Hildenbrand if (group->present_kernel_pages)
1002445fcf7cSDavid Hildenbrand goto kernel_zone;
1003445fcf7cSDavid Hildenbrand } else if (!group || group->d.unit_pages == nr_pages) {
1004445fcf7cSDavid Hildenbrand max_pages = nr_pages;
1005445fcf7cSDavid Hildenbrand } else {
1006445fcf7cSDavid Hildenbrand max_pages = group->d.unit_pages;
1007445fcf7cSDavid Hildenbrand /*
1008445fcf7cSDavid Hildenbrand * Take a look at all online sections in the current unit.
1009445fcf7cSDavid Hildenbrand * We can safely assume that all pages within a section belong
1010445fcf7cSDavid Hildenbrand * to the same zone, because dynamic memory groups only deal
1011445fcf7cSDavid Hildenbrand * with hotplugged memory.
1012445fcf7cSDavid Hildenbrand */
1013445fcf7cSDavid Hildenbrand pfn = ALIGN_DOWN(pfn, group->d.unit_pages);
1014445fcf7cSDavid Hildenbrand end_pfn = pfn + group->d.unit_pages;
1015445fcf7cSDavid Hildenbrand for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
1016445fcf7cSDavid Hildenbrand page = pfn_to_online_page(pfn);
1017445fcf7cSDavid Hildenbrand if (!page)
1018445fcf7cSDavid Hildenbrand continue;
1019445fcf7cSDavid Hildenbrand /* If anything is !MOVABLE, online the rest !MOVABLE. */
102007252dfeSKefeng Wang if (!is_zone_movable_page(page))
1021445fcf7cSDavid Hildenbrand goto kernel_zone;
1022445fcf7cSDavid Hildenbrand online_pages += PAGES_PER_SECTION;
1023445fcf7cSDavid Hildenbrand }
1024445fcf7cSDavid Hildenbrand }
1025445fcf7cSDavid Hildenbrand
1026445fcf7cSDavid Hildenbrand /*
1027445fcf7cSDavid Hildenbrand * Online MOVABLE if we could *currently* online all remaining parts
1028445fcf7cSDavid Hildenbrand * MOVABLE. We expect to (add+) online them immediately next, so if
1029445fcf7cSDavid Hildenbrand * nobody interferes, all will be MOVABLE if possible.
1030445fcf7cSDavid Hildenbrand */
1031445fcf7cSDavid Hildenbrand nr_pages = max_pages - online_pages;
10323fcebf90SDavid Hildenbrand if (!auto_movable_can_online_movable(NUMA_NO_NODE, group, nr_pages))
1033e83a437fSDavid Hildenbrand goto kernel_zone;
1034e83a437fSDavid Hildenbrand
1035e83a437fSDavid Hildenbrand #ifdef CONFIG_NUMA
1036e83a437fSDavid Hildenbrand if (auto_movable_numa_aware &&
10373fcebf90SDavid Hildenbrand !auto_movable_can_online_movable(nid, group, nr_pages))
1038e83a437fSDavid Hildenbrand goto kernel_zone;
1039e83a437fSDavid Hildenbrand #endif /* CONFIG_NUMA */
1040e83a437fSDavid Hildenbrand
1041e83a437fSDavid Hildenbrand return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
1042e83a437fSDavid Hildenbrand kernel_zone:
1043e83a437fSDavid Hildenbrand return default_kernel_zone_for_pfn(nid, pfn, nr_pages);
1044e83a437fSDavid Hildenbrand }
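
/*
 * Illustrative sketch (not the in-tree implementation) of the
 * MOVABLE : KERNEL_EARLY ratio check described in the comment above; the
 * real logic lives in auto_movable_can_online_movable(). The function name
 * and parameters here are hypothetical. With a ratio of 301, up to 301
 * MOVABLE pages are tolerated per 100 KERNEL_EARLY pages.
 */
static bool __maybe_unused example_movable_ratio_ok(unsigned long movable,
						    unsigned long kernel_early,
						    unsigned long ratio)
{
	/* Allow MOVABLE as long as MOVABLE <= ratio% of KERNEL_EARLY. */
	return movable * 100UL <= ratio * kernel_early;
}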
1045e83a437fSDavid Hildenbrand
1046c6f03e29SMichal Hocko static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
1047c6f03e29SMichal Hocko unsigned long nr_pages)
1048e5e68930SMichal Hocko {
1049c6f03e29SMichal Hocko struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
1050c6f03e29SMichal Hocko nr_pages);
1051c6f03e29SMichal Hocko struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
1052c6f03e29SMichal Hocko bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
1053c6f03e29SMichal Hocko bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);
1054e5e68930SMichal Hocko
1055e5e68930SMichal Hocko /*
1056c6f03e29SMichal Hocko * We inherit the existing zone in the simple case where the zones do
1057c6f03e29SMichal Hocko * not overlap in the given range.
1058e5e68930SMichal Hocko */
1059c6f03e29SMichal Hocko if (in_kernel ^ in_movable)
1060c6f03e29SMichal Hocko return (in_kernel) ? kernel_zone : movable_zone;
1061e5e68930SMichal Hocko
1062c6f03e29SMichal Hocko /*
1063c6f03e29SMichal Hocko * If the range doesn't belong to any zone or two zones overlap in the
1064c6f03e29SMichal Hocko * given range then we use movable zone only if movable_node is
1065c6f03e29SMichal Hocko * enabled because we always online to a kernel zone by default.
1066c6f03e29SMichal Hocko */
1067c6f03e29SMichal Hocko return movable_node_enabled ? movable_zone : kernel_zone;
10689f123ab5SMichal Hocko }
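
/*
 * Worked example for the logic above (illustrative): if the range
 * intersects only the kernel zone (in_kernel=1, in_movable=0), the XOR
 * is true and we inherit kernel_zone; only the movable zone, we inherit
 * movable_zone; for both or neither, the movable_node setting decides.
 */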
10699f123ab5SMichal Hocko
10707cf209baSDavid Hildenbrand struct zone *zone_for_pfn_range(int online_type, int nid,
1071445fcf7cSDavid Hildenbrand struct memory_group *group, unsigned long start_pfn,
1072e5e68930SMichal Hocko unsigned long nr_pages)
1073f1dd2cd1SMichal Hocko {
1074c6f03e29SMichal Hocko if (online_type == MMOP_ONLINE_KERNEL)
1075c6f03e29SMichal Hocko return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
1076f1dd2cd1SMichal Hocko
1077c6f03e29SMichal Hocko if (online_type == MMOP_ONLINE_MOVABLE)
1078c6f03e29SMichal Hocko return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
1079f1dd2cd1SMichal Hocko
1080e83a437fSDavid Hildenbrand if (online_policy == ONLINE_POLICY_AUTO_MOVABLE)
1081445fcf7cSDavid Hildenbrand return auto_movable_zone_for_pfn(nid, group, start_pfn, nr_pages);
1082e83a437fSDavid Hildenbrand
1083c6f03e29SMichal Hocko return default_zone_for_pfn(nid, start_pfn, nr_pages);
1084e5e68930SMichal Hocko }
1085e5e68930SMichal Hocko
1086a08a2ae3SOscar Salvador /*
1087a08a2ae3SOscar Salvador * This function should only be called by memory_block_{online,offline},
1088a08a2ae3SOscar Salvador * and {online,offline}_pages.
1089a08a2ae3SOscar Salvador */
1090836809ecSDavid Hildenbrand void adjust_present_page_count(struct page *page, struct memory_group *group,
1091836809ecSDavid Hildenbrand long nr_pages)
1092f9901144SDavid Hildenbrand {
10934b097002SDavid Hildenbrand struct zone *zone = page_zone(page);
1094836809ecSDavid Hildenbrand const bool movable = zone_idx(zone) == ZONE_MOVABLE;
10954b097002SDavid Hildenbrand
10964b097002SDavid Hildenbrand /*
10974b097002SDavid Hildenbrand * We only support onlining/offlining/adding/removing of complete
10984b097002SDavid Hildenbrand * memory blocks; therefore, all pages are either early or hotplugged.
10994b097002SDavid Hildenbrand */
11004b097002SDavid Hildenbrand if (early_section(__pfn_to_section(page_to_pfn(page))))
11014b097002SDavid Hildenbrand zone->present_early_pages += nr_pages;
1102f9901144SDavid Hildenbrand zone->present_pages += nr_pages;
1103f9901144SDavid Hildenbrand zone->zone_pgdat->node_present_pages += nr_pages;
1104836809ecSDavid Hildenbrand
1105836809ecSDavid Hildenbrand if (group && movable)
1106836809ecSDavid Hildenbrand group->present_movable_pages += nr_pages;
1107836809ecSDavid Hildenbrand else if (group && !movable)
1108836809ecSDavid Hildenbrand group->present_kernel_pages += nr_pages;
1109f9901144SDavid Hildenbrand }
1110f9901144SDavid Hildenbrand
1111a08a2ae3SOscar Salvador int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
1112c5f1e2d1SSumanth Korikkar struct zone *zone, bool mhp_off_inaccessible)
1113a08a2ae3SOscar Salvador {
1114a08a2ae3SOscar Salvador unsigned long end_pfn = pfn + nr_pages;
111566361095SMuchun Song int ret, i;
1116a08a2ae3SOscar Salvador
1117a08a2ae3SOscar Salvador ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
1118a08a2ae3SOscar Salvador if (ret)
1119a08a2ae3SOscar Salvador return ret;
1120a08a2ae3SOscar Salvador
1121c5f1e2d1SSumanth Korikkar /*
1122c5f1e2d1SSumanth Korikkar * The memory block is accessible at this stage, hence poison the struct
1123c5f1e2d1SSumanth Korikkar * pages now. If the memory block was accessible during the memory
1124c5f1e2d1SSumanth Korikkar * hotplug addition phase, page poisoning was already performed in
1125c5f1e2d1SSumanth Korikkar * sparse_add_section().
1126c5f1e2d1SSumanth Korikkar */
1127c5f1e2d1SSumanth Korikkar if (mhp_off_inaccessible)
1128c5f1e2d1SSumanth Korikkar page_init_poison(pfn_to_page(pfn), sizeof(struct page) * nr_pages);
1129c5f1e2d1SSumanth Korikkar
1130a08a2ae3SOscar Salvador move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);
1131a08a2ae3SOscar Salvador
1132503b158fSDavid Hildenbrand for (i = 0; i < nr_pages; i++) {
1133503b158fSDavid Hildenbrand struct page *page = pfn_to_page(pfn + i);
1134503b158fSDavid Hildenbrand
1135503b158fSDavid Hildenbrand __ClearPageOffline(page);
1136503b158fSDavid Hildenbrand SetPageVmemmapSelfHosted(page);
1137503b158fSDavid Hildenbrand }
113866361095SMuchun Song
1139a08a2ae3SOscar Salvador /*
1140a08a2ae3SOscar Salvador * It might be that the vmemmap_pages fully span sections. If that is
1141a08a2ae3SOscar Salvador * the case, mark those sections online here as otherwise they will be
1142a08a2ae3SOscar Salvador * left offline.
1143a08a2ae3SOscar Salvador */
1144a08a2ae3SOscar Salvador if (nr_pages >= PAGES_PER_SECTION)
1145a08a2ae3SOscar Salvador online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
1146a08a2ae3SOscar Salvador
1147a08a2ae3SOscar Salvador return ret;
1148a08a2ae3SOscar Salvador }
1149a08a2ae3SOscar Salvador
1150a08a2ae3SOscar Salvador void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
1151a08a2ae3SOscar Salvador {
1152a08a2ae3SOscar Salvador unsigned long end_pfn = pfn + nr_pages;
1153a08a2ae3SOscar Salvador
1154a08a2ae3SOscar Salvador /*
1155a08a2ae3SOscar Salvador * It might be that the vmemmap_pages fully span sections. If that is
1156a08a2ae3SOscar Salvador * the case, mark those sections offline here as otherwise they will be
1157a08a2ae3SOscar Salvador * left online.
1158a08a2ae3SOscar Salvador */
1159a08a2ae3SOscar Salvador if (nr_pages >= PAGES_PER_SECTION)
1160a08a2ae3SOscar Salvador offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
1161a08a2ae3SOscar Salvador
1162a08a2ae3SOscar Salvador /*
1163a08a2ae3SOscar Salvador * The pages associated with this vmemmap have been offlined, so
1164a08a2ae3SOscar Salvador * we can reset their state here.
1165a08a2ae3SOscar Salvador */
1166a08a2ae3SOscar Salvador remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
1167a08a2ae3SOscar Salvador kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
1168a08a2ae3SOscar Salvador }
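
/*
 * Layout sketch for memmap_on_memory (illustrative): the first
 * memory_block_memmap_on_memory_pages() pages of a hotplugged memory
 * block host the block's own vmemmap ("self-hosted"); only the
 * remainder is exposed to the buddy when onlining:
 *
 *	| vmemmap (self-hosted) | pages exposed to the buddy ....... |
 *	^ block start                                     block end--^
 */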
1169a08a2ae3SOscar Salvador
1170001002e7SSumanth Korikkar /*
1171001002e7SSumanth Korikkar * Must be called with mem_hotplug_lock in write mode.
1172001002e7SSumanth Korikkar */
1173f732e242SWei Yang int online_pages(unsigned long pfn, unsigned long nr_pages,
1174836809ecSDavid Hildenbrand struct zone *zone, struct memory_group *group)
117575884fb1SKAMEZAWA Hiroyuki {
1176aa47228aSCody P Schafer unsigned long flags;
11776811378eSYasunori Goto int need_zonelists_rebuild = 0;
1178a08a2ae3SOscar Salvador const int nid = zone_to_nid(zone);
11797b78d335SYasunori Goto int ret;
11807b78d335SYasunori Goto struct memory_notify arg;
11813947be19SDave Hansen
1182dd8e2f23SOscar Salvador /*
1183dd8e2f23SOscar Salvador * {on,off}lining is constrained to full memory sections (or more
1184041711ceSZhen Lei * precisely to memory blocks from the user space POV).
1185dd8e2f23SOscar Salvador * memmap_on_memory is an exception because it reserves initial part
1186dd8e2f23SOscar Salvador * of the physical memory space for vmemmaps. That space is pageblock
1187dd8e2f23SOscar Salvador * aligned.
1188dd8e2f23SOscar Salvador */
1189ee0913c4SKefeng Wang if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(pfn) ||
1190dd8e2f23SOscar Salvador !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
11914986fac1SDavid Hildenbrand return -EINVAL;
11924986fac1SDavid Hildenbrand
1193381eab4aSDavid Hildenbrand
1194f1dd2cd1SMichal Hocko /* associate pfn range with the zone */
1195b30c5927SDavid Hildenbrand move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
1196511c2abaSLai Jiangshan
11977b78d335SYasunori Goto arg.start_pfn = pfn;
11987b78d335SYasunori Goto arg.nr_pages = nr_pages;
1199d9713679SLai Jiangshan node_states_check_changes_online(nr_pages, zone, &arg);
12007b78d335SYasunori Goto
12017b78d335SYasunori Goto ret = memory_notify(MEM_GOING_ONLINE, &arg);
12027b78d335SYasunori Goto ret = notifier_to_errno(ret);
1203e33e33b4SChen Yucong if (ret)
1204e33e33b4SChen Yucong goto failed_addition;
1205e33e33b4SChen Yucong
12063947be19SDave Hansen /*
1207b30c5927SDavid Hildenbrand * Fixup the number of isolated pageblocks before marking the sections
1208b30c5927SDavid Hildenbrand * online, such that undo_isolate_page_range() works correctly.
1209b30c5927SDavid Hildenbrand */
1210b30c5927SDavid Hildenbrand spin_lock_irqsave(&zone->lock, flags);
1211b30c5927SDavid Hildenbrand zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
1212b30c5927SDavid Hildenbrand spin_unlock_irqrestore(&zone->lock, flags);
1213b30c5927SDavid Hildenbrand
1214b30c5927SDavid Hildenbrand /*
12156811378eSYasunori Goto * If this zone is not populated, then it is not in zonelist.
12166811378eSYasunori Goto * This means the page allocator ignores this zone.
12176811378eSYasunori Goto * So, zonelist must be updated after online.
12186811378eSYasunori Goto */
12196dcd73d7SWen Congyang if (!populated_zone(zone)) {
12206811378eSYasunori Goto need_zonelists_rebuild = 1;
122172675e13SMichal Hocko setup_zone_pageset(zone);
12226dcd73d7SWen Congyang }
12236811378eSYasunori Goto
1224aac65321SDavid Hildenbrand online_pages_range(pfn, nr_pages);
1225836809ecSDavid Hildenbrand adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);
1226aa47228aSCody P Schafer
1227b30c5927SDavid Hildenbrand node_states_set_node(nid, &arg);
1228b30c5927SDavid Hildenbrand if (need_zonelists_rebuild)
1229b30c5927SDavid Hildenbrand build_all_zonelists(NULL);
1230b30c5927SDavid Hildenbrand
1231b30c5927SDavid Hildenbrand /* Basic onlining is complete, allow allocation of onlined pages. */
1232b30c5927SDavid Hildenbrand undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
1233b30c5927SDavid Hildenbrand
123493146d98SDavid Hildenbrand /*
1235b86c5fc4SDavid Hildenbrand * Freshly onlined pages aren't shuffled (e.g., all pages are placed to
1236b86c5fc4SDavid Hildenbrand * the tail of the freelist when undoing isolation). Shuffle the whole
1237b86c5fc4SDavid Hildenbrand * zone to make sure the just onlined pages are properly distributed
1238b86c5fc4SDavid Hildenbrand * across the whole freelist - to create an initial shuffle.
123993146d98SDavid Hildenbrand */
1240e900a918SDan Williams shuffle_zone(zone);
1241e900a918SDan Williams
1242b92ca18eSMel Gorman /* reinitialise watermarks and update pcp limits */
12431b79acc9SKOSAKI Motohiro init_per_zone_wmark_min();
12441b79acc9SKOSAKI Motohiro
1245e888ca35SVlastimil Babka kswapd_run(nid);
1246698b1b30SVlastimil Babka kcompactd_run(nid);
124761b13993SDave Hansen
12482d1d43f6SChandra Seetharaman writeback_set_ratelimit();
12497b78d335SYasunori Goto
12507b78d335SYasunori Goto memory_notify(MEM_ONLINE, &arg);
125130467e0bSDavid Rientjes return 0;
1252e33e33b4SChen Yucong
1253e33e33b4SChen Yucong failed_addition:
1254e33e33b4SChen Yucong pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
1255e33e33b4SChen Yucong (unsigned long long) pfn << PAGE_SHIFT,
1256e33e33b4SChen Yucong (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
1257e33e33b4SChen Yucong memory_notify(MEM_CANCEL_ONLINE, &arg);
1258feee6b29SDavid Hildenbrand remove_pfn_range_from_zone(zone, pfn, nr_pages);
1259e33e33b4SChen Yucong return ret;
12603947be19SDave Hansen }
1261bc02af93SYasunori Goto
1262e1319331SHidetoshi Seto /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
1263f732e242SWei Yang static pg_data_t *hotadd_init_pgdat(int nid)
12649af3c2deSYasunori Goto {
12659af3c2deSYasunori Goto struct pglist_data *pgdat;
12669af3c2deSYasunori Goto
126709f49dcaSMichal Hocko /*
126809f49dcaSMichal Hocko * NODE_DATA is preallocated (free_area_init) but its internal
126909f49dcaSMichal Hocko * state is not allocated completely. Add missing pieces.
127009f49dcaSMichal Hocko * Completely offline nodes stay around and they just need
127109f49dcaSMichal Hocko * reinitialization.
127209f49dcaSMichal Hocko */
127370b5b46aSMichal Hocko pgdat = NODE_DATA(nid);
127403e85f9dSOscar Salvador
12759af3c2deSYasunori Goto /* init node's zones as empty zones, we don't have any present pages. */
127670b5b46aSMichal Hocko free_area_init_core_hotplug(pgdat);
12779af3c2deSYasunori Goto
1278959ecc48SKAMEZAWA Hiroyuki /*
1279959ecc48SKAMEZAWA Hiroyuki * The node we allocated has no zone fallback lists. To avoid
1280959ecc48SKAMEZAWA Hiroyuki * accessing an uninitialized zonelist, build it here.
1281959ecc48SKAMEZAWA Hiroyuki */
128272675e13SMichal Hocko build_all_zonelists(pgdat);
1283959ecc48SKAMEZAWA Hiroyuki
12849af3c2deSYasunori Goto return pgdat;
12859af3c2deSYasunori Goto }
12869af3c2deSYasunori Goto
1287ba2d2666SMel Gorman /*
1288ba2d2666SMel Gorman * __try_online_node - online a node if offlined
1289e8b098fcSMike Rapoport * @nid: the node ID
1290b9ff0360SOscar Salvador * @set_node_online: Whether we want to online the node
1291cf23422bSminskey guo * Called by cpu_up() to online a node without onlined memory.
1292b9ff0360SOscar Salvador *
1293b9ff0360SOscar Salvador * Returns:
1294b9ff0360SOscar Salvador * 1 -> a new node has been allocated
1295b9ff0360SOscar Salvador * 0 -> the node is already online
1296b9ff0360SOscar Salvador * -ENOMEM -> the node could not be allocated
1297cf23422bSminskey guo */
1298c68ab18cSDavid Hildenbrand static int __try_online_node(int nid, bool set_node_online)
1299cf23422bSminskey guo {
1300cf23422bSminskey guo pg_data_t *pgdat;
1301b9ff0360SOscar Salvador int ret = 1;
1302cf23422bSminskey guo
130301b0f197SToshi Kani if (node_online(nid))
130401b0f197SToshi Kani return 0;
130501b0f197SToshi Kani
130609f49dcaSMichal Hocko pgdat = hotadd_init_pgdat(nid);
13077553e8f2SDavid Rientjes if (!pgdat) {
130801b0f197SToshi Kani pr_err("Cannot online node %d due to NULL pgdat\n", nid);
1309cf23422bSminskey guo ret = -ENOMEM;
1310cf23422bSminskey guo goto out;
1311cf23422bSminskey guo }
1312b9ff0360SOscar Salvador
1313b9ff0360SOscar Salvador if (set_node_online) {
1314cf23422bSminskey guo node_set_online(nid);
1315cf23422bSminskey guo ret = register_one_node(nid);
1316cf23422bSminskey guo BUG_ON(ret);
1317b9ff0360SOscar Salvador }
1318cf23422bSminskey guo out:
1319b9ff0360SOscar Salvador return ret;
1320b9ff0360SOscar Salvador }
1321b9ff0360SOscar Salvador
1322b9ff0360SOscar Salvador /*
1323b9ff0360SOscar Salvador * Users of this function always want to online/register the node
1324b9ff0360SOscar Salvador */
1325b9ff0360SOscar Salvador int try_online_node(int nid)
1326b9ff0360SOscar Salvador {
1327b9ff0360SOscar Salvador int ret;
1328b9ff0360SOscar Salvador
1329b9ff0360SOscar Salvador mem_hotplug_begin();
1330c68ab18cSDavid Hildenbrand ret = __try_online_node(nid, true);
1331bfc8c901SVladimir Davydov mem_hotplug_done();
1332cf23422bSminskey guo return ret;
1333cf23422bSminskey guo }
1334cf23422bSminskey guo
133527356f54SToshi Kani static int check_hotplug_memory_range(u64 start, u64 size)
133627356f54SToshi Kani {
1337ba325585SPavel Tatashin /* memory range must be block size aligned */
1338cec3ebd0SDavid Hildenbrand if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
1339cec3ebd0SDavid Hildenbrand !IS_ALIGNED(size, memory_block_size_bytes())) {
1340ba325585SPavel Tatashin pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
1341cec3ebd0SDavid Hildenbrand memory_block_size_bytes(), start, size);
134227356f54SToshi Kani return -EINVAL;
134327356f54SToshi Kani }
134427356f54SToshi Kani
134527356f54SToshi Kani return 0;
134627356f54SToshi Kani }
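
/*
 * Example (illustrative, assuming a 128 MiB == 0x8000000 memory block
 * size): check_hotplug_memory_range(0x100000000, 0x8000000) succeeds,
 * while check_hotplug_memory_range(0x100000000, 0x4000000) fails because
 * the size covers only half a memory block.
 */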
134727356f54SToshi Kani
134831bc3858SVitaly Kuznetsov static int online_memory_block(struct memory_block *mem, void *arg)
134931bc3858SVitaly Kuznetsov {
135044d46b76SGregory Price mem->online_type = mhp_get_default_online_type();
1351dc18d706SNathan Fontenot return device_online(&mem->dev);
135231bc3858SVitaly Kuznetsov }
135331bc3858SVitaly Kuznetsov
135485a2b4b0SAneesh Kumar K.V #ifndef arch_supports_memmap_on_memory
135585a2b4b0SAneesh Kumar K.V static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
135685a2b4b0SAneesh Kumar K.V {
135785a2b4b0SAneesh Kumar K.V /*
135885a2b4b0SAneesh Kumar K.V * As default, we want the vmemmap to span a complete PMD such that we
135985a2b4b0SAneesh Kumar K.V * can map the vmemmap using a single PMD if supported by the
136085a2b4b0SAneesh Kumar K.V * architecture.
136185a2b4b0SAneesh Kumar K.V */
136285a2b4b0SAneesh Kumar K.V return IS_ALIGNED(vmemmap_size, PMD_SIZE);
136385a2b4b0SAneesh Kumar K.V }
136485a2b4b0SAneesh Kumar K.V #endif
136585a2b4b0SAneesh Kumar K.V
136642d93582SVishal Verma bool mhp_supports_memmap_on_memory(void)
1367a08a2ae3SOscar Salvador {
136885a2b4b0SAneesh Kumar K.V unsigned long vmemmap_size = memory_block_memmap_size();
13692d1f649cSAneesh Kumar K.V unsigned long memmap_pages = memory_block_memmap_on_memory_pages();
1370a08a2ae3SOscar Salvador
1371a08a2ae3SOscar Salvador /*
1372a08a2ae3SOscar Salvador * Besides having arch support and the feature enabled at runtime, we
1373a08a2ae3SOscar Salvador * need a few more assumptions to hold true:
1374a08a2ae3SOscar Salvador *
137542d93582SVishal Verma * a) The vmemmap pages span complete PMDs: We don't want vmemmap code
1376a08a2ae3SOscar Salvador * to populate memory from the altmap for unrelated parts (i.e.,
1377a08a2ae3SOscar Salvador * other memory blocks)
1378a08a2ae3SOscar Salvador *
137942d93582SVishal Verma * b) The vmemmap pages (and thereby the pages that will be exposed to
1380a08a2ae3SOscar Salvador * the buddy) have to cover full pageblocks: memory onlining/offlining
1381a08a2ae3SOscar Salvador * code requires applicable ranges to be page-aligned, for example, to
1382a08a2ae3SOscar Salvador * set the migratetypes properly.
1383a08a2ae3SOscar Salvador *
1384a08a2ae3SOscar Salvador * TODO: Although we have a check here to make sure that vmemmap pages
1385a08a2ae3SOscar Salvador * fully populate a PMD, it is not the right place to check for
1386a08a2ae3SOscar Salvador * this. A much better solution involves improving vmemmap code
1387a08a2ae3SOscar Salvador * to fallback to base pages when trying to populate vmemmap using
1388a08a2ae3SOscar Salvador * altmap as an alternative source of memory, and we do not exactly
1389a08a2ae3SOscar Salvador * populate a single PMD.
1390a08a2ae3SOscar Salvador */
139142d93582SVishal Verma if (!mhp_memmap_on_memory())
13922d1f649cSAneesh Kumar K.V return false;
13932d1f649cSAneesh Kumar K.V
13942d1f649cSAneesh Kumar K.V /*
13952d1f649cSAneesh Kumar K.V * Make sure the vmemmap allocation is fully contained
13962d1f649cSAneesh Kumar K.V * so that we always allocate vmemmap memory from the altmap area.
13972d1f649cSAneesh Kumar K.V */
13982d1f649cSAneesh Kumar K.V if (!IS_ALIGNED(vmemmap_size, PAGE_SIZE))
13992d1f649cSAneesh Kumar K.V return false;
14002d1f649cSAneesh Kumar K.V
14012d1f649cSAneesh Kumar K.V /*
14022d1f649cSAneesh Kumar K.V * The start pfn must be pageblock_nr_pages-aligned so that migrate
14032d1f649cSAneesh Kumar K.V * types can be set correctly.
14042d1f649cSAneesh Kumar K.V */
14052d1f649cSAneesh Kumar K.V if (!pageblock_aligned(memmap_pages))
14062d1f649cSAneesh Kumar K.V return false;
14072d1f649cSAneesh Kumar K.V
14082d1f649cSAneesh Kumar K.V if (memmap_pages == PHYS_PFN(memory_block_size_bytes()))
14092d1f649cSAneesh Kumar K.V /* The vmemmap would consume the whole block, leaving no usable memory. */
14102d1f649cSAneesh Kumar K.V return false;
14112d1f649cSAneesh Kumar K.V
14122d1f649cSAneesh Kumar K.V return arch_supports_memmap_on_memory(vmemmap_size);
1413a08a2ae3SOscar Salvador }
141442d93582SVishal Verma EXPORT_SYMBOL_GPL(mhp_supports_memmap_on_memory);
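
/*
 * Worked example for the checks above (illustrative, x86-64 defaults):
 * a 128 MiB memory block holds 32768 pages of 4 KiB; with
 * sizeof(struct page) == 64, the vmemmap needs 32768 * 64 B = 2 MiB --
 * exactly one PMD and, at 512 pages, exactly one pageblock -- so the
 * PMD, PAGE_SIZE and pageblock alignment checks all pass.
 */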
1415a08a2ae3SOscar Salvador
1416f732e242SWei Yang static void remove_memory_blocks_and_altmaps(u64 start, u64 size)
14176b8f0798SVishal Verma {
14186b8f0798SVishal Verma unsigned long memblock_size = memory_block_size_bytes();
14196b8f0798SVishal Verma u64 cur_start;
14206b8f0798SVishal Verma
14216b8f0798SVishal Verma /*
14226b8f0798SVishal Verma * For memmap_on_memory, the altmaps were added on a per-memblock
14236b8f0798SVishal Verma * basis; we have to process each individual memory block.
14246b8f0798SVishal Verma */
14256b8f0798SVishal Verma for (cur_start = start; cur_start < start + size;
14266b8f0798SVishal Verma cur_start += memblock_size) {
14276b8f0798SVishal Verma struct vmem_altmap *altmap = NULL;
14286b8f0798SVishal Verma struct memory_block *mem;
14296b8f0798SVishal Verma
14306b8f0798SVishal Verma mem = find_memory_block(pfn_to_section_nr(PFN_DOWN(cur_start)));
14316b8f0798SVishal Verma if (WARN_ON_ONCE(!mem))
14326b8f0798SVishal Verma continue;
14336b8f0798SVishal Verma
14346b8f0798SVishal Verma altmap = mem->altmap;
14356b8f0798SVishal Verma mem->altmap = NULL;
14366b8f0798SVishal Verma
14376b8f0798SVishal Verma remove_memory_block_devices(cur_start, memblock_size);
14386b8f0798SVishal Verma
14396b8f0798SVishal Verma arch_remove_memory(cur_start, memblock_size, altmap);
14406b8f0798SVishal Verma
14416b8f0798SVishal Verma /* Verify that all vmemmap pages have actually been freed. */
14426b8f0798SVishal Verma WARN(altmap->alloc, "Altmap not fully unmapped");
14436b8f0798SVishal Verma kfree(altmap);
14446b8f0798SVishal Verma }
14456b8f0798SVishal Verma }
14466b8f0798SVishal Verma
14476b8f0798SVishal Verma static int create_altmaps_and_memory_blocks(int nid, struct memory_group *group,
1448c5f1e2d1SSumanth Korikkar u64 start, u64 size, mhp_t mhp_flags)
14496b8f0798SVishal Verma {
14506b8f0798SVishal Verma unsigned long memblock_size = memory_block_size_bytes();
14516b8f0798SVishal Verma u64 cur_start;
14526b8f0798SVishal Verma int ret;
14536b8f0798SVishal Verma
14546b8f0798SVishal Verma for (cur_start = start; cur_start < start + size;
14556b8f0798SVishal Verma cur_start += memblock_size) {
14566b8f0798SVishal Verma struct mhp_params params = { .pgprot =
14576b8f0798SVishal Verma pgprot_mhp(PAGE_KERNEL) };
14586b8f0798SVishal Verma struct vmem_altmap mhp_altmap = {
14596b8f0798SVishal Verma .base_pfn = PHYS_PFN(cur_start),
14606b8f0798SVishal Verma .end_pfn = PHYS_PFN(cur_start + memblock_size - 1),
14616b8f0798SVishal Verma };
14626b8f0798SVishal Verma
14636b8f0798SVishal Verma mhp_altmap.free = memory_block_memmap_on_memory_pages();
1464c5f1e2d1SSumanth Korikkar if (mhp_flags & MHP_OFFLINE_INACCESSIBLE)
1465c5f1e2d1SSumanth Korikkar mhp_altmap.inaccessible = true;
14666b8f0798SVishal Verma params.altmap = kmemdup(&mhp_altmap, sizeof(struct vmem_altmap),
14676b8f0798SVishal Verma GFP_KERNEL);
14686b8f0798SVishal Verma if (!params.altmap) {
14696b8f0798SVishal Verma ret = -ENOMEM;
14706b8f0798SVishal Verma goto out;
14716b8f0798SVishal Verma }
14726b8f0798SVishal Verma
14736b8f0798SVishal Verma /* call arch's memory hotadd */
14746b8f0798SVishal Verma ret = arch_add_memory(nid, cur_start, memblock_size, ¶ms);
14756b8f0798SVishal Verma if (ret < 0) {
14766b8f0798SVishal Verma kfree(params.altmap);
14776b8f0798SVishal Verma goto out;
14786b8f0798SVishal Verma }
14796b8f0798SVishal Verma
14806b8f0798SVishal Verma /* create memory block devices after memory was added */
14816b8f0798SVishal Verma ret = create_memory_block_devices(cur_start, memblock_size,
14826b8f0798SVishal Verma params.altmap, group);
14836b8f0798SVishal Verma if (ret) {
14846b8f0798SVishal Verma arch_remove_memory(cur_start, memblock_size, NULL);
14856b8f0798SVishal Verma kfree(params.altmap);
14866b8f0798SVishal Verma goto out;
14876b8f0798SVishal Verma }
14886b8f0798SVishal Verma }
14896b8f0798SVishal Verma
14906b8f0798SVishal Verma return 0;
14916b8f0798SVishal Verma out:
14926b8f0798SVishal Verma if (ret && cur_start != start)
14936b8f0798SVishal Verma remove_memory_blocks_and_altmaps(start, cur_start - start);
14946b8f0798SVishal Verma return ret;
14956b8f0798SVishal Verma }
14966b8f0798SVishal Verma
14978df1d0e4SDavid Hildenbrand /*
14988df1d0e4SDavid Hildenbrand * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
14998df1d0e4SDavid Hildenbrand * and online/offline operations (triggered e.g. by sysfs).
15008df1d0e4SDavid Hildenbrand *
15018df1d0e4SDavid Hildenbrand * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
15028df1d0e4SDavid Hildenbrand */
1503f732e242SWei Yang int add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
1504bc02af93SYasunori Goto {
1505d15dfd31SCatalin Marinas struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
150632befe9eSDavid Hildenbrand enum memblock_flags memblock_flags = MEMBLOCK_NONE;
1507028fc57aSDavid Hildenbrand struct memory_group *group = NULL;
150862cedb9fSDavid Vrabel u64 start, size;
1509b9ff0360SOscar Salvador bool new_node = false;
1510bc02af93SYasunori Goto int ret;
1511bc02af93SYasunori Goto
151262cedb9fSDavid Vrabel start = res->start;
151362cedb9fSDavid Vrabel size = resource_size(res);
151462cedb9fSDavid Vrabel
151527356f54SToshi Kani ret = check_hotplug_memory_range(start, size);
151627356f54SToshi Kani if (ret)
151727356f54SToshi Kani return ret;
151827356f54SToshi Kani
1519028fc57aSDavid Hildenbrand if (mhp_flags & MHP_NID_IS_MGID) {
1520028fc57aSDavid Hildenbrand group = memory_group_find_by_id(nid);
1521028fc57aSDavid Hildenbrand if (!group)
1522028fc57aSDavid Hildenbrand return -EINVAL;
1523028fc57aSDavid Hildenbrand nid = group->nid;
1524028fc57aSDavid Hildenbrand }
1525028fc57aSDavid Hildenbrand
1526fa6d9ec7SVishal Verma if (!node_possible(nid)) {
1527fa6d9ec7SVishal Verma WARN(1, "node %d was absent from the node_possible_map\n", nid);
1528fa6d9ec7SVishal Verma return -EINVAL;
1529fa6d9ec7SVishal Verma }
1530fa6d9ec7SVishal Verma
1531bfc8c901SVladimir Davydov mem_hotplug_begin();
1532ac13c462SNathan Zimmer
153353d38316SDavid Hildenbrand if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
153432befe9eSDavid Hildenbrand if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED)
153532befe9eSDavid Hildenbrand memblock_flags = MEMBLOCK_DRIVER_MANAGED;
153632befe9eSDavid Hildenbrand ret = memblock_add_node(start, size, nid, memblock_flags);
153753d38316SDavid Hildenbrand if (ret)
153853d38316SDavid Hildenbrand goto error_mem_hotplug_end;
153953d38316SDavid Hildenbrand }
15407f36e3e5STang Chen
1541c68ab18cSDavid Hildenbrand ret = __try_online_node(nid, false);
1542b9ff0360SOscar Salvador if (ret < 0)
154341b9e2d7SWen Congyang goto error;
1544b9ff0360SOscar Salvador new_node = ret;
15459af3c2deSYasunori Goto
1546a08a2ae3SOscar Salvador /*
1547a08a2ae3SOscar Salvador * Self-hosted memmap array
1548a08a2ae3SOscar Salvador */
15496b8f0798SVishal Verma if ((mhp_flags & MHP_MEMMAP_ON_MEMORY) &&
155042d93582SVishal Verma mhp_supports_memmap_on_memory()) {
1551c5f1e2d1SSumanth Korikkar ret = create_altmaps_and_memory_blocks(nid, group, start, size, mhp_flags);
15526b8f0798SVishal Verma if (ret)
15531a8c64e1SAneesh Kumar K.V goto error;
15546b8f0798SVishal Verma } else {
1555f5637d3bSLogan Gunthorpe ret = arch_add_memory(nid, start, size, ¶ms);
15569af3c2deSYasunori Goto if (ret < 0)
15576b8f0798SVishal Verma goto error;
15589af3c2deSYasunori Goto
1559db051a0dSDavid Hildenbrand /* create memory block devices after memory was added */
15606b8f0798SVishal Verma ret = create_memory_block_devices(start, size, NULL, group);
1561db051a0dSDavid Hildenbrand if (ret) {
1562f42ce5f0SSumanth Korikkar arch_remove_memory(start, size, params.altmap);
15636b8f0798SVishal Verma goto error;
15646b8f0798SVishal Verma }
1565db051a0dSDavid Hildenbrand }
1566db051a0dSDavid Hildenbrand
1567a1e565aaSTang Chen if (new_node) {
1568d5b6f6a3SOscar Salvador /* If the sysfs file of the new node can't be created, CPUs on
15690fc44159SYasunori Goto * that node can't be hot-added and there is no way to roll back.
15700fc44159SYasunori Goto * So catch it with BUG_ON(), reluctantly.
1571d5b6f6a3SOscar Salvador * We online the node here; we can't roll back from this point.
15720fc44159SYasunori Goto */
1573d5b6f6a3SOscar Salvador node_set_online(nid);
1574d5b6f6a3SOscar Salvador ret = __register_one_node(nid);
15750fc44159SYasunori Goto BUG_ON(ret);
15760fc44159SYasunori Goto }
15770fc44159SYasunori Goto
1578cc651559SDavid Hildenbrand register_memory_blocks_under_node(nid, PFN_DOWN(start),
1579cc651559SDavid Hildenbrand PFN_UP(start + size - 1),
1580f85086f9SLaurent Dufour MEMINIT_HOTPLUG);
1581d5b6f6a3SOscar Salvador
1582d96ae530S[email protected] /* create new memmap entry */
15837b7b2721SDavid Hildenbrand if (!strcmp(res->name, "System RAM"))
1584d96ae530S[email protected] firmware_map_add_hotplug(start, start + size, "System RAM");
1585d96ae530S[email protected]
1586381eab4aSDavid Hildenbrand /* device_online() will take the lock when calling online_pages() */
1587381eab4aSDavid Hildenbrand mem_hotplug_done();
1588381eab4aSDavid Hildenbrand
15899ca6551eSDavid Hildenbrand /*
15909ca6551eSDavid Hildenbrand * In case we're allowed to merge the resource, flag it and trigger
15919ca6551eSDavid Hildenbrand * merging now that adding succeeded.
15929ca6551eSDavid Hildenbrand */
159326011267SDavid Hildenbrand if (mhp_flags & MHP_MERGE_RESOURCE)
15949ca6551eSDavid Hildenbrand merge_system_ram_resource(res);
15959ca6551eSDavid Hildenbrand
159631bc3858SVitaly Kuznetsov /* online pages if requested */
159744d46b76SGregory Price if (mhp_get_default_online_type() != MMOP_OFFLINE)
1598fbcf73ceSDavid Hildenbrand walk_memory_blocks(start, size, NULL, online_memory_block);
159931bc3858SVitaly Kuznetsov
1600381eab4aSDavid Hildenbrand return ret;
16019af3c2deSYasunori Goto error:
160252219aeaSDavid Hildenbrand if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
16037f36e3e5STang Chen memblock_remove(start, size);
160453d38316SDavid Hildenbrand error_mem_hotplug_end:
1605bfc8c901SVladimir Davydov mem_hotplug_done();
1606bc02af93SYasunori Goto return ret;
1607bc02af93SYasunori Goto }
160862cedb9fSDavid Vrabel
16098df1d0e4SDavid Hildenbrand /* requires device_hotplug_lock, see add_memory_resource() */
1610f732e242SWei Yang int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
161162cedb9fSDavid Vrabel {
161262cedb9fSDavid Vrabel struct resource *res;
161362cedb9fSDavid Vrabel int ret;
161462cedb9fSDavid Vrabel
16157b7b2721SDavid Hildenbrand res = register_memory_resource(start, size, "System RAM");
16166f754ba4SVitaly Kuznetsov if (IS_ERR(res))
16176f754ba4SVitaly Kuznetsov return PTR_ERR(res);
161862cedb9fSDavid Vrabel
1619b6117199SDavid Hildenbrand ret = add_memory_resource(nid, res, mhp_flags);
162062cedb9fSDavid Vrabel if (ret < 0)
162162cedb9fSDavid Vrabel release_memory_resource(res);
162262cedb9fSDavid Vrabel return ret;
162362cedb9fSDavid Vrabel }
16248df1d0e4SDavid Hildenbrand
1625b6117199SDavid Hildenbrand int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
16268df1d0e4SDavid Hildenbrand {
16278df1d0e4SDavid Hildenbrand int rc;
16288df1d0e4SDavid Hildenbrand
16298df1d0e4SDavid Hildenbrand lock_device_hotplug();
1630b6117199SDavid Hildenbrand rc = __add_memory(nid, start, size, mhp_flags);
16318df1d0e4SDavid Hildenbrand unlock_device_hotplug();
16328df1d0e4SDavid Hildenbrand
16338df1d0e4SDavid Hildenbrand return rc;
16348df1d0e4SDavid Hildenbrand }
1635bc02af93SYasunori Goto EXPORT_SYMBOL_GPL(add_memory);
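
/*
 * Minimal usage sketch (hypothetical caller; the physical address is
 * made up): hot-add one properly aligned memory block to node 0 and let
 * the configured default online type take effect via
 * online_memory_block().
 */
static int __maybe_unused example_hotadd_one_block(void)
{
	u64 start = 0x100000000ULL;		/* 4 GiB, assumed unused */
	u64 size = memory_block_size_bytes();	/* exactly one block */

	return add_memory(0, start, size, MHP_NONE);
}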
16360c0e6195SKAMEZAWA Hiroyuki
16377b7b2721SDavid Hildenbrand /*
16387b7b2721SDavid Hildenbrand * Add special, driver-managed memory to the system as system RAM. Such
16397b7b2721SDavid Hildenbrand * memory is not exposed via the raw firmware-provided memmap as system
16407b7b2721SDavid Hildenbrand * RAM; instead, it is detected and added by a driver - during cold boot,
16417b7b2721SDavid Hildenbrand * after a reboot, and after kexec.
16427b7b2721SDavid Hildenbrand *
16437b7b2721SDavid Hildenbrand * Reasons why this memory should not be used for the initial memmap of a
16447b7b2721SDavid Hildenbrand * kexec kernel or for placing kexec images:
16457b7b2721SDavid Hildenbrand * - The booting kernel is in charge of determining how this memory will be
16467b7b2721SDavid Hildenbrand * used (e.g., use persistent memory as system RAM)
16477b7b2721SDavid Hildenbrand * - Coordination with a hypervisor is required before this memory
16487b7b2721SDavid Hildenbrand * can be used (e.g., inaccessible parts).
16497b7b2721SDavid Hildenbrand *
16507b7b2721SDavid Hildenbrand * For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided
16517b7b2721SDavid Hildenbrand * memory map") are created. Also, the created memory resource is flagged
16527cf603d1SDavid Hildenbrand * with IORESOURCE_SYSRAM_DRIVER_MANAGED, so in-kernel users can special-case
16537b7b2721SDavid Hildenbrand * this memory as well (esp., not place kexec images onto it).
16547b7b2721SDavid Hildenbrand *
16557b7b2721SDavid Hildenbrand * The resource_name (visible via /proc/iomem) has to have the format
16567b7b2721SDavid Hildenbrand * "System RAM ($DRIVER)".
16577b7b2721SDavid Hildenbrand */
16587b7b2721SDavid Hildenbrand int add_memory_driver_managed(int nid, u64 start, u64 size,
1659b6117199SDavid Hildenbrand const char *resource_name, mhp_t mhp_flags)
16607b7b2721SDavid Hildenbrand {
16617b7b2721SDavid Hildenbrand struct resource *res;
16627b7b2721SDavid Hildenbrand int rc;
16637b7b2721SDavid Hildenbrand
16647b7b2721SDavid Hildenbrand if (!resource_name ||
16657b7b2721SDavid Hildenbrand strstr(resource_name, "System RAM (") != resource_name ||
16667b7b2721SDavid Hildenbrand resource_name[strlen(resource_name) - 1] != ')')
16677b7b2721SDavid Hildenbrand return -EINVAL;
16687b7b2721SDavid Hildenbrand
16697b7b2721SDavid Hildenbrand lock_device_hotplug();
16707b7b2721SDavid Hildenbrand
16717b7b2721SDavid Hildenbrand res = register_memory_resource(start, size, resource_name);
16727b7b2721SDavid Hildenbrand if (IS_ERR(res)) {
16737b7b2721SDavid Hildenbrand rc = PTR_ERR(res);
16747b7b2721SDavid Hildenbrand goto out_unlock;
16757b7b2721SDavid Hildenbrand }
16767b7b2721SDavid Hildenbrand
1677b6117199SDavid Hildenbrand rc = add_memory_resource(nid, res, mhp_flags);
16787b7b2721SDavid Hildenbrand if (rc < 0)
16797b7b2721SDavid Hildenbrand release_memory_resource(res);
16807b7b2721SDavid Hildenbrand
16817b7b2721SDavid Hildenbrand out_unlock:
16827b7b2721SDavid Hildenbrand unlock_device_hotplug();
16837b7b2721SDavid Hildenbrand return rc;
16847b7b2721SDavid Hildenbrand }
16857b7b2721SDavid Hildenbrand EXPORT_SYMBOL_GPL(add_memory_driver_managed);
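
/*
 * Usage sketch (hypothetical driver): the resource name must follow the
 * "System RAM ($DRIVER)" format enforced above, similar to the
 * "System RAM (kmem)" name used by the dax/kmem driver.
 */
static int __maybe_unused example_driver_managed_add(int nid, u64 start,
						     u64 size)
{
	return add_memory_driver_managed(nid, start, size,
					 "System RAM (example)", MHP_NONE);
}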
16867b7b2721SDavid Hildenbrand
1687bca3feaaSAnshuman Khandual /*
1688bca3feaaSAnshuman Khandual * Platforms should define arch_get_mappable_range() that provides
1689bca3feaaSAnshuman Khandual * maximum possible addressable physical memory range for which the
1690bca3feaaSAnshuman Khandual * linear mapping could be created. The platform-returned address
1691bca3feaaSAnshuman Khandual * range must adhere to the following semantics.
1692bca3feaaSAnshuman Khandual *
1693bca3feaaSAnshuman Khandual * - range.start <= range.end
1694bca3feaaSAnshuman Khandual * - Range includes both end points [range.start..range.end]
1695bca3feaaSAnshuman Khandual *
1696bca3feaaSAnshuman Khandual * There is also a fallback definition provided here, allowing the
1697bca3feaaSAnshuman Khandual * entire possible physical address range in case any platform does
1698bca3feaaSAnshuman Khandual * not define arch_get_mappable_range().
1699bca3feaaSAnshuman Khandual */
1700bca3feaaSAnshuman Khandual struct range __weak arch_get_mappable_range(void)
1701bca3feaaSAnshuman Khandual {
1702bca3feaaSAnshuman Khandual struct range mhp_range = {
1703bca3feaaSAnshuman Khandual .start = 0UL,
1704bca3feaaSAnshuman Khandual .end = -1ULL,
1705bca3feaaSAnshuman Khandual };
1706bca3feaaSAnshuman Khandual return mhp_range;
1707bca3feaaSAnshuman Khandual }
1708bca3feaaSAnshuman Khandual
1709bca3feaaSAnshuman Khandual struct range mhp_get_pluggable_range(bool need_mapping)
1710bca3feaaSAnshuman Khandual {
1711afe789b7SJohn Hubbard const u64 max_phys = DIRECT_MAP_PHYSMEM_END;
1712bca3feaaSAnshuman Khandual struct range mhp_range;
1713bca3feaaSAnshuman Khandual
1714bca3feaaSAnshuman Khandual if (need_mapping) {
1715bca3feaaSAnshuman Khandual mhp_range = arch_get_mappable_range();
1716bca3feaaSAnshuman Khandual if (mhp_range.start > max_phys) {
1717bca3feaaSAnshuman Khandual mhp_range.start = 0;
1718bca3feaaSAnshuman Khandual mhp_range.end = 0;
1719bca3feaaSAnshuman Khandual }
1720bca3feaaSAnshuman Khandual mhp_range.end = min_t(u64, mhp_range.end, max_phys);
1721bca3feaaSAnshuman Khandual } else {
1722bca3feaaSAnshuman Khandual mhp_range.start = 0;
1723bca3feaaSAnshuman Khandual mhp_range.end = max_phys;
1724bca3feaaSAnshuman Khandual }
1725bca3feaaSAnshuman Khandual return mhp_range;
1726bca3feaaSAnshuman Khandual }
1727bca3feaaSAnshuman Khandual EXPORT_SYMBOL_GPL(mhp_get_pluggable_range);
1728bca3feaaSAnshuman Khandual
1729bca3feaaSAnshuman Khandual bool mhp_range_allowed(u64 start, u64 size, bool need_mapping)
1730bca3feaaSAnshuman Khandual {
1731bca3feaaSAnshuman Khandual struct range mhp_range = mhp_get_pluggable_range(need_mapping);
1732bca3feaaSAnshuman Khandual u64 end = start + size;
1733bca3feaaSAnshuman Khandual
1734bca3feaaSAnshuman Khandual if (start < end && start >= mhp_range.start && (end - 1) <= mhp_range.end)
1735bca3feaaSAnshuman Khandual return true;
1736bca3feaaSAnshuman Khandual
1737bca3feaaSAnshuman Khandual pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n",
1738bca3feaaSAnshuman Khandual start, end, mhp_range.start, mhp_range.end);
1739bca3feaaSAnshuman Khandual return false;
1740bca3feaaSAnshuman Khandual }
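
/*
 * Typical usage (illustrative): an architecture's arch_add_memory()
 * would reject unmappable ranges early, e.g.:
 *
 *	if (!mhp_range_allowed(start, size, true))
 *		return -ERANGE;
 *
 * With the __weak fallback above, everything up to
 * DIRECT_MAP_PHYSMEM_END passes.
 */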
1741bca3feaaSAnshuman Khandual
17420c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_MEMORY_HOTREMOVE
17430c0e6195SKAMEZAWA Hiroyuki /*
17440efadf48SYisheng Xie * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
1745aa218795SDavid Hildenbrand * non-lru movable pages and hugepages). Will skip over most unmovable
1746aa218795SDavid Hildenbrand * pages (esp., pages that can be skipped when offlining), but bail out on
1747aa218795SDavid Hildenbrand * definitely unmovable pages.
1748aa218795SDavid Hildenbrand *
1749aa218795SDavid Hildenbrand * Returns:
1750aa218795SDavid Hildenbrand * 0 in case a movable page is found and movable_pfn was updated.
1751aa218795SDavid Hildenbrand * -ENOENT in case no movable page was found.
1752aa218795SDavid Hildenbrand * -EBUSY in case a definitely unmovable page was found.
17530c0e6195SKAMEZAWA Hiroyuki */
1754aa218795SDavid Hildenbrand static int scan_movable_pages(unsigned long start, unsigned long end,
1755aa218795SDavid Hildenbrand unsigned long *movable_pfn)
17560c0e6195SKAMEZAWA Hiroyuki {
17570c0e6195SKAMEZAWA Hiroyuki unsigned long pfn;
1758eeb0efd0SOscar Salvador
17590c0e6195SKAMEZAWA Hiroyuki for (pfn = start; pfn < end; pfn++) {
176016540daeSSidhartha Kumar struct page *page;
176116540daeSSidhartha Kumar struct folio *folio;
1762eeb0efd0SOscar Salvador
1763eeb0efd0SOscar Salvador if (!pfn_valid(pfn))
1764eeb0efd0SOscar Salvador continue;
17650c0e6195SKAMEZAWA Hiroyuki page = pfn_to_page(pfn);
17660c0e6195SKAMEZAWA Hiroyuki if (PageLRU(page))
1767aa218795SDavid Hildenbrand goto found;
17680efadf48SYisheng Xie if (__PageMovable(page))
1769aa218795SDavid Hildenbrand goto found;
1770aa218795SDavid Hildenbrand
1771aa218795SDavid Hildenbrand /*
1772aa218795SDavid Hildenbrand * PageOffline() pages that are not marked __PageMovable() and
1773aa218795SDavid Hildenbrand * have a reference count > 0 (after MEM_GOING_OFFLINE) are
1774aa218795SDavid Hildenbrand * definitely unmovable. If their reference count would be 0,
1775aa218795SDavid Hildenbrand * they could at least be skipped when offlining memory.
1776aa218795SDavid Hildenbrand */
1777aa218795SDavid Hildenbrand if (PageOffline(page) && page_count(page))
1778aa218795SDavid Hildenbrand return -EBUSY;
1779eeb0efd0SOscar Salvador
1780eeb0efd0SOscar Salvador if (!PageHuge(page))
1781eeb0efd0SOscar Salvador continue;
178216540daeSSidhartha Kumar folio = page_folio(page);
17838f251a3dSMike Kravetz /*
17848f251a3dSMike Kravetz * This test is racy as we hold no reference or lock. The
17858f251a3dSMike Kravetz * hugetlb page could have been freed and the head is no longer
17868f251a3dSMike Kravetz * a hugetlb page before the following check. In such unlikely
17878f251a3dSMike Kravetz * cases false positives and negatives are possible. Calling
17888f251a3dSMike Kravetz * code must deal with these scenarios.
17898f251a3dSMike Kravetz */
179016540daeSSidhartha Kumar if (folio_test_hugetlb_migratable(folio))
1791aa218795SDavid Hildenbrand goto found;
179216540daeSSidhartha Kumar pfn |= folio_nr_pages(folio) - 1;
17930c0e6195SKAMEZAWA Hiroyuki }
1794aa218795SDavid Hildenbrand return -ENOENT;
1795aa218795SDavid Hildenbrand found:
1796aa218795SDavid Hildenbrand *movable_pfn = pfn;
17970c0e6195SKAMEZAWA Hiroyuki return 0;
17980c0e6195SKAMEZAWA Hiroyuki }
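
/*
 * Calling-pattern sketch (simplified from the retry loop in
 * offline_pages() below): keep migrating until no movable page remains;
 * -EBUSY means a definitely unmovable page was found and offlining has
 * to fail.
 *
 *	do {
 *		ret = scan_movable_pages(pfn, end_pfn, &pfn);
 *		if (!ret)
 *			do_migrate_range(pfn, end_pfn);
 *	} while (!ret);
 *	if (ret != -ENOENT)
 *		goto failed_removal_isolated;
 */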
17990c0e6195SKAMEZAWA Hiroyuki
180032cf666eSSeongJae Park static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
18010c0e6195SKAMEZAWA Hiroyuki {
18026f1833b8SKefeng Wang struct folio *folio;
18030c0e6195SKAMEZAWA Hiroyuki unsigned long pfn;
18040c0e6195SKAMEZAWA Hiroyuki LIST_HEAD(source);
1805786dee86SLiam Mark static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
1806786dee86SLiam Mark DEFAULT_RATELIMIT_BURST);
18070c0e6195SKAMEZAWA Hiroyuki
1808a85009c3SMichal Hocko for (pfn = start_pfn; pfn < end_pfn; pfn++) {
18096f1833b8SKefeng Wang struct page *page;
1810869f7ee6SMatthew Wilcox (Oracle)
18110c0e6195SKAMEZAWA Hiroyuki if (!pfn_valid(pfn))
18120c0e6195SKAMEZAWA Hiroyuki continue;
18130c0e6195SKAMEZAWA Hiroyuki page = pfn_to_page(pfn);
1814869f7ee6SMatthew Wilcox (Oracle) folio = page_folio(page);
1815c8721bbbSNaoya Horiguchi
1816773b9a6aSMa Wupeng if (!folio_try_get(folio))
1817773b9a6aSMa Wupeng continue;
1818773b9a6aSMa Wupeng
1819773b9a6aSMa Wupeng if (unlikely(page_folio(page) != folio))
1820773b9a6aSMa Wupeng goto put_folio;
1821773b9a6aSMa Wupeng
1822*9342bc13SJinjiang Tu if (folio_test_large(folio))
1823*9342bc13SJinjiang Tu pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
1824*9342bc13SJinjiang Tu
18255f5ee52dSJinjiang Tu if (folio_contain_hwpoisoned_page(folio)) {
1826869f7ee6SMatthew Wilcox (Oracle) if (WARN_ON(folio_test_lru(folio)))
1827869f7ee6SMatthew Wilcox (Oracle) folio_isolate_lru(folio);
1828af288a42SMa Wupeng if (folio_mapped(folio)) {
1829af288a42SMa Wupeng folio_lock(folio);
1830b81679b1SMa Wupeng unmap_poisoned_folio(folio, pfn, false);
1831af288a42SMa Wupeng folio_unlock(folio);
1832af288a42SMa Wupeng }
1833b81679b1SMa Wupeng
18346f1833b8SKefeng Wang goto put_folio;
1835773b9a6aSMa Wupeng }
18366d9c285aSKOSAKI Motohiro
18376f1833b8SKefeng Wang if (!isolate_folio_to_list(folio, &source)) {
1838786dee86SLiam Mark if (__ratelimit(&migrate_rs)) {
18396f1833b8SKefeng Wang pr_warn("failed to isolate pfn %lx\n",
18406f1833b8SKefeng Wang page_to_pfn(page));
18410efadf48SYisheng Xie dump_page(page, "isolation failed");
18421723058eSOscar Salvador }
1843786dee86SLiam Mark }
18446f1833b8SKefeng Wang put_folio:
18456f1833b8SKefeng Wang folio_put(folio);
18460c0e6195SKAMEZAWA Hiroyuki }
1847f3ab2636SBob Liu if (!list_empty(&source)) {
1848203e6e5cSJoonsoo Kim nodemask_t nmask = node_states[N_MEMORY];
1849203e6e5cSJoonsoo Kim struct migration_target_control mtc = {
1850203e6e5cSJoonsoo Kim .nmask = &nmask,
1851a684d59aSDavid Hildenbrand .gfp_mask = GFP_KERNEL | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
1852e42dfe4eSBaolin Wang .reason = MR_MEMORY_HOTPLUG,
1853203e6e5cSJoonsoo Kim };
185432cf666eSSeongJae Park int ret;
1855203e6e5cSJoonsoo Kim
1856203e6e5cSJoonsoo Kim /*
1857203e6e5cSJoonsoo Kim * We have checked that the migration range is within a single zone,
1858203e6e5cSJoonsoo Kim * so we can use the nid of the first page for all the others.
1859203e6e5cSJoonsoo Kim */
18606f1833b8SKefeng Wang mtc.nid = folio_nid(list_first_entry(&source, struct folio, lru));
1861203e6e5cSJoonsoo Kim
1862203e6e5cSJoonsoo Kim /*
1863203e6e5cSJoonsoo Kim * try to allocate from a different node but reuse this node
1864203e6e5cSJoonsoo Kim * if there are no other online nodes to be used (e.g. we are
1865203e6e5cSJoonsoo Kim * offlining a part of the only existing node)
1866203e6e5cSJoonsoo Kim */
1867203e6e5cSJoonsoo Kim node_clear(mtc.nid, nmask);
1868203e6e5cSJoonsoo Kim if (nodes_empty(nmask))
1869203e6e5cSJoonsoo Kim node_set(mtc.nid, nmask);
1870203e6e5cSJoonsoo Kim ret = migrate_pages(&source, alloc_migration_target, NULL,
18715ac95884SYang Shi (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
18722932c8b0SMichal Hocko if (ret) {
18736f1833b8SKefeng Wang list_for_each_entry(folio, &source, lru) {
1874786dee86SLiam Mark if (__ratelimit(&migrate_rs)) {
1875786dee86SLiam Mark pr_warn("migrating pfn %lx failed ret:%d\n",
18766f1833b8SKefeng Wang folio_pfn(folio), ret);
18776f1833b8SKefeng Wang dump_page(&folio->page,
18786f1833b8SKefeng Wang "migration failure");
18792932c8b0SMichal Hocko }
1880786dee86SLiam Mark }
1881c8721bbbSNaoya Horiguchi putback_movable_pages(&source);
1882f3ab2636SBob Liu }
18832932c8b0SMichal Hocko }
18840c0e6195SKAMEZAWA Hiroyuki }
18850c0e6195SKAMEZAWA Hiroyuki
1886c5320926STang Chen static int __init cmdline_parse_movable_node(char *p)
1887c5320926STang Chen {
188855ac590cSTang Chen movable_node_enabled = true;
1889c5320926STang Chen return 0;
1890c5320926STang Chen }
1891c5320926STang Chen early_param("movable_node", cmdline_parse_movable_node);
1892c5320926STang Chen
1893d9713679SLai Jiangshan /* check which state of node_states will be changed when offline memory */
1894d9713679SLai Jiangshan static void node_states_check_changes_offline(unsigned long nr_pages,
1895d9713679SLai Jiangshan struct zone *zone, struct memory_notify *arg)
1896d9713679SLai Jiangshan {
1897d9713679SLai Jiangshan struct pglist_data *pgdat = zone->zone_pgdat;
1898d9713679SLai Jiangshan unsigned long present_pages = 0;
189986b27beaSOscar Salvador enum zone_type zt;
1900d9713679SLai Jiangshan
190198fa15f3SAnshuman Khandual arg->status_change_nid = NUMA_NO_NODE;
190298fa15f3SAnshuman Khandual arg->status_change_nid_normal = NUMA_NO_NODE;
190386b27beaSOscar Salvador
190486b27beaSOscar Salvador /*
190586b27beaSOscar Salvador * Check whether node_states[N_NORMAL_MEMORY] will be changed.
190686b27beaSOscar Salvador * If the memory to be offline is within the range
190786b27beaSOscar Salvador * [0..ZONE_NORMAL], and it is the last present memory there,
190886b27beaSOscar Salvador * the zones in that range will become empty after the offlining,
190986b27beaSOscar Salvador * thus we can determine that we need to clear the node from
191086b27beaSOscar Salvador * node_states[N_NORMAL_MEMORY].
191186b27beaSOscar Salvador */
191286b27beaSOscar Salvador for (zt = 0; zt <= ZONE_NORMAL; zt++)
191386b27beaSOscar Salvador present_pages += pgdat->node_zones[zt].present_pages;
191486b27beaSOscar Salvador if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages)
191586b27beaSOscar Salvador arg->status_change_nid_normal = zone_to_nid(zone);
1916d9713679SLai Jiangshan
19176715ddf9SLai Jiangshan /*
19186b740c6cSDavid Hildenbrand * We have accounted the pages from [0..ZONE_NORMAL]; ZONE_HIGHMEM
19196b740c6cSDavid Hildenbrand * does not apply as we don't support 32bit.
192086b27beaSOscar Salvador * Here we count the possible pages from ZONE_MOVABLE.
192186b27beaSOscar Salvador * If after having accounted all the pages, we see that the nr_pages
192286b27beaSOscar Salvador * to be offlined is over or equal to the accounted pages,
192386b27beaSOscar Salvador * we know that the node will become empty, and so, we can clear
192486b27beaSOscar Salvador * it for N_MEMORY as well.
1925d9713679SLai Jiangshan */
192686b27beaSOscar Salvador present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages;
1927d9713679SLai Jiangshan
1928d9713679SLai Jiangshan if (nr_pages >= present_pages)
1929d9713679SLai Jiangshan arg->status_change_nid = zone_to_nid(zone);
1930d9713679SLai Jiangshan }
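
/*
 * Example (illustrative): offlining all memory of a node whose pages sit
 * entirely in ZONE_MOVABLE sets arg->status_change_nid (the node becomes
 * memoryless), while arg->status_change_nid_normal stays NUMA_NO_NODE
 * because no kernel-zone pages are affected.
 */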
1931d9713679SLai Jiangshan
1932d9713679SLai Jiangshan static void node_states_clear_node(int node, struct memory_notify *arg)
1933d9713679SLai Jiangshan {
1934d9713679SLai Jiangshan if (arg->status_change_nid_normal >= 0)
1935d9713679SLai Jiangshan node_clear_state(node, N_NORMAL_MEMORY);
1936d9713679SLai Jiangshan
1937cf01f6f5SOscar Salvador if (arg->status_change_nid >= 0)
19386715ddf9SLai Jiangshan node_clear_state(node, N_MEMORY);
1939d9713679SLai Jiangshan }
1940d9713679SLai Jiangshan
1941c5e79ef5SDavid Hildenbrand static int count_system_ram_pages_cb(unsigned long start_pfn,
1942c5e79ef5SDavid Hildenbrand unsigned long nr_pages, void *data)
1943c5e79ef5SDavid Hildenbrand {
1944c5e79ef5SDavid Hildenbrand unsigned long *nr_system_ram_pages = data;
1945c5e79ef5SDavid Hildenbrand
1946c5e79ef5SDavid Hildenbrand *nr_system_ram_pages += nr_pages;
1947c5e79ef5SDavid Hildenbrand return 0;
1948c5e79ef5SDavid Hildenbrand }
1949c5e79ef5SDavid Hildenbrand
1950001002e7SSumanth Korikkar /*
1951001002e7SSumanth Korikkar * Must be called with mem_hotplug_lock in write mode.
1952001002e7SSumanth Korikkar */
1953f732e242SWei Yang int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
1954395f6081SDavid Hildenbrand struct zone *zone, struct memory_group *group)
19550c0e6195SKAMEZAWA Hiroyuki {
195673a11c96SDavid Hildenbrand const unsigned long end_pfn = start_pfn + nr_pages;
195750625744SDavid Hildenbrand unsigned long pfn, managed_pages, system_ram_pages = 0;
1958395f6081SDavid Hildenbrand const int node = zone_to_nid(zone);
1959d702909fSCody P Schafer unsigned long flags;
19607b78d335SYasunori Goto struct memory_notify arg;
196179605093SMichal Hocko char *reason;
1962395f6081SDavid Hildenbrand int ret;
19630c0e6195SKAMEZAWA Hiroyuki
1964dd8e2f23SOscar Salvador /*
1965dd8e2f23SOscar Salvador * {on,off}lining is constrained to full memory sections (or more
1966041711ceSZhen Lei * precisely to memory blocks from the user space POV).
1967dd8e2f23SOscar Salvador * memmap_on_memory is an exception because it reserves initial part
1968dd8e2f23SOscar Salvador * of the physical memory space for vmemmaps. That space is pageblock
1969dd8e2f23SOscar Salvador * aligned.
1970dd8e2f23SOscar Salvador */
1971ee0913c4SKefeng Wang if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(start_pfn) ||
1972dd8e2f23SOscar Salvador !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
19734986fac1SDavid Hildenbrand return -EINVAL;
19744986fac1SDavid Hildenbrand
1975c5e79ef5SDavid Hildenbrand /*
1976c5e79ef5SDavid Hildenbrand * Don't allow offlining memory blocks that contain holes.
1977c5e79ef5SDavid Hildenbrand * Consequently, memory blocks with holes can never get onlined
1978c5e79ef5SDavid Hildenbrand * via the hotplug path - online_pages() - as hotplugged memory has
1979503b158fSDavid Hildenbrand * no holes. This way, we don't have to worry about memory holes,
1980503b158fSDavid Hildenbrand * don't need pfn_valid() checks, and can avoid using
1981503b158fSDavid Hildenbrand * walk_system_ram_range() later.
1982c5e79ef5SDavid Hildenbrand */
198373a11c96SDavid Hildenbrand walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages,
1984c5e79ef5SDavid Hildenbrand count_system_ram_pages_cb);
198573a11c96SDavid Hildenbrand if (system_ram_pages != nr_pages) {
1986c5e79ef5SDavid Hildenbrand ret = -EINVAL;
1987c5e79ef5SDavid Hildenbrand reason = "memory holes";
1988c5e79ef5SDavid Hildenbrand goto failed_removal;
1989c5e79ef5SDavid Hildenbrand }
1990c5e79ef5SDavid Hildenbrand
1991395f6081SDavid Hildenbrand /*
1992395f6081SDavid Hildenbrand * We only support offlining of memory blocks managed by a single zone,
1993395f6081SDavid Hildenbrand * checked by calling code. This is just a sanity check that we might
1994395f6081SDavid Hildenbrand * want to remove in the future.
1995395f6081SDavid Hildenbrand */
1996395f6081SDavid Hildenbrand if (WARN_ON_ONCE(page_zone(pfn_to_page(start_pfn)) != zone ||
1997395f6081SDavid Hildenbrand page_zone(pfn_to_page(end_pfn - 1)) != zone)) {
199879605093SMichal Hocko ret = -EINVAL;
199979605093SMichal Hocko reason = "multizone range";
200079605093SMichal Hocko goto failed_removal;
2001381eab4aSDavid Hildenbrand }
20027b78d335SYasunori Goto
2003ec6e8c7eSVlastimil Babka /*
2004ec6e8c7eSVlastimil Babka * Disable pcplists so that page isolation cannot race with freeing
2005ec6e8c7eSVlastimil Babka * in a way that leaves pages from an isolated pageblock on pcplists.
2006ec6e8c7eSVlastimil Babka */
2007ec6e8c7eSVlastimil Babka zone_pcp_disable(zone);
2008d479960eSMinchan Kim lru_cache_disable();
2009ec6e8c7eSVlastimil Babka
20100c0e6195SKAMEZAWA Hiroyuki /* set above range as isolated */
2011b023f468SWen Congyang ret = start_isolate_page_range(start_pfn, end_pfn,
2012d381c547SMichal Hocko MIGRATE_MOVABLE,
2013b9e40605SDavid Hildenbrand MEMORY_OFFLINE | REPORT_FAILURE);
20143fa0c7c7SDavid Hildenbrand if (ret) {
201579605093SMichal Hocko reason = "failure to isolate range";
2016ec6e8c7eSVlastimil Babka goto failed_removal_pcplists_disabled;
2017381eab4aSDavid Hildenbrand }
20187b78d335SYasunori Goto
20197b78d335SYasunori Goto arg.start_pfn = start_pfn;
20207b78d335SYasunori Goto arg.nr_pages = nr_pages;
2021d9713679SLai Jiangshan node_states_check_changes_offline(nr_pages, zone, &arg);
20227b78d335SYasunori Goto
20237b78d335SYasunori Goto ret = memory_notify(MEM_GOING_OFFLINE, &arg);
20247b78d335SYasunori Goto ret = notifier_to_errno(ret);
202579605093SMichal Hocko if (ret) {
202679605093SMichal Hocko reason = "notifier failure";
202779605093SMichal Hocko goto failed_removal_isolated;
202879605093SMichal Hocko }
20297b78d335SYasunori Goto
2030bb8965bdSMichal Hocko do {
2031aa218795SDavid Hildenbrand pfn = start_pfn;
2032aa218795SDavid Hildenbrand do {
2033de7cb03dSDavid Hildenbrand /*
2034de7cb03dSDavid Hildenbrand * Historically we always checked for any signal and
2035de7cb03dSDavid Hildenbrand * can't limit it to fatal signals without eventually
2036de7cb03dSDavid Hildenbrand * breaking user space.
2037de7cb03dSDavid Hildenbrand */
203879605093SMichal Hocko if (signal_pending(current)) {
2039bb8965bdSMichal Hocko ret = -EINTR;
204079605093SMichal Hocko reason = "signal backoff";
204179605093SMichal Hocko goto failed_removal_isolated;
204279605093SMichal Hocko }
204372b39cfcSMichal Hocko
20440c0e6195SKAMEZAWA Hiroyuki cond_resched();
20450c0e6195SKAMEZAWA Hiroyuki
2046aa218795SDavid Hildenbrand ret = scan_movable_pages(pfn, end_pfn, &pfn);
2047aa218795SDavid Hildenbrand if (!ret) {
2048bb8965bdSMichal Hocko /*
2049bb8965bdSMichal Hocko * TODO: fatal migration failures should bail
2050bb8965bdSMichal Hocko * out
2051bb8965bdSMichal Hocko */
2052bb8965bdSMichal Hocko do_migrate_range(pfn, end_pfn);
2053bb8965bdSMichal Hocko }
2054aa218795SDavid Hildenbrand } while (!ret);
2055aa218795SDavid Hildenbrand
2056aa218795SDavid Hildenbrand if (ret != -ENOENT) {
2057aa218795SDavid Hildenbrand reason = "unmovable page";
2058aa218795SDavid Hildenbrand goto failed_removal_isolated;
20590c0e6195SKAMEZAWA Hiroyuki }
206072b39cfcSMichal Hocko
2061c8721bbbSNaoya Horiguchi /*
2062d199483cSSidhartha Kumar * Dissolve free hugetlb folios in the memory block before actually
2063bb8965bdSMichal Hocko * offlining, in order to keep hugetlbfs's object counting
2064bb8965bdSMichal Hocko * consistent.
2065c8721bbbSNaoya Horiguchi */
2066d199483cSSidhartha Kumar ret = dissolve_free_hugetlb_folios(start_pfn, end_pfn);
206779605093SMichal Hocko if (ret) {
206879605093SMichal Hocko reason = "failure to dissolve huge pages";
206979605093SMichal Hocko goto failed_removal_isolated;
207079605093SMichal Hocko }
20710a1a9a00SDavid Hildenbrand
20720a1a9a00SDavid Hildenbrand ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);
2073ec6e8c7eSVlastimil Babka
20745557c766SMichal Hocko } while (ret);
2075bb8965bdSMichal Hocko
20760a1a9a00SDavid Hildenbrand /* Mark all sections offline and remove free pages from the buddy. */
207750625744SDavid Hildenbrand managed_pages = __offline_isolated_pages(start_pfn, end_pfn);
20787c33023aSLaurent Dufour pr_debug("Offlined Pages %ld\n", nr_pages);
20790a1a9a00SDavid Hildenbrand
20809b7ea46aSQian Cai /*
2081b30c5927SDavid Hildenbrand * The memory sections are marked offline, and the pageblock flags are
2082b30c5927SDavid Hildenbrand * effectively stale; nobody should be touching them. Fixup the number
2083b30c5927SDavid Hildenbrand * of isolated pageblocks, memory onlining will properly revert this.
20849b7ea46aSQian Cai */
20859b7ea46aSQian Cai spin_lock_irqsave(&zone->lock, flags);
2086ea15153cSDavid Hildenbrand zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages;
20879b7ea46aSQian Cai spin_unlock_irqrestore(&zone->lock, flags);
20889b7ea46aSQian Cai
2089d479960eSMinchan Kim lru_cache_enable();
2090ec6e8c7eSVlastimil Babka zone_pcp_enable(zone);
2091ec6e8c7eSVlastimil Babka
20920c0e6195SKAMEZAWA Hiroyuki /* removal success */
209350625744SDavid Hildenbrand adjust_managed_page_count(pfn_to_page(start_pfn), -managed_pages);
2094836809ecSDavid Hildenbrand adjust_present_page_count(pfn_to_page(start_pfn), group, -nr_pages);
20957b78d335SYasunori Goto
2096b92ca18eSMel Gorman /* reinitialise watermarks and update pcp limits */
20971b79acc9SKOSAKI Motohiro init_per_zone_wmark_min();
20981b79acc9SKOSAKI Motohiro
2099b7812c86SQi Zheng /*
2100b7812c86SQi Zheng * Make sure to mark the node as memory-less before rebuilding the zone
2101b7812c86SQi Zheng * list. Otherwise this node would still appear in the fallback lists.
2102b7812c86SQi Zheng */
2103b7812c86SQi Zheng node_states_clear_node(node, &arg);
21041e8537baSXishi Qiu if (!populated_zone(zone)) {
2105340175b7SJiang Liu zone_pcp_reset(zone);
210672675e13SMichal Hocko build_all_zonelists(NULL);
2107b92ca18eSMel Gorman }
2108340175b7SJiang Liu
2109698b1b30SVlastimil Babka if (arg.status_change_nid >= 0) {
2110698b1b30SVlastimil Babka kcompactd_stop(node);
2111b4a0215eSKefeng Wang kswapd_stop(node);
2112698b1b30SVlastimil Babka }
2113bce7394aSMinchan Kim
21140c0e6195SKAMEZAWA Hiroyuki writeback_set_ratelimit();
21157b78d335SYasunori Goto
21167b78d335SYasunori Goto memory_notify(MEM_OFFLINE, &arg);
2117feee6b29SDavid Hildenbrand remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
21180c0e6195SKAMEZAWA Hiroyuki return 0;
21190c0e6195SKAMEZAWA Hiroyuki
212079605093SMichal Hocko failed_removal_isolated:
212136ba30bcSMiaohe Lin /* pushback to free area */
212279605093SMichal Hocko undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
2123c4efe484SQian Cai memory_notify(MEM_CANCEL_OFFLINE, &arg);
2124ec6e8c7eSVlastimil Babka failed_removal_pcplists_disabled:
2125946746d1SMiaohe Lin lru_cache_enable();
2126ec6e8c7eSVlastimil Babka zone_pcp_enable(zone);
21270c0e6195SKAMEZAWA Hiroyuki failed_removal:
212879605093SMichal Hocko pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
2129a62e2f4fSBjorn Helgaas (unsigned long long) start_pfn << PAGE_SHIFT,
213079605093SMichal Hocko ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
213179605093SMichal Hocko reason);
21320c0e6195SKAMEZAWA Hiroyuki return ret;
21330c0e6195SKAMEZAWA Hiroyuki }
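
/*
 * Minimal caller sketch (an illustration under assumptions, not the in-tree
 * caller, which lives in the memory block device code and does additional
 * bookkeeping): mem_hotplug_begin()/mem_hotplug_done() take and release
 * mem_hotplug_lock in write mode, as required above.
 *
 *	mem_hotplug_begin();
 *	rc = offline_pages(start_pfn, nr_pages, zone, group);
 *	mem_hotplug_done();
 *
 * start_pfn/nr_pages must describe whole memory blocks within a single
 * zone, or the sanity checks at the top of offline_pages() return -EINVAL.
 */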
213471088785SBadari Pulavarty
2135d6de9d53SXishi Qiu static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
2136bbc76be6SWen Congyang {
2137e1c158e4SDavid Hildenbrand int *nid = arg;
2138bbc76be6SWen Congyang
2139e1c158e4SDavid Hildenbrand *nid = mem->nid;
2140639118d1SKefeng Wang if (unlikely(mem->state != MEM_OFFLINE)) {
2141349daa0fSRandy Dunlap phys_addr_t beginpa, endpa;
2142349daa0fSRandy Dunlap
2143349daa0fSRandy Dunlap beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
2144b6c88d3bSDavid Hildenbrand endpa = beginpa + memory_block_size_bytes() - 1;
2145756a025fSJoe Perches pr_warn("removing memory failed because memory [%pa-%pa] is still online\n",
2146349daa0fSRandy Dunlap &beginpa, &endpa);
2147bbc76be6SWen Congyang
2148eca499abSPavel Tatashin return -EBUSY;
2149eca499abSPavel Tatashin }
2150eca499abSPavel Tatashin return 0;
2151bbc76be6SWen Congyang }
2152bbc76be6SWen Congyang
21536b8f0798SVishal Verma static int count_memory_range_altmaps_cb(struct memory_block *mem, void *arg)
2154a08a2ae3SOscar Salvador {
21556b8f0798SVishal Verma u64 *num_altmaps = (u64 *)arg;
21566b8f0798SVishal Verma
21576b8f0798SVishal Verma if (mem->altmap)
21586b8f0798SVishal Verma *num_altmaps += 1;
21596b8f0798SVishal Verma
21601a8c64e1SAneesh Kumar K.V return 0;
2161a08a2ae3SOscar Salvador }
2162a08a2ae3SOscar Salvador
2163b27340a5SMiaohe Lin static int check_cpu_on_node(int nid)
216460a5a19eSTang Chen {
216560a5a19eSTang Chen int cpu;
216660a5a19eSTang Chen
216760a5a19eSTang Chen for_each_present_cpu(cpu) {
2168b27340a5SMiaohe Lin if (cpu_to_node(cpu) == nid)
216960a5a19eSTang Chen /*
217060a5a19eSTang Chen * a cpu on this node has not been removed, so we
217160a5a19eSTang Chen * cannot offline this node.
217260a5a19eSTang Chen */
217360a5a19eSTang Chen return -EBUSY;
217460a5a19eSTang Chen }
217560a5a19eSTang Chen
217660a5a19eSTang Chen return 0;
217760a5a19eSTang Chen }
217860a5a19eSTang Chen
21792c91f8fcSDavid Hildenbrand static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg)
21802c91f8fcSDavid Hildenbrand {
21812c91f8fcSDavid Hildenbrand int nid = *(int *)arg;
21822c91f8fcSDavid Hildenbrand
21832c91f8fcSDavid Hildenbrand /*
21842c91f8fcSDavid Hildenbrand * If a memory block belongs to multiple nodes, the stored nid is not
21852c91f8fcSDavid Hildenbrand * reliable. However, such blocks are always online (e.g., cannot get
21862c91f8fcSDavid Hildenbrand * offlined) and, therefore, are still spanned by the node.
21872c91f8fcSDavid Hildenbrand */
21882c91f8fcSDavid Hildenbrand return mem->nid == nid ? -EEXIST : 0;
21892c91f8fcSDavid Hildenbrand }
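
/*
 * Usage sketch for the callback above, relying on for_each_memory_block()
 * stopping the walk and propagating the first nonzero return value:
 *
 *	int nid = 1;	// hypothetical node
 *	rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
 *	// rc == -EEXIST: some memory block still belongs to node 1, so
 *	// try_offline_node() below must not offline it yet.
 */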
21902c91f8fcSDavid Hildenbrand
21910f1cfe9dSToshi Kani /**
21920f1cfe9dSToshi Kani * try_offline_node
2193e8b098fcSMike Rapoport * @nid: the node ID
21940f1cfe9dSToshi Kani *
21950f1cfe9dSToshi Kani * Offline a node if all memory sections and cpus of the node are removed.
21960f1cfe9dSToshi Kani *
21970f1cfe9dSToshi Kani * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
21980f1cfe9dSToshi Kani * and online/offline operations before this call.
21990f1cfe9dSToshi Kani */
220090b30cdcSWen Congyang void try_offline_node(int nid)
220160a5a19eSTang Chen {
22022c91f8fcSDavid Hildenbrand int rc;
220360a5a19eSTang Chen
220460a5a19eSTang Chen /*
22052c91f8fcSDavid Hildenbrand * If the node still spans pages (especially ZONE_DEVICE), don't
22062c91f8fcSDavid Hildenbrand * offline it. A node spans memory after move_pfn_range_to_zone(),
22072c91f8fcSDavid Hildenbrand * e.g., after the memory block was onlined.
220860a5a19eSTang Chen */
2209b27340a5SMiaohe Lin if (node_spanned_pages(nid))
221060a5a19eSTang Chen return;
22112c91f8fcSDavid Hildenbrand
22122c91f8fcSDavid Hildenbrand /*
22132c91f8fcSDavid Hildenbrand * In particular, offline memory blocks might not be spanned by the
22142c91f8fcSDavid Hildenbrand * node. They will get spanned by the node once they get onlined.
22152c91f8fcSDavid Hildenbrand * However, they link to the node in sysfs and can get onlined later.
22162c91f8fcSDavid Hildenbrand */
22172c91f8fcSDavid Hildenbrand rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
22182c91f8fcSDavid Hildenbrand if (rc)
22192c91f8fcSDavid Hildenbrand return;
222060a5a19eSTang Chen
2221b27340a5SMiaohe Lin if (check_cpu_on_node(nid))
222260a5a19eSTang Chen return;
222360a5a19eSTang Chen
222460a5a19eSTang Chen /*
222560a5a19eSTang Chen * All memory and CPUs of this node have been removed; we can
222660a5a19eSTang Chen * offline this node now.
222760a5a19eSTang Chen */
222860a5a19eSTang Chen node_set_offline(nid);
222960a5a19eSTang Chen unregister_one_node(nid);
223060a5a19eSTang Chen }
223190b30cdcSWen Congyang EXPORT_SYMBOL(try_offline_node);
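
/*
 * Usage sketch following the locking rule documented above (hypothetical
 * caller; real callers arrive here via the memory removal paths):
 *
 *	lock_device_hotplug();
 *	try_offline_node(nid);	// silently does nothing unless the node is empty
 *	unlock_device_hotplug();
 */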
223260a5a19eSTang Chen
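/*
 * Returns 1 if every memory block in the range has a vmemmap altmap, 0 if
 * none do (or memmap_on_memory support is disabled), and -EINVAL on an
 * unexpected mix of the two.
 */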
22336b8f0798SVishal Verma static int memory_blocks_have_altmaps(u64 start, u64 size)
22346b8f0798SVishal Verma {
22356b8f0798SVishal Verma u64 num_memblocks = size / memory_block_size_bytes();
22366b8f0798SVishal Verma u64 num_altmaps = 0;
22376b8f0798SVishal Verma
22386b8f0798SVishal Verma if (!mhp_memmap_on_memory())
22396b8f0798SVishal Verma return 0;
22406b8f0798SVishal Verma
22416b8f0798SVishal Verma walk_memory_blocks(start, size, &num_altmaps,
22426b8f0798SVishal Verma count_memory_range_altmaps_cb);
22436b8f0798SVishal Verma
22446b8f0798SVishal Verma if (num_altmaps == 0)
22456b8f0798SVishal Verma return 0;
22466b8f0798SVishal Verma
22476b8f0798SVishal Verma if (WARN_ON_ONCE(num_memblocks != num_altmaps))
22486b8f0798SVishal Verma return -EINVAL;
22496b8f0798SVishal Verma
22506b8f0798SVishal Verma return 1;
22516b8f0798SVishal Verma }
22526b8f0798SVishal Verma
2253f732e242SWei Yang static int try_remove_memory(u64 start, u64 size)
2254bbc76be6SWen Congyang {
22556b8f0798SVishal Verma int rc, nid = NUMA_NO_NODE;
2256993c1aadSWen Congyang
225727356f54SToshi Kani BUG_ON(check_hotplug_memory_range(start, size));
225827356f54SToshi Kani
22596677e3eaSYasuaki Ishimatsu /*
2260242831ebSRafael J. Wysocki * All memory blocks must be offlined before removing memory. Check
2261eca499abSPavel Tatashin * whether all memory blocks in question are offline and return error
2262242831ebSRafael J. Wysocki * if this is not the case.
2263e1c158e4SDavid Hildenbrand *
2264e1c158e4SDavid Hildenbrand * While at it, determine the nid. Note that if we'd have mixed nodes,
2265e1c158e4SDavid Hildenbrand * we'd only try to offline the last determined one -- which is good
2266e1c158e4SDavid Hildenbrand * enough for the cases we care about.
22676677e3eaSYasuaki Ishimatsu */
2268e1c158e4SDavid Hildenbrand rc = walk_memory_blocks(start, size, &nid, check_memblock_offlined_cb);
2269eca499abSPavel Tatashin if (rc)
2270b4223a51SJia He return rc;
22716677e3eaSYasuaki Ishimatsu
227246c66c4bSYasuaki Ishimatsu /* remove memmap entry */
227346c66c4bSYasuaki Ishimatsu firmware_map_remove(start, start + size, "System RAM");
227446c66c4bSYasuaki Ishimatsu
22756b8f0798SVishal Verma mem_hotplug_begin();
22766b8f0798SVishal Verma
22776b8f0798SVishal Verma rc = memory_blocks_have_altmaps(start, size);
22786b8f0798SVishal Verma if (rc < 0) {
22796b8f0798SVishal Verma mem_hotplug_done();
22806b8f0798SVishal Verma return rc;
22816b8f0798SVishal Verma } else if (!rc) {
2282f1037ec0SDan Williams /*
2283f1037ec0SDan Williams * Memory block device removal under the device_hotplug_lock is
2284f1037ec0SDan Williams * a barrier against racing online attempts.
22856b8f0798SVishal Verma * No altmaps are present, so do the removal directly.
2286f1037ec0SDan Williams */
22874c4b7f9bSDavid Hildenbrand remove_memory_block_devices(start, size);
22886b8f0798SVishal Verma arch_remove_memory(start, size, NULL);
22896b8f0798SVishal Verma } else {
22906b8f0798SVishal Verma /* all memblocks in the range have altmaps */
22916b8f0798SVishal Verma remove_memory_blocks_and_altmaps(start, size);
22921a8c64e1SAneesh Kumar K.V }
22931a8c64e1SAneesh Kumar K.V
22947b09fa7eSJonathan Cameron if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
229532d1fe8fSAnshuman Khandual memblock_remove(start, size);
229652219aeaSDavid Hildenbrand
2297cb8e3c8bSDavid Hildenbrand release_mem_region_adjustable(start, size);
229824d335caSWen Congyang
2299e1c158e4SDavid Hildenbrand if (nid != NUMA_NO_NODE)
230060a5a19eSTang Chen try_offline_node(nid);
230160a5a19eSTang Chen
2302bfc8c901SVladimir Davydov mem_hotplug_done();
2303b4223a51SJia He return 0;
230471088785SBadari Pulavarty }
2305d15e5926SDavid Hildenbrand
2306eca499abSPavel Tatashin /**
23075640c9caSMel Gorman * __remove_memory - Remove memory if every memory block is offline
2308eca499abSPavel Tatashin * @start: physical address of the region to remove
2309eca499abSPavel Tatashin * @size: size of the region to remove
2310eca499abSPavel Tatashin *
2311eca499abSPavel Tatashin * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
2312eca499abSPavel Tatashin * and online/offline operations before this call, as required by
2313eca499abSPavel Tatashin * try_offline_node().
2314eca499abSPavel Tatashin */
2315e1c158e4SDavid Hildenbrand void __remove_memory(u64 start, u64 size)
2316d15e5926SDavid Hildenbrand {
2317eca499abSPavel Tatashin
2318eca499abSPavel Tatashin /*
231929a90db9SSouptick Joarder * trigger BUG() if some memory is not offlined prior to calling this
2320eca499abSPavel Tatashin * function
2321eca499abSPavel Tatashin */
2322e1c158e4SDavid Hildenbrand if (try_remove_memory(start, size))
2323eca499abSPavel Tatashin BUG();
2324eca499abSPavel Tatashin }
2325eca499abSPavel Tatashin
2326eca499abSPavel Tatashin /*
2327eca499abSPavel Tatashin * Remove memory if every memory block is offline; otherwise, return -EBUSY
2328eca499abSPavel Tatashin * if some memory is not offline.
2329eca499abSPavel Tatashin */
2330e1c158e4SDavid Hildenbrand int remove_memory(u64 start, u64 size)
2331eca499abSPavel Tatashin {
2332eca499abSPavel Tatashin int rc;
2333eca499abSPavel Tatashin
2334d15e5926SDavid Hildenbrand lock_device_hotplug();
2335e1c158e4SDavid Hildenbrand rc = try_remove_memory(start, size);
2336d15e5926SDavid Hildenbrand unlock_device_hotplug();
2337eca499abSPavel Tatashin
2338eca499abSPavel Tatashin return rc;
2339d15e5926SDavid Hildenbrand }
234071088785SBadari Pulavarty EXPORT_SYMBOL_GPL(remove_memory);
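
/*
 * Hedged driver-side sketch (caller names hypothetical): a driver that
 * earlier hotplugged a range, e.g. with add_memory(), and has offlined
 * every memory block in it can tear the range down like this. Unlike
 * __remove_memory(), remove_memory() takes device_hotplug_lock itself:
 *
 *	rc = remove_memory(start, size);
 *	if (rc == -EBUSY)
 *		;	// some memory block is still online; retry later
 */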
234108b3acd7SDavid Hildenbrand
23428dc4bb58SDavid Hildenbrand static int try_offline_memory_block(struct memory_block *mem, void *arg)
23438dc4bb58SDavid Hildenbrand {
23448dc4bb58SDavid Hildenbrand uint8_t online_type = MMOP_ONLINE_KERNEL;
23458dc4bb58SDavid Hildenbrand uint8_t **online_types = arg;
23468dc4bb58SDavid Hildenbrand struct page *page;
23478dc4bb58SDavid Hildenbrand int rc;
23488dc4bb58SDavid Hildenbrand
234908b3acd7SDavid Hildenbrand /*
23508dc4bb58SDavid Hildenbrand * Sense the online_type via the zone of the memory block. Offlining
23518dc4bb58SDavid Hildenbrand * with multiple zones within one memory block will be rejected
23528dc4bb58SDavid Hildenbrand * by offlining code ... so we don't care about that.
23538dc4bb58SDavid Hildenbrand */
23548dc4bb58SDavid Hildenbrand page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr));
23558dc4bb58SDavid Hildenbrand if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE)
23568dc4bb58SDavid Hildenbrand online_type = MMOP_ONLINE_MOVABLE;
23578dc4bb58SDavid Hildenbrand
23588dc4bb58SDavid Hildenbrand rc = device_offline(&mem->dev);
23598dc4bb58SDavid Hildenbrand /*
23608dc4bb58SDavid Hildenbrand * Default is MMOP_OFFLINE - change it only if offlining succeeded,
23618dc4bb58SDavid Hildenbrand * so try_reonline_memory_block() can do the right thing.
23628dc4bb58SDavid Hildenbrand */
23638dc4bb58SDavid Hildenbrand if (!rc)
23648dc4bb58SDavid Hildenbrand **online_types = online_type;
23658dc4bb58SDavid Hildenbrand
23668dc4bb58SDavid Hildenbrand (*online_types)++;
23678dc4bb58SDavid Hildenbrand /* Ignore if already offline. */
23688dc4bb58SDavid Hildenbrand return rc < 0 ? rc : 0;
23698dc4bb58SDavid Hildenbrand }
23708dc4bb58SDavid Hildenbrand
23718dc4bb58SDavid Hildenbrand static int try_reonline_memory_block(struct memory_block *mem, void *arg)
23728dc4bb58SDavid Hildenbrand {
23738dc4bb58SDavid Hildenbrand uint8_t **online_types = arg;
23748dc4bb58SDavid Hildenbrand int rc;
23758dc4bb58SDavid Hildenbrand
23768dc4bb58SDavid Hildenbrand if (**online_types != MMOP_OFFLINE) {
23778dc4bb58SDavid Hildenbrand mem->online_type = **online_types;
23788dc4bb58SDavid Hildenbrand rc = device_online(&mem->dev);
23798dc4bb58SDavid Hildenbrand if (rc < 0)
23808dc4bb58SDavid Hildenbrand pr_warn("%s: Failed to re-online memory: %d",
23818dc4bb58SDavid Hildenbrand __func__, rc);
23828dc4bb58SDavid Hildenbrand }
23838dc4bb58SDavid Hildenbrand
23848dc4bb58SDavid Hildenbrand /* Continue processing all remaining memory blocks. */
23858dc4bb58SDavid Hildenbrand (*online_types)++;
23868dc4bb58SDavid Hildenbrand return 0;
23878dc4bb58SDavid Hildenbrand }
23888dc4bb58SDavid Hildenbrand
23898dc4bb58SDavid Hildenbrand /*
23908dc4bb58SDavid Hildenbrand * Try to offline and remove memory. Might take a long time to finish if
23918dc4bb58SDavid Hildenbrand * memory is still in use. Primarily useful for memory devices that logically
23928dc4bb58SDavid Hildenbrand * unplugged all memory (so it's no longer in use) and want to offline + remove
23938dc4bb58SDavid Hildenbrand * that memory.
239408b3acd7SDavid Hildenbrand */
2395e1c158e4SDavid Hildenbrand int offline_and_remove_memory(u64 start, u64 size)
239608b3acd7SDavid Hildenbrand {
23978dc4bb58SDavid Hildenbrand const unsigned long mb_count = size / memory_block_size_bytes();
23988dc4bb58SDavid Hildenbrand uint8_t *online_types, *tmp;
23998dc4bb58SDavid Hildenbrand int rc;
240008b3acd7SDavid Hildenbrand
240108b3acd7SDavid Hildenbrand if (!IS_ALIGNED(start, memory_block_size_bytes()) ||
24028dc4bb58SDavid Hildenbrand !IS_ALIGNED(size, memory_block_size_bytes()) || !size)
24038dc4bb58SDavid Hildenbrand return -EINVAL;
240408b3acd7SDavid Hildenbrand
240508b3acd7SDavid Hildenbrand /*
24068dc4bb58SDavid Hildenbrand * We'll remember the old online type of each memory block, so we can
24078dc4bb58SDavid Hildenbrand * try to revert what we did if offlining one memory block fails after
24088dc4bb58SDavid Hildenbrand * some others have already been offlined successfully.
24098dc4bb58SDavid Hildenbrand */
24108dc4bb58SDavid Hildenbrand online_types = kmalloc_array(mb_count, sizeof(*online_types),
24118dc4bb58SDavid Hildenbrand GFP_KERNEL);
24128dc4bb58SDavid Hildenbrand if (!online_types)
24138dc4bb58SDavid Hildenbrand return -ENOMEM;
24148dc4bb58SDavid Hildenbrand /*
24158dc4bb58SDavid Hildenbrand * Initialize all states to MMOP_OFFLINE, so when we abort processing in
24168dc4bb58SDavid Hildenbrand * try_offline_memory_block(), we'll skip all unprocessed blocks in
24178dc4bb58SDavid Hildenbrand * try_reonline_memory_block().
24188dc4bb58SDavid Hildenbrand */
24198dc4bb58SDavid Hildenbrand memset(online_types, MMOP_OFFLINE, mb_count);
24208dc4bb58SDavid Hildenbrand
24218dc4bb58SDavid Hildenbrand lock_device_hotplug();
24228dc4bb58SDavid Hildenbrand
24238dc4bb58SDavid Hildenbrand tmp = online_types;
24248dc4bb58SDavid Hildenbrand rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block);
24258dc4bb58SDavid Hildenbrand
24268dc4bb58SDavid Hildenbrand /*
24278dc4bb58SDavid Hildenbrand * If we succeeded in offlining all memory, remove it.
242808b3acd7SDavid Hildenbrand * This cannot fail as it cannot get onlined in the meantime.
242908b3acd7SDavid Hildenbrand */
243008b3acd7SDavid Hildenbrand if (!rc) {
2431e1c158e4SDavid Hildenbrand rc = try_remove_memory(start, size);
24328dc4bb58SDavid Hildenbrand if (rc)
24338dc4bb58SDavid Hildenbrand pr_err("%s: Failed to remove memory: %d", __func__, rc);
24348dc4bb58SDavid Hildenbrand }
24358dc4bb58SDavid Hildenbrand
24368dc4bb58SDavid Hildenbrand /*
24378dc4bb58SDavid Hildenbrand * Roll back what we did. While memory onlining might theoretically fail
24388dc4bb58SDavid Hildenbrand * (nacked by a notifier), it barely ever happens.
24398dc4bb58SDavid Hildenbrand */
24408dc4bb58SDavid Hildenbrand if (rc) {
24418dc4bb58SDavid Hildenbrand tmp = online_types;
24428dc4bb58SDavid Hildenbrand walk_memory_blocks(start, size, &tmp,
24438dc4bb58SDavid Hildenbrand try_reonline_memory_block);
244408b3acd7SDavid Hildenbrand }
244508b3acd7SDavid Hildenbrand unlock_device_hotplug();
244608b3acd7SDavid Hildenbrand
24478dc4bb58SDavid Hildenbrand kfree(online_types);
244808b3acd7SDavid Hildenbrand return rc;
244908b3acd7SDavid Hildenbrand }
245008b3acd7SDavid Hildenbrand EXPORT_SYMBOL_GPL(offline_and_remove_memory);
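
/*
 * Usage sketch (hedged; virtio-mem is the kind of memory device driver this
 * helper is meant for): after logically unplugging a block-aligned range,
 * a driver may attempt
 *
 *	rc = offline_and_remove_memory(start, size);
 *
 * On success the range is offline and removed; on failure, blocks that were
 * already offlined are re-onlined with their previous online type where
 * possible.
 */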
2451aba6efc4SRafael J. Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */
2452