// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <[email protected]>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the
 * semantic expectations being validated here. All future changes here
 * or in the documentation need to be kept in sync.
 */
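/*
 * A non-zero byte pattern (GENMASK(7, 0) == 0xff) handed to memset() in
 * the basic P4D/PGD tests below, so that the *_same() comparisons there
 * operate on a definitely non-empty entry.
 */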
#define RANDOM_NZVALUE	GENMASK(7, 0)

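/*
 * Shared state for all tests. The pud_pfn/pmd_pfn/pte_pfn fields refer
 * to pages allocated for the test (ULONG_MAX when no suitable page could
 * be allocated), while the fixed_*_pfn fields hold page frame numbers
 * derived from a real kernel symbol (see hugetlb_basic_tests() below)
 * and are used where no backing allocation is required.
 */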
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_alignment;
	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte), args->vma)));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte, args->vma))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}
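
/*
 * The last two checks above encode a subtle requirement: pte_wrprotect()
 * must preserve the dirty state in both directions. On arm64 with
 * hardware dirty bit management (DBM), for instance, a hardware-dirty
 * entry is a writable one with PTE_RDONLY clear, so pte_wrprotect() has
 * to transfer that state into a software dirty bit rather than simply
 * setting PTE_RDONLY and silently losing it.
 */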

static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding TLB flushes.
	 * This requires that set_pte_at() not be used to update an
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 *
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PTE advanced\n");
	if (WARN_ON(!args->ptep))
		return;

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	pte = pte_mkwrite(pte, args->vma);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}
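
/*
 * The clear-before-set rule described above applies to every primed
 * entry in this test. A minimal sketch of the expected call pattern,
 * assuming an already installed pte:
 *
 *	ptep_get_and_clear(mm, vaddr, ptep);	// retire the old entry
 *	set_pte_at(mm, vaddr, ptep, new_pte);	// install the fresh one
 *	flush_dcache_page(page);		// keep PG_arch_1 in sync
 */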

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd), args->vma)));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd, args->vma))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pmd_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmd = pmd_mkwrite(pmd, args->vma);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pte entries */
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}
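
/*
 * Note the pairing above: a pte page table deposited with
 * pgtable_trans_huge_deposit() before installing the huge entry must be
 * taken back with pgtable_trans_huge_withdraw() once the entry has been
 * cleared, otherwise the pre-allocated page table would be leaked.
 */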

static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	unsigned long vaddr = args->vaddr;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pud_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	/*
	 * Some architectures have debug checks to make sure that huge
	 * pud mappings are only found with devmap entries.
	 * For now test with only devmap entries.
	 */
	pud = pud_mkdevmap(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkdevmap(pud);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
	pud = pudp_get(args->pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(args->vma, vaddr, args->pudp, 1);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkdevmap(pud);
	pud = pud_mkyoung(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot) ||
	    args->fixed_alignment < PMD_SIZE)
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * The x86 implementation of pmd_set_huge() verifies that the
	 * given PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));
}
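
/*
 * A sketch of the contract exercised above, for a hypothetical caller
 * mapping a PMD_SIZE region (this mirrors what huge vmap does when
 * arch_vmap_pmd_supported() allows it and the area is suitably aligned):
 *
 *	if (pmd_set_huge(pmdp, phys, prot))
 *		;	// one leaf entry now covers PMD_SIZE bytes
 *	...
 *	pmd_clear_huge(pmdp);	// tear the mapping down again
 */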

static void __init pud_huge_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(args->page_prot) ||
	    args->fixed_alignment < PUD_SIZE)
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * The x86 implementation of pud_set_huge() verifies that the
	 * given PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pudp, __pud(0));
	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
	WARN_ON(!pud_clear_huge(args->pudp));
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
	pud_t pud = pudp_get(args->pudp);

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD clear\n");
	WARN_ON(pud_none(pud));
	pud_clear(args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence it must not qualify as pud_bad().
	 */
	pud_populate(args->mm, args->pudp, args->start_pmdp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#endif /* PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d = p4dp_get(args->p4dp);

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D clear\n");
	WARN_ON(p4d_none(p4d));
	p4d_clear(args->p4dp);
	p4d = p4dp_get(args->p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence it must not qualify as p4d_bad().
	 */
	pud_clear(args->pudp);
	p4d_clear(args->p4dp);
	p4d_populate(args->mm, args->p4dp, args->start_pudp);
	p4d = p4dp_get(args->p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd = pgdp_get(args->pgdp);

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD clear\n");
	WARN_ON(pgd_none(pgd));
	pgd_clear(args->pgdp);
	pgd = pgdp_get(args->pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence it must not qualify as pgd_bad().
	 */
	p4d_clear(args->p4dp);
	pgd_clear(args->pgdp);
	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
	pgd = pgdp_get(args->pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
#endif /* PAGETABLE_P4D_FOLDED */
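
/*
 * Note that the clear/populate tests above bail out via mm_pmd_folded(),
 * mm_pud_folded() and mm_p4d_folded() in addition to the compile-time
 * __PAGETABLE_*_FOLDED checks: some architectures fold page table levels
 * at runtime, and the clear/populate semantics being validated here do
 * not apply to a folded level.
 */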

static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PTE clear\n");
	if (WARN_ON(!args->ptep))
		return;

	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	WARN_ON(pte_none(pte));
	flush_dcache_page(page);
	barrier();
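	/*
	 * The barrier() above is a compiler barrier: it keeps the clear
	 * below from being reordered ahead of the preceding set_pte_at(),
	 * so the clear really operates on an installed entry.
	 */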
	ptep_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = pmdp_get(args->pmdp);

	pr_debug("Validating PMD clear\n");
	WARN_ON(pmd_none(pmd));
	pmd_clear(args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence it must not qualify as pmd_bad().
	 */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else
static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
{
	unsigned long max_swap_offset;
	swp_entry_t entry, entry2;
	pte_t pte;

	pr_debug("Validating PTE swap exclusive\n");

	/* See generic_max_swapfile_size(): probe the maximum offset */
	max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));

	/* Create a swp entry with all possible bits set */
	entry = swp_entry((1 << MAX_SWAPFILES_SHIFT) - 1, max_swap_offset);

	pte = swp_entry_to_pte(entry);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_mkexclusive(pte);
	WARN_ON(!pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	WARN_ON(pte_swp_soft_dirty(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_clear_exclusive(pte);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
}
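
/*
 * The offset probe above works because converting a swp_entry_t to an
 * arch pte and back silently truncates the offset to however many bits
 * the architecture can actually store. Sketch:
 *
 *	entry = swp_entry(0, ~0UL);		// request all offset bits
 *	pte = swp_entry_to_pte(entry);		// arch drops unstorable bits
 *	max = swp_offset(pte_to_swp_entry(pte));	// what survived
 *
 * which is the same trick generic_max_swapfile_size() relies on.
 */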

static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's use the allocated page explicitly for this
	 * purpose.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");

	/*
	 * make_[readable|writable]_migration_entry() expects the given page
	 * to be locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
}
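
/*
 * The middle block above also demonstrates the expected downgrade path:
 * an entry created with make_writable_migration_entry() can be reduced
 * to a read-only one by rebuilding it from its own offset, e.g.:
 *
 *	swp = make_writable_migration_entry(pfn);
 *	swp = make_readable_migration_entry(swp_offset(swp));	// !writable
 */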

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(args->fixed_pmd_pfn);
	pte = mk_huge_pte(page, args->page_prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);

	WARN_ON(!pte_huge(arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */
93705289402SAnshuman Khandual
93805289402SAnshuman Khandual #ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_thp_tests(struct pgtable_debug_args * args)9394878a888SGavin Shan static void __init pmd_thp_tests(struct pgtable_debug_args *args)
94005289402SAnshuman Khandual {
94105289402SAnshuman Khandual pmd_t pmd;
94205289402SAnshuman Khandual
	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true even after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP, to avoid taking the
	 * pmd_lock when others walk over non-transhuge pmds (i.e. there
	 * are no THPs allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_leaf(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Re-enable these
	 * tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

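/*
 * Return a random, page aligned virtual address within the user
 * address range [FIRST_USER_ADDRESS, TASK_SIZE).
 */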
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

	/* Free (huge) page */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

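		/*
		 * The PUD-sized page also backed the PMD and PTE tests,
		 * so invalidate all three pfns together.
		 */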
		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_page(page);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp)
		p4d_free(args->mm, args->start_p4dp);

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmdrop(args->mm);
}

static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

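	/*
	 * Orders above MAX_PAGE_ORDER exceed what the buddy allocator
	 * supports, so they must come from the contiguous allocator
	 * when CONFIG_CONTIG_ALLOC is available.
	 */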
#ifdef CONFIG_CONTIG_ALLOC
	if (order > MAX_PAGE_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order <= MAX_PAGE_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}

/*
 * Check if a physical memory range described by <pstart, pend> contains
 * an area that is of size psize, and aligned to psize.
 *
 * Don't use address 0; an all-zeroes physical address might mask bugs,
 * and it's not used on x86.
 */
static void __init phys_align_check(phys_addr_t pstart,
				    phys_addr_t pend, unsigned long psize,
				    phys_addr_t *physp, unsigned long *alignp)
{
	phys_addr_t aligned_start, aligned_end;

	if (pstart == 0)
		pstart = PAGE_SIZE;

	aligned_start = ALIGN(pstart, psize);
	aligned_end = aligned_start + psize;

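	/*
	 * The first check also guards against aligned_start + psize
	 * wrapping around past the top of phys_addr_t.
	 */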
	if (aligned_end > aligned_start && aligned_end <= pend) {
		*alignp = psize;
		*physp = aligned_start;
	}
}

static void __init init_fixed_pfns(struct pgtable_debug_args *args)
{
	u64 idx;
	phys_addr_t phys, pstart, pend;

	/*
	 * Initialize the fixed pfns. To do this, try to find a
	 * valid physical range, preferably aligned to PUD_SIZE,
	 * but settling for aligned to PMD_SIZE as a fallback. If
	 * neither of those is found, use the physical address of
	 * the start_kernel symbol.
	 *
	 * The memory doesn't need to be allocated, it just needs to exist
	 * as usable memory. It won't be touched.
	 *
	 * The alignment is recorded, and can be checked to see if we
	 * can run the tests that require an actual valid physical
	 * address range on some architectures ({pmd,pud}_huge_test
	 * on x86).
	 */

	phys = __pa_symbol(&start_kernel);
	args->fixed_alignment = PAGE_SIZE;

	for_each_mem_range(idx, &pstart, &pend) {
		/* First check for a PUD-aligned area */
		phys_align_check(pstart, pend, PUD_SIZE, &phys,
				 &args->fixed_alignment);

		/* If a PUD-aligned area is found, we're done */
		if (args->fixed_alignment == PUD_SIZE)
			break;

		/*
		 * If no PMD-aligned area found yet, check for one,
		 * but continue the loop to look for a PUD-aligned area.
		 */
		if (args->fixed_alignment < PMD_SIZE)
			phys_align_check(pstart, pend, PMD_SIZE, &phys,
					 &args->fixed_alignment);
	}

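	/*
	 * Derive the fixed pfn for every page table level from the same
	 * physical address, masked to that level's alignment.
	 */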
	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));
}

static int __init init_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
	 * will help create page table entries with PROT_NONE permission as
	 * required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr = get_random_vaddr();
	args->page_prot = vm_get_page_prot(VM_ACCESS_FLAGS);
	args->page_prot_none = vm_get_page_prot(VM_NONE);
	args->is_contiguous_page = false;
	args->pud_pfn = ULONG_MAX;
	args->pmd_pfn = ULONG_MAX;
	args->pte_pfn = ULONG_MAX;
	args->fixed_pgd_pfn = ULONG_MAX;
	args->fixed_p4d_pfn = ULONG_MAX;
	args->fixed_pud_pfn = ULONG_MAX;
	args->fixed_pmd_pfn = ULONG_MAX;
	args->fixed_pte_pfn = ULONG_MAX;


	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the tests.
	 * Let's save the page table entries so that they can be released
	 * when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(pmdp_get(args->pmdp));
	WARN_ON(!args->start_ptep);

	init_fixed_pfns(args);

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

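	/* Fall back to a PMD-sized huge page, then to a single page. */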
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_page(GFP_KERNEL);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}

static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	/*
	 * Iterate over each possible vm_flags combination to make sure
	 * that all the basic page table transformation validations hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 *
	 * Protection-based vm_flags combinations are always linear
	 * and increasing, i.e. starting from VM_NONE and going up to
	 * (VM_SHARED | READ | WRITE | EXEC).
	 */
#define VM_FLAGS_START	(VM_NONE)
#define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)

	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of the
	 * above iteration for now to save some test execution time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_devmap_tests(&args);
	pmd_devmap_tests(&args);
	pud_devmap_tests(&args);

	pte_soft_dirty_tests(&args);
	pmd_soft_dirty_tests(&args);
	pte_swap_soft_dirty_tests(&args);
	pmd_swap_soft_dirty_tests(&args);

	pte_swap_exclusive_tests(&args);

	pte_swap_tests(&args);
	pmd_swap_tests(&args);

	swap_migration_tests(&args);

	pmd_thp_tests(&args);
	pud_thp_tests(&args);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold
	 * proper page table lock.
	 */

	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
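	/*
	 * pte_offset_map_lock() can fail; only unmap and unlock when a
	 * PTE was actually mapped.
	 */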
	if (args.ptep)
		pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(args.mm, args.pmdp);
	pmd_clear_tests(&args);
	pmd_advanced_tests(&args);
	pmd_huge_tests(&args);
	pmd_populate_tests(&args);
	spin_unlock(ptl);

	ptl = pud_lock(args.mm, args.pudp);
	pud_clear_tests(&args);
	pud_advanced_tests(&args);
	pud_huge_tests(&args);
	pud_populate_tests(&args);
	spin_unlock(ptl);

	spin_lock(&(args.mm->page_table_lock));
	p4d_clear_tests(&args);
	pgd_clear_tests(&args);
	p4d_populate_tests(&args);
	pgd_populate_tests(&args);
	spin_unlock(&(args.mm->page_table_lock));

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);