
Searched refs:init_mm (Results 1 – 25 of 179) sorted by relevance


/linux-6.15/mm/
init-mm.c
32 struct mm_struct init_mm = { [variable]
37 .write_protect_seq = SEQCNT_ZERO(init_mm.write_protect_seq),
38 MMAP_LOCK_INITIALIZER(init_mm)
40 .arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
41 .mmlist = LIST_HEAD_INIT(init_mm.mmlist),
44 .mm_lock_seq = SEQCNT_ZERO(init_mm.mm_lock_seq),
48 INIT_MM_CONTEXT(init_mm)
54 init_mm.start_code = (unsigned long)start_code; in setup_initial_init_mm()
55 init_mm.end_code = (unsigned long)end_code; in setup_initial_init_mm()
56 init_mm.end_data = (unsigned long)end_data; in setup_initial_init_mm()
[all …]
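
The hits above are the static definition of init_mm, the kernel's own mm_struct, plus setup_initial_init_mm(), which records the kernel image layout in it. Below is a minimal sketch of how that helper is typically invoked, assuming the usual linker-script section markers from asm/sections.h; example_setup() is a hypothetical name and the real call sites live in each architecture's setup code.

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/sections.h>

/* Hypothetical arch setup step: hand the linker-provided section bounds
 * (_stext, _etext, _edata, _end) to init_mm via setup_initial_init_mm(). */
static void __init example_setup(void)
{
	setup_initial_init_mm(_stext, _etext, _edata, _end);
}
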
page_table_check.c
150 if (&init_mm == mm) in __page_table_check_pte_clear()
161 if (&init_mm == mm) in __page_table_check_pmd_clear()
172 if (&init_mm == mm) in __page_table_check_pud_clear()
201 if (&init_mm == mm) in __page_table_check_ptes_set()
223 if (&init_mm == mm) in __page_table_check_pmd_set()
238 if (&init_mm == mm) in __page_table_check_pud_set()
253 if (&init_mm == mm) in __page_table_check_pte_clear_range()
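
Every page_table_check.c hit above is the same early-return guard: the checker only tracks user address spaces and skips kernel page tables entirely. A minimal sketch of that guard follows; example_check_hook() is a hypothetical stand-in for the real __page_table_check_*() hooks.

#include <linux/mm.h>

/* Hypothetical hook: like the real hooks above, bail out immediately
 * when the page tables belong to the kernel itself. */
static void example_check_hook(struct mm_struct *mm, unsigned long addr,
			       pte_t *ptep)
{
	if (&init_mm == mm)	/* kernel page tables are never checked */
		return;

	/* per-user-mm accounting of the entry at (addr, ptep) would go here */
}
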
hugetlb_vmemmap.c
59 pgtable = pte_alloc_one_kernel(&init_mm); in vmemmap_split_pmd()
71 set_pte_at(&init_mm, addr, pte, entry); in vmemmap_split_pmd()
74 spin_lock(&init_mm.page_table_lock); in vmemmap_split_pmd()
86 pmd_populate_kernel(&init_mm, pmd, pgtable); in vmemmap_split_pmd()
90 pte_free_kernel(&init_mm, pgtable); in vmemmap_split_pmd()
92 spin_unlock(&init_mm.page_table_lock); in vmemmap_split_pmd()
108 spin_lock(&init_mm.page_table_lock); in vmemmap_pmd_entry()
131 spin_unlock(&init_mm.page_table_lock); in vmemmap_pmd_entry()
168 mmap_read_lock(&init_mm); in vmemmap_remap_range()
171 mmap_read_unlock(&init_mm); in vmemmap_remap_range()
[all …]
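
The hugetlb_vmemmap.c hits show the usual recipe for editing kernel page tables: allocate a kernel PTE page, fill it with set_pte_at(&init_mm, ...), then splice it in (or discard it) under init_mm.page_table_lock. A minimal sketch of that recipe, assuming a hypothetical split_kernel_pmd_example() and with the PTE-filling step left as a comment:

#include <linux/mm.h>
#include <asm/pgalloc.h>

/* Hypothetical helper: split a kernel PMD by installing a freshly
 * allocated PTE page, taking init_mm.page_table_lock for the splice. */
static int split_kernel_pmd_example(pmd_t *pmd)
{
	pte_t *pgtable = pte_alloc_one_kernel(&init_mm);

	if (!pgtable)
		return -ENOMEM;

	/* ... fill pgtable with set_pte_at(&init_mm, addr, pte, entry) ... */

	spin_lock(&init_mm.page_table_lock);
	if (pmd_leaf(*pmd))
		pmd_populate_kernel(&init_mm, pmd, pgtable);	/* splice in */
	else
		pte_free_kernel(&init_mm, pgtable);		/* lost a race */
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}
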
/linux-6.15/mm/kasan/
init.c
119 pmd_populate_kernel(&init_mm, pmd, in zero_pmd_populate()
155 pud_populate(&init_mm, pud, in zero_pud_populate()
173 pud_populate(&init_mm, pud, p); in zero_pud_populate()
194 p4d_populate(&init_mm, p4d, in zero_p4d_populate()
197 pud_populate(&init_mm, pud, in zero_p4d_populate()
215 p4d_populate(&init_mm, p4d, p); in zero_p4d_populate()
254 pgd_populate(&init_mm, pgd, in kasan_populate_early_shadow()
257 p4d_populate(&init_mm, p4d, in kasan_populate_early_shadow()
260 pud_populate(&init_mm, pud, in kasan_populate_early_shadow()
276 pgd_populate(&init_mm, pgd, in kasan_populate_early_shadow()
[all …]
shadow.c
316 spin_lock(&init_mm.page_table_lock); in kasan_populate_vmalloc_pte()
318 set_pte_at(&init_mm, addr, ptep, pte); in kasan_populate_vmalloc_pte()
321 spin_unlock(&init_mm.page_table_lock); in kasan_populate_vmalloc_pte()
373 ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE, in __kasan_populate_vmalloc()
468 spin_lock(&init_mm.page_table_lock); in kasan_depopulate_vmalloc_pte()
471 pte_clear(&init_mm, addr, ptep); in kasan_depopulate_vmalloc_pte()
474 spin_unlock(&init_mm.page_table_lock); in kasan_depopulate_vmalloc_pte()
593 apply_to_existing_page_range(&init_mm, in kasan_release_vmalloc()
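
The shadow.c hits combine two patterns: a per-PTE callback that takes init_mm.page_table_lock around each installation, driven by apply_to_page_range() over &init_mm. A minimal sketch under those assumptions; populate_one_pte() and populate_kernel_range() are hypothetical names and the actual PTE construction is elided.

#include <linux/mm.h>

/* Hypothetical per-PTE callback: skip already-present entries, otherwise
 * install one under init_mm.page_table_lock (entry construction elided). */
static int populate_one_pte(pte_t *ptep, unsigned long addr, void *data)
{
	if (!pte_none(ptep_get(ptep)))
		return 0;

	spin_lock(&init_mm.page_table_lock);
	/* set_pte_at(&init_mm, addr, ptep, entry) would go here */
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

/* Hypothetical driver: apply the callback to every PTE of a kernel range. */
static int populate_kernel_range(unsigned long start, unsigned long nr_pages)
{
	return apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
				   populate_one_pte, NULL);
}
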
/linux-6.15/arch/powerpc/mm/kasan/
init_book3e_64.c
45 p4d_populate(&init_mm, p4dp, pudp); in kasan_map_kernel_page()
51 pud_populate(&init_mm, pudp, pmdp); in kasan_map_kernel_page()
57 pmd_populate_kernel(&init_mm, pmdp, ptep); in kasan_map_kernel_page()
61 __set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot), 0); in kasan_map_kernel_page()
93 __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page, in kasan_early_init()
97 pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i], in kasan_early_init()
101 pud_populate(&init_mm, &kasan_early_shadow_pud[i], in kasan_early_init()
105 p4d_populate(&init_mm, p4d_offset(pgd++, addr), kasan_early_shadow_pud); in kasan_early_init()
121 __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page, in kasan_init()
init_book3s_64.c
68 __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page, in kasan_init()
72 pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i], in kasan_init()
76 pud_populate(&init_mm, &kasan_early_shadow_pud[i], in kasan_init()
85 __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page, in kasan_init()
init_32.c
28 __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 1); in kasan_populate_pte()
50 pmd_populate_kernel(&init_mm, pmd, new); in kasan_init_shadow_page_tables()
77 __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0); in kasan_init_region()
95 __set_pte_at(&init_mm, k_cur, ptep, pte, 0); in kasan_update_early_region()
190 pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte); in kasan_early_init()
/linux-6.15/arch/openrisc/kernel/
dma.c
74 mmap_write_lock(&init_mm); in arch_dma_set_uncached()
75 error = walk_page_range_novma(&init_mm, va, va + size, in arch_dma_set_uncached()
77 mmap_write_unlock(&init_mm); in arch_dma_set_uncached()
88 mmap_write_lock(&init_mm); in arch_dma_clear_uncached()
90 WARN_ON(walk_page_range_novma(&init_mm, va, va + size, in arch_dma_clear_uncached()
92 mmap_write_unlock(&init_mm); in arch_dma_clear_uncached()
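
The dma.c hits show the locking convention for walking kernel page tables: there are no VMAs, so walk_page_range_novma() runs against &init_mm with its mmap lock held for writing. A minimal sketch follows, assuming the six-argument walk_page_range_novma() form visible in the riscv hits further down; walk_kernel_range_example() is a hypothetical wrapper and the mm_walk_ops are left to the caller.

#include <linux/mm.h>
#include <linux/pagewalk.h>

/* Hypothetical wrapper: walk a VMA-less kernel range against &init_mm,
 * holding init_mm's mmap lock for writing as the real callers above do. */
static int walk_kernel_range_example(unsigned long va, unsigned long size,
				     const struct mm_walk_ops *ops)
{
	int error;

	mmap_write_lock(&init_mm);
	error = walk_page_range_novma(&init_mm, va, va + size, ops, NULL, NULL);
	mmap_write_unlock(&init_mm);

	return error;
}
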
/linux-6.15/arch/x86/mm/
init_64.c
254 pgd_populate(&init_mm, pgd, p4d); in fill_p4d()
266 p4d_populate(&init_mm, p4d, pud); in fill_pud()
278 pud_populate(&init_mm, pud, pmd); in fill_pmd()
578 spin_lock(&init_mm.page_table_lock); in phys_pmd_init()
665 spin_lock(&init_mm.page_table_lock); in phys_pud_init()
718 spin_lock(&init_mm.page_table_lock); in phys_p4d_init()
1040 spin_lock(&init_mm.page_table_lock); in free_pte_table()
1058 spin_lock(&init_mm.page_table_lock); in free_pmd_table()
1076 spin_lock(&init_mm.page_table_lock); in free_pud_table()
1111 pte_clear(&init_mm, addr, pte); in remove_pte_table()
[all …]
/linux-6.15/arch/powerpc/mm/nohash/
book3e_pgtable.c
86 pudp = pud_alloc(&init_mm, p4dp, ea); in map_kernel_page()
89 pmdp = pmd_alloc(&init_mm, pudp, ea); in map_kernel_page()
100 p4d_populate(&init_mm, p4dp, pudp); in map_kernel_page()
105 pud_populate(&init_mm, pudp, pmdp); in map_kernel_page()
110 pmd_populate_kernel(&init_mm, pmdp, ptep); in map_kernel_page()
114 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); in map_kernel_page()
/linux-6.15/arch/powerpc/mm/book3s64/
radix_pgtable.c
89 p4d_populate(&init_mm, p4dp, pudp); in early_map_kernel_page()
99 pud_populate(&init_mm, pudp, pmdp); in early_map_kernel_page()
155 pudp = pud_alloc(&init_mm, p4dp, ea); in __map_kernel_page()
486 init_mm.context.id = mmu_base_pid; in radix_init_pgtable()
718 pmd_free(&init_mm, pmd_start); in free_pmd_table()
733 pud_free(&init_mm, pud_start); in free_pud_table()
805 pte_clear(&init_mm, addr, pte); in remove_pte_table()
811 pte_clear(&init_mm, addr, pte); in remove_pte_table()
1644 pte_free_kernel(&init_mm, pte); in pud_free_pmd_page()
1648 pmd_free(&init_mm, pmd); in pud_free_pmd_page()
[all …]
/linux-6.15/arch/s390/boot/
vmem.c
143 pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d); in kasan_pgd_populate_zero_shadow()
154 p4d_populate(&init_mm, p4d, kasan_early_shadow_pud); in kasan_p4d_populate_zero_shadow()
165 pud_populate(&init_mm, pud, kasan_early_shadow_pmd); in kasan_pud_populate_zero_shadow()
389 pmd_populate(&init_mm, pmd, pte); in pgtable_pmd_populate()
421 pud_populate(&init_mm, pud, pmd); in pgtable_pud_populate()
445 p4d_populate(&init_mm, p4d, pud); in pgtable_p4d_populate()
464 pgd = pgd_offset(&init_mm, addr); in pgtable_populate()
471 pgd_populate(&init_mm, pgd, p4d); in pgtable_populate()
504 init_mm_pgd = init_mm.pgd; in setup_vmem()
505 init_mm.pgd = (pgd_t *)swapper_pg_dir; in setup_vmem()
[all …]
/linux-6.15/arch/arm64/include/asm/
pgalloc.h
33 pudval |= (mm == &init_mm) ? PUD_TABLE_UXN : PUD_TABLE_PXN; in pud_populate()
55 p4dval |= (mm == &init_mm) ? P4D_TABLE_UXN : P4D_TABLE_PXN; in p4d_populate()
84 pgdval |= (mm == &init_mm) ? PGD_TABLE_UXN : PGD_TABLE_PXN; in pgd_populate()
111 VM_BUG_ON(mm && mm != &init_mm); in pmd_populate_kernel()
119 VM_BUG_ON(mm == &init_mm); in pmd_populate()
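
The pgalloc.h hits encode an arm64 policy: table entries created for init_mm are marked user-non-executable (UXN), while entries for user mms are marked privileged-non-executable (PXN). A minimal sketch of the PUD case, mirroring the snippet above; example_pud_populate() is a hypothetical name for what the real pud_populate() does, and it assumes the arm64-internal __pud_populate() helper in its current form.

#include <linux/mm.h>
#include <asm/pgalloc.h>

/* Hypothetical mirror of the real arm64 pud_populate(): kernel (init_mm)
 * tables get UXN, user tables get PXN, then the table entry is written. */
static inline void example_pud_populate(struct mm_struct *mm, pud_t *pudp,
					pmd_t *pmdp)
{
	pudval_t pudval = PUD_TYPE_TABLE;

	pudval |= (mm == &init_mm) ? PUD_TABLE_UXN : PUD_TABLE_PXN;
	__pud_populate(pudp, __pa(pmdp), pudval);
}
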
mmu_context.h
108 if (mm != &init_mm && !system_uses_ttbr0_pan()) in cpu_uninstall_idmap()
118 cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm); in cpu_install_idmap()
218 if (mm == &init_mm) in update_saved_ttbr0()
240 update_saved_ttbr0(tsk, &init_mm); in enter_lazy_tlb()
249 if (next == &init_mm) { in __switch_mm()
/linux-6.15/arch/powerpc/mm/
pageattr.c
22 return pte_update(&init_mm, addr, ptep, old & ~new, new & ~old, 0); in pte_update_delta()
56 pte_update(&init_mm, addr, ptep, _PAGE_PRESENT, 0, 0); in change_page_attr()
59 pte_update(&init_mm, addr, ptep, 0, _PAGE_PRESENT, 0); in change_page_attr()
102 return apply_to_existing_page_range(&init_mm, start, size, in change_memory_attr()
/linux-6.15/arch/arm/mm/
kasan_init.c
86 set_pte_at(&init_mm, addr, ptep, entry); in kasan_pte_populate()
114 pmd_populate_kernel(&init_mm, pmdp, p); in kasan_pmd_populate()
146 pgd_populate(&init_mm, pgdp, p); in kasan_pgd_populate()
239 cpu_switch_mm(tmp_pgd_table, &init_mm); in kasan_init()
293 set_pte_at(&init_mm, KASAN_SHADOW_START + i*PAGE_SIZE, in kasan_init()
299 cpu_switch_mm(swapper_pg_dir, &init_mm); in kasan_init()
idmap.c
31 pmd = pmd_alloc_one(&init_mm, addr); in idmap_add_pmd()
43 pud_populate(&init_mm, pud, pmd); in idmap_add_pmd()
114 idmap_pgd = pgd_alloc(&init_mm); in init_static_idmap()
137 cpu_switch_mm(idmap_pgd, &init_mm); in setup_mm_for_reboot()
/linux-6.15/arch/s390/mm/
vmem.c
87 page_table_free(&init_mm, table); in vmem_pte_free()
182 pte_clear(&init_mm, addr, pte); in modify_pte_table()
281 pmd_populate(&init_mm, pmd, pte); in modify_pmd_table()
348 pud_populate(&init_mm, pud, pmd); in modify_pud_table()
397 p4d_populate(&init_mm, p4d, pud); in modify_p4d_table()
448 pgd_populate(&init_mm, pgd, p4d); in modify_pagetable()
579 pgd_populate(&init_mm, pgd, p4d); in vmem_get_alloc_pte()
588 p4d_populate(&init_mm, p4d, pud); in vmem_get_alloc_pte()
597 pud_populate(&init_mm, pud, pmd); in vmem_get_alloc_pte()
608 pmd_populate(&init_mm, pmd, pte); in vmem_get_alloc_pte()
[all …]
/linux-6.15/arch/x86/xen/
grant-table.c
45 set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i], in arch_gnttab_map_shared()
67 set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i], in arch_gnttab_map_status()
89 set_pte_at(&init_mm, addr, ptes[i], __pte(0)); in arch_gnttab_unmap()
110 if (apply_to_page_range(&init_mm, (unsigned long)area->area->addr, in arch_gnttab_valloc()
/linux-6.15/Documentation/translations/zh_CN/mm/
active_mm.rst
72 Also, a new rule is that **nobody** treats "init_mm" as a real MM any more.
73 "init_mm" should be regarded only as a "lazy context when no other context is available"; in fact it is mainly…
76 if (current->mm == &init_mm)
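
The quoted documentation says init_mm is only a lazy placeholder, so the question "do we have a user context?" should be answered by testing current->mm directly rather than comparing it against &init_mm. A minimal sketch of that rule; have_user_context() is a hypothetical helper.

#include <linux/sched.h>
#include <linux/mm_types.h>

/* Hypothetical helper: the preferred "do we have a user context?" test. */
static bool have_user_context(void)
{
	/* old style the document discourages: current->mm == &init_mm */
	return current->mm != NULL;
}
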
/linux-6.15/arch/loongarch/mm/
init.c
127 set_pmd_at(&init_mm, addr, pmd, entry); in vmemmap_set_pmd()
167 p4d_populate(&init_mm, p4d, pud); in populate_kernel_pte()
176 pud_populate(&init_mm, pud, pmd); in populate_kernel_pte()
187 pmd_populate_kernel(&init_mm, pmd, pte); in populate_kernel_pte()
211 pte_clear(&init_mm, addr, ptep); in __set_fixmap()
/linux-6.15/arch/nios2/mm/
ioremap.c
84 dir = pgd_offset(&init_mm, address); in remap_area_pages()
94 p4d = p4d_alloc(&init_mm, dir, address); in remap_area_pages()
97 pud = pud_alloc(&init_mm, p4d, address); in remap_area_pages()
100 pmd = pmd_alloc(&init_mm, pud, address); in remap_area_pages()
/linux-6.15/arch/riscv/mm/
pageattr.c
278 mmap_write_lock(&init_mm); in __set_memory()
302 ret = walk_page_range_novma(&init_mm, lm_start, lm_end, in __set_memory()
320 ret = walk_page_range_novma(&init_mm, lm_start, lm_end, in __set_memory()
326 ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, in __set_memory()
330 mmap_write_unlock(&init_mm); in __set_memory()
338 ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, in __set_memory()
341 mmap_write_unlock(&init_mm); in __set_memory()
429 apply_to_existing_page_range(&init_mm, start, size, debug_pagealloc_set_page, &enable); in __kernel_map_pages()
/linux-6.15/arch/x86/kernel/
espfix_64.c
115 p4d = p4d_alloc(&init_mm, pgd, ESPFIX_BASE_ADDR); in init_espfix_bsp()
116 p4d_populate(&init_mm, p4d, espfix_pud_page); in init_espfix_bsp()
169 paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT); in init_espfix_ap()
181 paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT); in init_espfix_ap()
