Searched refs:pgt (Results 1 – 25 of 31) sorted by relevance

/linux-6.15/drivers/net/ethernet/mellanox/mlxsw/
spectrum_pgt.c
32 mutex_lock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_alloc()
42 mutex_unlock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_alloc()
52 mutex_lock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_free()
63 mutex_lock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_alloc_range()
313 struct mlxsw_sp_pgt *pgt; in mlxsw_sp_pgt_init() local
318 pgt = kzalloc(sizeof(*mlxsw_sp->pgt), GFP_KERNEL); in mlxsw_sp_pgt_init()
319 if (!pgt) in mlxsw_sp_pgt_init()
322 idr_init(&pgt->pgt_idr); in mlxsw_sp_pgt_init()
324 mutex_init(&pgt->lock); in mlxsw_sp_pgt_init()
326 mlxsw_sp->pgt = pgt; in mlxsw_sp_pgt_init()
[all …]
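
The mlxsw hits above show a common allocate-and-initialize shape: kzalloc() the table state, then set up the IDR and the mutex that guards it, and only then publish the pointer. A minimal self-contained sketch of that pattern; struct demo_pgt and demo_pgt_init() are hypothetical names, not mlxsw's:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_pgt {			/* hypothetical stand-in for mlxsw_sp_pgt */
	struct idr pgt_idr;		/* index allocator, as in the hits */
	struct mutex lock;		/* guards pgt_idr */
};

static struct demo_pgt *demo_pgt_init(void)
{
	struct demo_pgt *pgt;

	pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
	if (!pgt)
		return NULL;

	idr_init(&pgt->pgt_idr);
	mutex_init(&pgt->lock);
	return pgt;			/* caller stores it, e.g. on its driver struct */
}
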
/linux-6.15/arch/arm64/kvm/hyp/
pgtable.c
260 if (!pgt->pgd) in _kvm_pgtable_walk()
529 if (!pgt->pgd) in kvm_pgtable_hyp_init()
535 pgt->mmu = NULL; in kvm_pgtable_hyp_init()
564 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker)); in kvm_pgtable_hyp_destroy()
565 pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd)); in kvm_pgtable_hyp_destroy()
566 pgt->pgd = NULL; in kvm_pgtable_hyp_destroy()
907 struct kvm_pgtable *pgt = data->mmu->pgt; in stage2_map_walker_try_leaf() local
1143 .arg = pgt, in kvm_pgtable_stage2_unmap()
1341 .arg = pgt, in kvm_pgtable_stage2_flush()
1520 if (!pgt->pgd) in __kvm_pgtable_stage2_init()
[all …]
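
Two things recur in the hyp page-table hits: every entry point guards on pgt->pgd, and teardown walks the entire input-address range (0 to BIT(pgt->ia_bits)) before putting the PGD page. A hedged sketch of that walker usage, assuming the callback shape declared in <asm/kvm_pgtable.h> for this kernel; demo_* names are hypothetical:

#include <linux/bits.h>
#include <linux/bug.h>
#include <asm/kvm_pgtable.h>

static int demo_visit(const struct kvm_pgtable_visit_ctx *ctx,
		      enum kvm_pgtable_walk_flags visit)
{
	/* Inspect (or free) the entry behind ctx->ptep here. */
	return 0;
}

static void demo_walk_everything(struct kvm_pgtable *pgt)
{
	struct kvm_pgtable_walker walker = {
		.cb	= demo_visit,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	if (!pgt->pgd)		/* the guard seen throughout the hits */
		return;

	/* Same full-range walk the destroy path issues. */
	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
}
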
/linux-6.15/arch/arm64/kvm/
pkvm.c
326 pgt->pkvm_mappings = RB_ROOT; in pkvm_pgtable_stage2_init()
327 pgt->mmu = mmu; in pkvm_pgtable_stage2_init()
334 struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu); in pkvm_pgtable_stage2_destroy()
342 node = rb_first(&pgt->pkvm_mappings); in pkvm_pgtable_stage2_destroy()
347 rb_erase(&mapping->node, &pgt->pkvm_mappings); in pkvm_pgtable_stage2_destroy()
356 struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu); in pkvm_pgtable_stage2_map()
384 struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu); in pkvm_pgtable_stage2_unmap()
394 rb_erase(&mapping->node, &pgt->pkvm_mappings); in pkvm_pgtable_stage2_unmap()
403 struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu); in pkvm_pgtable_stage2_wrprotect()
420 struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu); in pkvm_pgtable_stage2_flush()
[all …]
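
pkvm_pgtable_stage2_destroy() drains the pkvm_mappings rbtree with the standard rb_first()/rb_erase() loop: take the leftmost node, unlink it, free it, repeat until the tree is empty. A self-contained sketch of that idiom; struct demo_mapping is a hypothetical node type, not the pKVM one:

#include <linux/rbtree.h>
#include <linux/slab.h>

struct demo_mapping {
	struct rb_node node;
	/* payload fields elided */
};

static void demo_drain_tree(struct rb_root *root)
{
	struct rb_node *node;

	while ((node = rb_first(root))) {
		struct demo_mapping *m =
			rb_entry(node, struct demo_mapping, node);

		rb_erase(&m->node, root);	/* unlink before freeing */
		kfree(m);
	}
}
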
mmu.c
71 struct kvm_pgtable *pgt = mmu->pgt; in stage2_apply_range() local
72 if (!pgt) in stage2_apply_range()
150 pgt = kvm->arch.mmu.pgt; in kvm_mmu_split_huge_pages()
151 if (!pgt) in kvm_mmu_split_huge_pages()
953 pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT); in kvm_init_stage2_mmu()
954 if (!pgt) in kvm_init_stage2_mmu()
962 mmu->pgt = pgt; in kvm_init_stage2_mmu()
1075 pgt = mmu->pgt; in kvm_free_stage2_pgd()
1076 if (pgt) { in kvm_free_stage2_pgd()
1151 struct kvm_pgtable *pgt = mmu->pgt; in kvm_phys_addr_ioremap() local
[all …]
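
The kvm_init_stage2_mmu() hit allocates the kvm_pgtable with GFP_KERNEL_ACCOUNT, so the allocation is charged to the VM owner's memory cgroup, and stores it on mmu->pgt only once setup can no longer fail. A hypothetical reduction of that shape (the setup between the two steps is elided):

#include <linux/slab.h>
#include <asm/kvm_host.h>

static int demo_init_stage2(struct kvm_s2_mmu *mmu)
{
	struct kvm_pgtable *pgt;

	pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);
	if (!pgt)
		return -ENOMEM;

	/* ... page-table setup would go here, freeing pgt on failure ... */

	mmu->pgt = pgt;		/* publish only after setup succeeds */
	return 0;
}
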
ptdump.c
100 struct kvm_pgtable *pgtable = mmu->pgt; in kvm_ptdump_parser_create()
145 ret = kvm_pgtable_walk(mmu->pgt, 0, BIT(mmu->pgt->ia_bits), &walker); in kvm_ptdump_guest_show()
220 pgtable = kvm->arch.mmu.pgt; in kvm_pgtable_debugfs_open()
nested.c
79 kvm->arch.nested_mmus[i].pgt->mmu = &kvm->arch.nested_mmus[i]; in kvm_vcpu_init_nested()
462 if (kvm_pgtable_get_leaf(mmu->pgt, tmp, &pte, NULL)) in get_guest_mapping_ttl()
/linux-6.15/arch/arm64/include/asm/
kvm_pgtable.h
439 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
449 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
492 u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
537 return __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL); in kvm_pgtable_stage2_init()
547 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
581 kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
615 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
687 void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
730 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
789 int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
[all …]
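
Read together, the prototypes above imply the stage-2 lifecycle: init the table against an MMU, map ranges into it, and destroy it when done. A hedged usage sketch; the trailing kvm_pgtable_stage2_map() arguments (memcache pointer and walk flags) are not visible in the truncated hit and are assumptions about the 6.15 prototype:

#include <asm/kvm_pgtable.h>

static int demo_stage2_lifecycle(struct kvm_s2_mmu *mmu,
				 struct kvm_pgtable_mm_ops *mm_ops,
				 u64 ipa, u64 size, u64 phys, void *mc)
{
	struct kvm_pgtable pgt;
	int ret;

	ret = kvm_pgtable_stage2_init(&pgt, mmu, mm_ops);
	if (ret)
		return ret;

	/* Map [ipa, ipa + size) to phys, read-only for the guest. */
	ret = kvm_pgtable_stage2_map(&pgt, ipa, size, phys,
				     KVM_PGTABLE_PROT_R, mc, 0);

	kvm_pgtable_stage2_destroy(&pgt);
	return ret;
}
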
kvm_pkvm.h
172 int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
174 void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
175 int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
178 int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
179 int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
180 int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
181 bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold);
182 int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot,
184 void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
186 int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
[all …]
/linux-6.15/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
31 if (pgt) { in nvkm_vmm_pt_del()
32 kvfree(pgt->pde); in nvkm_vmm_pt_del()
33 kfree(pgt); in nvkm_vmm_pt_del()
56 if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL))) in nvkm_vmm_pt_new()
62 pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL); in nvkm_vmm_pt_new()
63 if (!pgt->pde) { in nvkm_vmm_pt_new()
64 kfree(pgt); in nvkm_vmm_pt_new()
69 return pgt; in nvkm_vmm_pt_new()
216 if (!pgt->refs[0]) in nvkm_vmm_unref_sptes()
280 if (desc->type == SPT && (pgt->refs[0] || pgt->refs[1])) in nvkm_vmm_unref_ptes()
[all …]
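
nvkm_vmm_pt_new() shows a two-step allocation with unwind: the pgt itself is allocated with a trailing per-PTE byte array, the PDE pointer array separately with kvcalloc(), and a failed second allocation frees the first; nvkm_vmm_pt_del() releases them in reverse with kvfree()/kfree(). A hypothetical reduction:

#include <linux/slab.h>

struct demo_pt {			/* hypothetical stand-in for nvkm_vmm_pt */
	struct demo_pt **pde;		/* child page-table pointers */
	u8 pte[];			/* trailing per-PTE state, lpte bytes */
};

static struct demo_pt *demo_pt_new(size_t lpte, size_t pten)
{
	struct demo_pt *pgt;

	pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL);
	if (!pgt)
		return NULL;

	pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);
	if (!pgt->pde) {
		kfree(pgt);		/* unwind the first allocation */
		return NULL;
	}
	return pgt;
}
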
vmmgp100.c
238 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; in gp100_vmm_pd0_pde() local
242 if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0])) in gp100_vmm_pd0_pde()
244 if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1])) in gp100_vmm_pd0_pde()
365 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; in gp100_vmm_pd1_pde() local
369 if (!gp100_vmm_pde(pgt->pt[0], &data)) in gp100_vmm_pd1_pde()
vmmnv50.c
106 nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata) in nv50_vmm_pde() argument
110 if (pgt && (pt = pgt->pt[0])) { in nv50_vmm_pde()
111 switch (pgt->page) { in nv50_vmm_pde()
vmmgf100.c
108 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; in gf100_vmm_pgd_pde() local
113 if ((pt = pgt->pt[0])) { in gf100_vmm_pgd_pde()
127 if ((pt = pgt->pt[1])) { in gf100_vmm_pgd_pde()
/linux-6.15/arch/arm64/kvm/hyp/nvhe/
mem_protect.c
151 ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu, in kvm_host_prepare_stage2()
157 mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd); in kvm_host_prepare_stage2()
158 mmu->pgt = &host_mmu.pgt; in kvm_host_prepare_stage2()
264 vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd); in kvm_guest_prepare_stage2()
276 kvm_pgtable_stage2_destroy(&vm->pgt); in reclaim_pgtable_pages()
331 struct kvm_pgtable *pgt = &host_mmu.pgt; in host_stage2_unmap_dev_all() local
343 return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr); in host_stage2_unmap_dev_all()
615 return kvm_pgtable_walk(pgt, addr, size, &walker); in check_page_state_range()
690 return check_page_state_range(&vm->pgt, addr, size, &d); in __guest_check_page_state_range()
950 ret = kvm_pgtable_get_leaf(&vm->pgt, ipa, &pte, &level); in __check_host_shared_guest()
[all …]
pkvm.c
486 mmu->pgt = &hyp_vm->pgt; in insert_vm_table_entry()
/linux-6.15/drivers/gpu/drm/nouveau/nvkm/engine/dma/
usernv04.c
52 struct nvkm_memory *pgt = in nv04_dmaobj_bind() local
55 return nvkm_gpuobj_wrap(pgt, pgpuobj); in nv04_dmaobj_bind()
56 nvkm_kmap(pgt); in nv04_dmaobj_bind()
57 offset = nvkm_ro32(pgt, 8 + (offset >> 10)); in nv04_dmaobj_bind()
59 nvkm_done(pgt); in nv04_dmaobj_bind()
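
nv04_dmaobj_bind() reads a PTE through nvkm's map/read/unmap accessors: nvkm_kmap() makes the memory object CPU-addressable, nvkm_ro32() reads a 32-bit word, nvkm_done() unmaps. A sketch of that pattern; the 8-byte header offset and the >> 10 shift are copied from the hit, while the function name and include path are assumptions:

#include <core/memory.h>	/* nvkm_kmap()/nvkm_ro32()/nvkm_done() */

static u32 demo_read_nv04_pte(struct nvkm_memory *pgt, u64 offset)
{
	u32 pte;

	nvkm_kmap(pgt);
	pte = nvkm_ro32(pgt, 8 + (offset >> 10));	/* skip table header */
	nvkm_done(pgt);
	return pte;
}
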
/linux-6.15/drivers/firmware/efi/libstub/
x86-5lvl.c
68 u64 *pgt = (void *)la57_toggle + PAGE_SIZE; in efi_5level_switch() local
81 new_cr3 = memset(pgt, 0, PAGE_SIZE); in efi_5level_switch()
89 new_cr3 = memcpy(pgt, new_cr3, PAGE_SIZE); in efi_5level_switch()
/linux-6.15/arch/s390/kvm/
gaccess.c
1214 unsigned long *pgt, int *dat_protection, in kvm_s390_shadow_tables() argument
1270 *pgt = ptr + vaddr.rfx * 8; in kvm_s390_shadow_tables()
1298 *pgt = ptr + vaddr.rsx * 8; in kvm_s390_shadow_tables()
1327 *pgt = ptr + vaddr.rtx * 8; in kvm_s390_shadow_tables()
1365 *pgt = ptr + vaddr.sx * 8; in kvm_s390_shadow_tables()
1392 *pgt = ptr; in kvm_s390_shadow_tables()
1423 *pgt = pt_index & ~GMAP_SHADOW_FAKE_TABLE; in shadow_pgt_lookup()
1453 unsigned long pgt = 0; in kvm_s390_shadow_fault() local
1475 pte.val = pgt + vaddr.px * PAGE_SIZE; in kvm_s390_shadow_fault()
1484 pgt |= PEI_NOT_PTE; in kvm_s390_shadow_fault()
[all …]
/linux-6.15/arch/x86/kernel/acpi/
madt_wakeup.c
68 static void __init free_pgt_page(void *pgt, void *dummy) in free_pgt_page() argument
70 return memblock_free(pgt, PAGE_SIZE); in free_pgt_page()
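
free_pgt_page() is the free half of a page-table allocator callback pair: it hands a PAGE_SIZE page back to memblock (the dummy argument only exists to match the callback signature). The matching allocation side would look like this sketch; the name alloc_pgt_page is an assumption here:

#include <linux/memblock.h>

static void *__init alloc_pgt_page(void *context)
{
	/* One zeroed, page-aligned page from memblock. */
	return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
}
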
/linux-6.15/arch/s390/mm/
gmap.c
1222 unsigned long *pgt) in __gmap_unshadow_pgt() argument
1228 pgt[i] = _PAGE_INVALID; in __gmap_unshadow_pgt()
1241 phys_addr_t sto, pgt; in gmap_unshadow_pgt() local
1251 pgt = *ste & _SEGMENT_ENTRY_ORIGIN; in gmap_unshadow_pgt()
1253 __gmap_unshadow_pgt(sg, raddr, __va(pgt)); in gmap_unshadow_pgt()
1255 ptdesc = page_ptdesc(phys_to_page(pgt)); in gmap_unshadow_pgt()
1271 phys_addr_t pgt; in __gmap_unshadow_sgt() local
1278 pgt = sgt[i] & _REGION_ENTRY_ORIGIN; in __gmap_unshadow_sgt()
1280 __gmap_unshadow_pgt(sg, raddr, __va(pgt)); in __gmap_unshadow_sgt()
1282 ptdesc = page_ptdesc(phys_to_page(pgt)); in __gmap_unshadow_sgt()
[all …]
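
__gmap_unshadow_pgt() invalidates a whole shadow page table by storing _PAGE_INVALID into every slot; its callers then pull the table origin out of the segment or region entry and free the backing ptdesc. A sketch of the invalidation loop, assuming s390's _PAGE_ENTRIES slots per page table:

#include <asm/pgtable.h>	/* s390: _PAGE_INVALID, _PAGE_ENTRIES */

static void demo_invalidate_shadow_pgt(unsigned long *pgt)
{
	int i;

	for (i = 0; i < _PAGE_ENTRIES; i++)
		pgt[i] = _PAGE_INVALID;	/* every slot, as in the hit */
}
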
/linux-6.15/arch/powerpc/kvm/
book3s_64_mmu_radix.c
1288 pgd_t *pgt; in debugfs_radix_read() local
1326 pgt = NULL; in debugfs_radix_read()
1330 pgt = NULL; in debugfs_radix_read()
1340 if (!pgt) { in debugfs_radix_read()
1342 pgt = kvm->arch.pgtable; in debugfs_radix_read()
1349 pgt = nested->shadow_pgtable; in debugfs_radix_read()
1358 "pgdir: %lx\n", (unsigned long)pgt); in debugfs_radix_read()
1363 pgdp = pgt + pgd_index(gpa); in debugfs_radix_read()
/linux-6.15/arch/arm64/kvm/hyp/include/nvhe/
pkvm.h
41 struct kvm_pgtable pgt; member
mem_protect.h
20 struct kvm_pgtable pgt; member
/linux-6.15/arch/s390/include/asm/
gmap.h
126 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
/linux-6.15/arch/x86/events/intel/
uncore_nhmex.c
878 DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
/linux-6.15/drivers/accel/habanalabs/common/mmu/
mmu.c
953 u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt, in hl_mmu_hr_pte_phys_to_virt() argument
959 return pgt->virt_addr + pte_offset; in hl_mmu_hr_pte_phys_to_virt()
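
The habanalabs helper translates a host-resident PTE's physical address into a kernel virtual one: virtual base of the page-table page plus the PTE's offset within it. A hypothetical arithmetic-only reduction of that line:

#include <linux/types.h>

static u64 demo_pte_phys_to_virt(u64 pgt_virt_base, u64 pgt_phys_base,
				 u64 pte_phys)
{
	u64 pte_offset = pte_phys - pgt_phys_base;	/* offset within table */

	return pgt_virt_base + pte_offset;		/* mirrors the hit */
}
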