/linux-6.15/drivers/dma-buf/dma-buf.c
    808   attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,  in __map_dma_buf()
    918   attach = kzalloc(sizeof(*attach), GFP_KERNEL);  in dma_buf_dynamic_attach()
    930   ret = dmabuf->ops->attach(dmabuf, attach);  in dma_buf_dynamic_attach()
    1026  __unmap_dma_buf(attach, attach->sgt, attach->dir);  in dma_buf_detach()
    1122  if (WARN_ON(!attach || !attach->dmabuf))  in dma_buf_map_attachment()
    1141  r = attach->dmabuf->ops->pin(attach);  in dma_buf_map_attachment()
    1153  attach->dmabuf->ops->unpin(attach);  in dma_buf_map_attachment()
    1198  if (WARN_ON(!attach || !attach->dmabuf))  in dma_buf_map_attachment_unlocked()
    1225  if (WARN_ON(!attach || !attach->dmabuf || !sg_table))  in dma_buf_unmap_attachment()
    1257  if (WARN_ON(!attach || !attach->dmabuf || !sg_table))  in dma_buf_unmap_attachment_unlocked()
    [all …]
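The dma-buf.c hits above span the importer-facing surface: dma_buf_dynamic_attach() allocates and registers the attachment, dma_buf_map_attachment()/dma_buf_unmap_attachment() (and their _unlocked variants) produce and release the sg_table, and dma_buf_detach() tears the attachment down. A minimal importer-side sketch using only those calls; the helper name and the choice of DMA_BIDIRECTIONAL are illustrative, not taken from dma-buf.c:

    #include <linux/dma-buf.h>
    #include <linux/dma-direction.h>
    #include <linux/err.h>

    /* Minimal importer sketch built from the calls listed above. */
    static struct sg_table *import_buffer(struct dma_buf *dmabuf, struct device *dev,
                                          struct dma_buf_attachment **out_attach)
    {
            struct dma_buf_attachment *attach;
            struct sg_table *sgt;

            attach = dma_buf_attach(dmabuf, dev);
            if (IS_ERR(attach))
                    return ERR_CAST(attach);

            sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
            if (IS_ERR(sgt)) {
                    dma_buf_detach(dmabuf, attach);
                    return sgt;
            }

            *out_attach = attach;
            return sgt;
    }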
/linux-6.15/drivers/gpu/drm/virtio/virtgpu_prime.c
    79   .attach = virtio_dma_buf_attach,
    210  if (attach) {  in virtgpu_dma_buf_free_obj()
    217  dma_buf_detach(dmabuf, attach);  in virtgpu_dma_buf_free_obj()
    248  ret = dma_buf_pin(attach);  in virtgpu_dma_buf_init_obj()
    259  params.size = attach->dmabuf->size;  in virtgpu_dma_buf_init_obj()
    265  dma_buf_unpin(attach);  in virtgpu_dma_buf_init_obj()
    271  dma_buf_unpin(attach);  in virtgpu_dma_buf_init_obj()
    299  struct dma_buf_attachment *attach;  in virtgpu_gem_prime_import() local
    330  if (IS_ERR(attach)) {  in virtgpu_gem_prime_import()
    332  return ERR_CAST(attach);  in virtgpu_gem_prime_import()
    [all …]
/linux-6.15/drivers/gpu/drm/xe/xe_dma_buf.c
    26   struct dma_buf_attachment *attach)  in xe_dma_buf_attach() argument
    30   if (attach->peer2peer &&  in xe_dma_buf_attach()
    32   attach->peer2peer = false;  in xe_dma_buf_attach()
    42   struct dma_buf_attachment *attach)  in xe_dma_buf_detach() argument
    92   struct dma_buf *dma_buf = attach->dmabuf;  in xe_dma_buf_map()
    102  if (!attach->peer2peer)  in xe_dma_buf_map()
    177  .attach = xe_dma_buf_attach,
    264  struct dma_buf_attachment *attach;  in xe_gem_prime_import() local
    297  if (IS_ERR(attach)) {  in xe_gem_prime_import()
    298  obj = ERR_CAST(attach);  in xe_gem_prime_import()
    [all …]
/linux-6.15/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
    78   struct dma_buf_attachment *attach)  in amdgpu_dma_buf_attach() argument
    87   attach->peer2peer = false;  in amdgpu_dma_buf_attach()
    103  struct dma_buf *dmabuf = attach->dmabuf;  in amdgpu_dma_buf_pin()
    121  if (!attach->peer2peer)  in amdgpu_dma_buf_pin()
    178  attach->peer2peer) {  in amdgpu_dma_buf_map()
    204  dma_buf_attach_adev(attach), bo)))  in amdgpu_dma_buf_map()
    289  .attach = amdgpu_dma_buf_attach,
    467  struct dma_buf_attachment *attach;  in amdgpu_gem_prime_import() local
    488  if (IS_ERR(attach)) {  in amdgpu_gem_prime_import()
    490  return ERR_CAST(attach);  in amdgpu_gem_prime_import()
    [all …]
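The xe and amdgpu .attach callbacks above share one pattern: if the importer asked for peer-to-peer (attach->peer2peer) but the exporter's memory cannot actually be reached that way, the flag is cleared so the core falls back to system-memory paths. A hedged sketch of that exporter-side check; can_reach_over_p2p() is a placeholder for the driver-specific test, not a kernel function:

    #include <linux/dma-buf.h>

    /* Driver-specific reachability test; stand-in prototype only. */
    bool can_reach_over_p2p(void *exporter_priv, struct device *importer_dev);

    /* Exporter-side .attach sketch: downgrade peer2peer when the importing
     * device cannot reach this exporter's memory directly. */
    static int my_dma_buf_attach(struct dma_buf *dmabuf,
                                 struct dma_buf_attachment *attach)
    {
            if (attach->peer2peer &&
                !can_reach_over_p2p(dmabuf->priv, attach->dev))
                    attach->peer2peer = false;

            return 0;
    }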
/linux-6.15/drivers/infiniband/core/umem_dmabuf.c
    24   dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);  in ib_umem_dmabuf_map_pages()
    32   sgt = dma_buf_map_attachment(umem_dmabuf->attach,  in ib_umem_dmabuf_map_pages()
    158  umem_dmabuf->attach = dma_buf_dynamic_attach(  in ib_umem_dmabuf_get_with_dma_device()
    163  if (IS_ERR(umem_dmabuf->attach)) {  in ib_umem_dmabuf_get_with_dma_device()
    164  ret = ERR_CAST(umem_dmabuf->attach);  in ib_umem_dmabuf_get_with_dma_device()
    217  err = dma_buf_pin(umem_dmabuf->attach);  in ib_umem_dmabuf_get_pinned_with_dma_device()
    225  dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);  in ib_umem_dmabuf_get_pinned_with_dma_device()
    230  dma_buf_unpin(umem_dmabuf->attach);  in ib_umem_dmabuf_get_pinned_with_dma_device()
    232  dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);  in ib_umem_dmabuf_get_pinned_with_dma_device()
    257  dma_buf_unpin(umem_dmabuf->attach);  in ib_umem_dmabuf_revoke()
    [all …]
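The pinned-umem path above calls dma_buf_pin() while holding the buffer's reservation lock, which pin/unpin require. A minimal sketch of that locking pattern; the wrapper name is illustrative:

    #include <linux/dma-buf.h>
    #include <linux/dma-resv.h>

    /* dma_buf_pin() and dma_buf_unpin() must run with the dma-buf's
     * reservation lock held, as the dma_resv_unlock() calls above imply. */
    static int pin_attachment(struct dma_buf_attachment *attach)
    {
            int ret;

            dma_resv_lock(attach->dmabuf->resv, NULL);
            ret = dma_buf_pin(attach);
            dma_resv_unlock(attach->dmabuf->resv);

            return ret;
    }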
/linux-6.15/drivers/media/common/videobuf2/videobuf2-vmalloc.c
    224  attach = kzalloc(sizeof(*attach), GFP_KERNEL);  in vb2_vmalloc_dmabuf_ops_attach()
    225  if (!attach)  in vb2_vmalloc_dmabuf_ops_attach()
    228  sgt = &attach->sgt;  in vb2_vmalloc_dmabuf_ops_attach()
    231  kfree(attach);  in vb2_vmalloc_dmabuf_ops_attach()
    239  kfree(attach);  in vb2_vmalloc_dmabuf_ops_attach()
    246  attach->dma_dir = DMA_NONE;  in vb2_vmalloc_dmabuf_ops_attach()
    257  if (!attach)  in vb2_vmalloc_dmabuf_ops_detach()
    260  sgt = &attach->sgt;  in vb2_vmalloc_dmabuf_ops_detach()
    266  kfree(attach);  in vb2_vmalloc_dmabuf_ops_detach()
    276  sgt = &attach->sgt;  in vb2_vmalloc_dmabuf_ops_map()
    [all …]

/linux-6.15/drivers/media/common/videobuf2/videobuf2-dma-sg.c
    378  attach = kzalloc(sizeof(*attach), GFP_KERNEL);  in vb2_dma_sg_dmabuf_ops_attach()
    379  if (!attach)  in vb2_dma_sg_dmabuf_ops_attach()
    382  sgt = &attach->sgt;  in vb2_dma_sg_dmabuf_ops_attach()
    388  kfree(attach);  in vb2_dma_sg_dmabuf_ops_attach()
    400  attach->dma_dir = DMA_NONE;  in vb2_dma_sg_dmabuf_ops_attach()
    401  dbuf_attach->priv = attach;  in vb2_dma_sg_dmabuf_ops_attach()
    412  if (!attach)  in vb2_dma_sg_dmabuf_ops_detach()
    415  sgt = &attach->sgt;  in vb2_dma_sg_dmabuf_ops_detach()
    421  kfree(attach);  in vb2_dma_sg_dmabuf_ops_detach()
    431  sgt = &attach->sgt;  in vb2_dma_sg_dmabuf_ops_map()
    [all …]

/linux-6.15/drivers/media/common/videobuf2/videobuf2-dma-contig.c
    327  attach = kzalloc(sizeof(*attach), GFP_KERNEL);  in vb2_dc_dmabuf_ops_attach()
    328  if (!attach)  in vb2_dc_dmabuf_ops_attach()
    331  sgt = &attach->sgt;  in vb2_dc_dmabuf_ops_attach()
    337  kfree(attach);  in vb2_dc_dmabuf_ops_attach()
    349  attach->dma_dir = DMA_NONE;  in vb2_dc_dmabuf_ops_attach()
    350  dbuf_attach->priv = attach;  in vb2_dc_dmabuf_ops_attach()
    361  if (!attach)  in vb2_dc_dmabuf_ops_detach()
    364  sgt = &attach->sgt;  in vb2_dc_dmabuf_ops_detach()
    377  kfree(attach);  in vb2_dc_dmabuf_ops_detach()
    387  sgt = &attach->sgt;  in vb2_dc_dmabuf_ops_map()
    [all …]
/linux-6.15/include/linux/dma-buf.h
    72   int (*attach)(struct dma_buf *, struct dma_buf_attachment *);  member
    109  int (*pin)(struct dma_buf_attachment *attach);
    122  void (*unpin)(struct dma_buf_attachment *attach);
    488  void (*move_notify)(struct dma_buf_attachment *attach);
    595  dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)  in dma_buf_attachment_is_dynamic() argument
    597  return !!attach->importer_ops;  in dma_buf_attachment_is_dynamic()
    607  struct dma_buf_attachment *attach);
    608  int dma_buf_pin(struct dma_buf_attachment *attach);
    609  void dma_buf_unpin(struct dma_buf_attachment *attach);
    627  dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
    [all …]
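The header hits outline the dynamic-importer contract: an attachment counts as dynamic when importer_ops is set, and its move_notify callback is how the exporter tells the importer a mapping is about to be invalidated. A hedged sketch of wiring that up through dma_buf_dynamic_attach(); the my_* names are illustrative:

    #include <linux/dma-buf.h>

    static void my_move_notify(struct dma_buf_attachment *attach)
    {
            /* The exporter is about to move the buffer: drop any cached
             * mapping here and re-map on next use. */
    }

    static const struct dma_buf_attach_ops my_importer_ops = {
            .allow_peer2peer = true,
            .move_notify     = my_move_notify,
    };

    static struct dma_buf_attachment *my_attach(struct dma_buf *dmabuf,
                                                struct device *dev, void *priv)
    {
            /* A non-NULL importer_ops makes dma_buf_attachment_is_dynamic() true. */
            return dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, priv);
    }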
/linux-6.15/drivers/xen/gntdev-dmabuf.c
    48   struct dma_buf_attachment *attach;  member
    253  attach->priv = NULL;  in dmabuf_exp_ops_detach()
    266  attach->dev);  in dmabuf_exp_ops_map_dma_buf()
    340  .attach = dmabuf_exp_ops_attach,
    591  if (IS_ERR(attach)) {  in dmabuf_imp_to_refs()
    592  ret = ERR_CAST(attach);  in dmabuf_imp_to_refs()
    596  gntdev_dmabuf->u.imp.attach = attach;  in dmabuf_imp_to_refs()
    663  dma_buf_detach(dma_buf, attach);  in dmabuf_imp_to_refs()
    708  attach = gntdev_dmabuf->u.imp.attach;  in dmabuf_imp_release()
    713  dma_buf = attach->dmabuf;  in dmabuf_imp_release()
    [all …]
/linux-6.15/drivers/iommu/iommufd/device.c
    313  if (attach)  in iommufd_group_device_num()
    421  if (attach && attach->hwpt && !attach->hwpt->pasid_compat)  in iommufd_hwpt_pasid_compat()
    572  if (!attach) {  in iommufd_hw_pagetable_attach()
    573  attach = kzalloc(sizeof(*attach), GFP_KERNEL);  in iommufd_hw_pagetable_attach()
    574  if (!attach) {  in iommufd_hw_pagetable_attach()
    628  kfree(attach);  in iommufd_hw_pagetable_attach()
    647  if (!attach) {  in iommufd_hw_pagetable_detach()
    652  hwpt = attach->hwpt;  in iommufd_hw_pagetable_detach()
    659  kfree(attach);  in iommufd_hw_pagetable_detach()
    745  if (!attach) {  in iommufd_device_do_replace()
    [all …]
/linux-6.15/drivers/vfio/device_cdev.c
    165  struct vfio_device_attach_iommufd_pt attach;  in vfio_df_ioctl_attach_pt() local
    172  if (copy_from_user(&attach, arg, minsz))  in vfio_df_ioctl_attach_pt()
    175  if (attach.argsz < minsz)  in vfio_df_ioctl_attach_pt()
    178  if (attach.flags & ~VFIO_DEVICE_ATTACH_PASID)  in vfio_df_ioctl_attach_pt()
    181  if (attach.flags & VFIO_DEVICE_ATTACH_PASID) {  in vfio_df_ioctl_attach_pt()
    188  if (attach.argsz < xend)  in vfio_df_ioctl_attach_pt()
    191  if (copy_from_user((void *)&attach + minsz,  in vfio_df_ioctl_attach_pt()
    197  if (attach.flags & VFIO_DEVICE_ATTACH_PASID)  in vfio_df_ioctl_attach_pt()
    199  attach.pasid,  in vfio_df_ioctl_attach_pt()
    200  &attach.pt_id);  in vfio_df_ioctl_attach_pt()
    [all …]
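vfio_df_ioctl_attach_pt() above is the kernel side of the VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl. A hedged userspace sketch of the matching call, assuming device_fd has already been bound to an iommufd and hwpt_id names an existing hardware page table; error handling and the PASID path are omitted:

    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* Sketch only: the earlier bind/allocation steps are assumed done. */
    static int attach_to_hwpt(int device_fd, __u32 hwpt_id)
    {
            struct vfio_device_attach_iommufd_pt attach = {
                    .argsz = sizeof(attach),
                    .flags = 0,
                    .pt_id = hwpt_id,
            };

            return ioctl(device_fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach);
    }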
/linux-6.15/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
    100  struct dma_buf_attachment *attach;  in omap_gem_prime_import() local
    117  attach = dma_buf_attach(dma_buf, dev->dev);  in omap_gem_prime_import()
    118  if (IS_ERR(attach))  in omap_gem_prime_import()
    119  return ERR_CAST(attach);  in omap_gem_prime_import()
    123  sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);  in omap_gem_prime_import()
    135  obj->import_attach = attach;  in omap_gem_prime_import()
    140  dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_TO_DEVICE);  in omap_gem_prime_import()
    142  dma_buf_detach(dma_buf, attach);  in omap_gem_prime_import()
/linux-6.15/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
    31   struct drm_i915_gem_object *obj = dma_buf_to_obj(attach->dmabuf);  in i915_gem_map_dma_buf()
    56   ret = dma_map_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);  in i915_gem_map_dma_buf()
    168  struct dma_buf_attachment *attach)  in i915_gem_dmabuf_attach() argument
    197  struct dma_buf_attachment *attach)  in i915_gem_dmabuf_detach() argument
    205  .attach = i915_gem_dmabuf_attach,
    286  struct dma_buf_attachment *attach;  in i915_gem_prime_import() local
    308  attach = dma_buf_attach(dma_buf, dev->dev);  in i915_gem_prime_import()
    309  if (IS_ERR(attach))  in i915_gem_prime_import()
    310  return ERR_CAST(attach);  in i915_gem_prime_import()
    323  obj->base.import_attach = attach;  in i915_gem_prime_import()
    [all …]
/linux-6.15/drivers/gpu/drm/drm_prime.c
    625   struct dma_buf_attachment *attach)  in drm_gem_map_detach() argument
    808   .attach = drm_gem_map_attach,
    932   struct dma_buf_attachment *attach;  in drm_gem_prime_import_dev() local
    953   if (IS_ERR(attach))  in drm_gem_prime_import_dev()
    954   return ERR_CAST(attach);  in drm_gem_prime_import_dev()
    970   obj->import_attach = attach;  in drm_gem_prime_import_dev()
    978   dma_buf_detach(dma_buf, attach);  in drm_gem_prime_import_dev()
    1069  struct dma_buf_attachment *attach;  in drm_prime_gem_destroy() local
    1072  attach = obj->import_attach;  in drm_prime_gem_destroy()
    1075  dma_buf = attach->dmabuf;  in drm_prime_gem_destroy()
    [all …]
/linux-6.15/Documentation/bpf/libbpf/program_types.rst
    8    The table below lists the program types, their attach types where relevant and the ELF section
    15   When ``extras`` are specified, they provide details of how to auto-attach the BPF program. The
    216  .. [#fentry] The ``fentry`` attach format is ``fentry[.s]/<function>``.
    217  .. [#kprobe] The ``kprobe`` attach format is ``kprobe/<function>[+<offset>]``. Valid
    220  .. [#ksyscall] The ``ksyscall`` attach format is ``ksyscall/<syscall>``.
    221  .. [#uprobe] The ``uprobe`` attach format is ``uprobe[.s]/<path>:<function>[+<offset>]``.
    222  .. [#usdt] The ``usdt`` attach format is ``usdt/<path>:<provider>:<name>``.
    227  .. [#rawtp] The ``raw_tp`` attach format is ``raw_tracepoint[.w]/<tracepoint>``.
    228  .. [#tc_legacy] The ``tc``, ``classifier`` and ``action`` attach types are deprecated, use
    234  .. [#tp] The ``tracepoint`` attach format is ``tracepoint/<category>/<name>``.
    [all …]
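The footnotes above define the SEC() attach-format strings libbpf parses for auto-attach. A small illustrative BPF-C fragment exercising two of them; the probed function and tracepoint are arbitrary examples, not mandated by the document:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    /* kprobe/<function>[+<offset>] */
    SEC("kprobe/do_unlinkat")
    int BPF_KPROBE(on_unlinkat)
    {
            return 0;
    }

    /* tracepoint/<category>/<name> */
    SEC("tracepoint/syscalls/sys_enter_openat")
    int on_openat(void *ctx)
    {
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";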
/linux-6.15/drivers/gpu/drm/tegra/gem.c
    82   map->attach = dma_buf_attach(buf, dev);  in tegra_bo_pin()
    83   if (IS_ERR(map->attach)) {  in tegra_bo_pin()
    84   err = PTR_ERR(map->attach);  in tegra_bo_pin()
    90   dma_buf_detach(buf, map->attach);  in tegra_bo_pin()
    163  if (map->attach) {  in tegra_bo_unpin()
    166  dma_buf_detach(map->attach->dmabuf, map->attach);  in tegra_bo_unpin()
    460  struct dma_buf_attachment *attach;  in tegra_bo_import() local
    474  if (IS_ERR(attach)) {  in tegra_bo_import()
    475  err = PTR_ERR(attach);  in tegra_bo_import()
    489  bo->gem.import_attach = attach;  in tegra_bo_import()
    [all …]
/linux-6.15/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
    45  struct dma_buf_attachment *attach)  in vmw_prime_map_attach() argument
    51  struct dma_buf_attachment *attach)  in vmw_prime_map_detach() argument
    55  static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach,  in vmw_prime_map_dma_buf() argument
    61  static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,  in vmw_prime_unmap_dma_buf() argument
    68  .attach = vmw_prime_map_attach,
/linux-6.15/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
    22   { **show** | **list** | **tree** | **attach** | **detach** | **help** }
    29   | **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
    55   Output will start with program ID followed by attach type, attach flags and
    67   with absolute cgroup path, followed by program ID, attach type, attach
    74   bpftool cgroup attach *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
    75   Attach program *PROG* to the cgroup *CGROUP* with attach type *ATTACH_TYPE*
    83   Only one program is allowed to be attached to a cgroup with no attach flags
    85   program and attach the new one.
    125  Detach *PROG* from the cgroup *CGROUP* and attach type *ATTACH_TYPE*.
    143  | **# bpftool cgroup attach /sys/fs/cgroup/test.slice/ device id 1 allow_multi**

/linux-6.15/tools/bpf/bpftool/Documentation/bpftool-net.rst
    21   *COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** }
    27   | **bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ]
    61   bpftool net attach *ATTACH_TYPE* *PROG* dev *NAME* [ overwrite ]
    78   for attach must be specified. Currently, only XDP-related modes are
    157  | **# bpftool net attach xdpdrv id 16 dev enp6s0np0**
    166  | **# bpftool net attach xdpdrv id 16 dev enp6s0np0**
    167  | **# bpftool net attach xdpdrv id 20 dev enp6s0np0 overwrite**
    176  | **# bpftool net attach xdpdrv id 16 dev enp6s0np0**
    185  | **# bpftool net attach tcx_ingress name tc_prog dev lo**
    195  | **# bpftool net attach tcx_ingress name tc_prog dev lo**
/linux-6.15/drivers/virtio/virtio_dma_buf.c
    28  exp_info->ops->attach != &virtio_dma_buf_attach ||  in virtio_dma_buf_export()
    41  struct dma_buf_attachment *attach)  in virtio_dma_buf_attach() argument
    49  ret = ops->device_attach(dma_buf, attach);  in virtio_dma_buf_attach()
    63  return dma_buf->ops->attach == &virtio_dma_buf_attach;  in is_virtio_dma_buf()
/linux-6.15/drivers/gpu/drm/armada/armada_gem.c
    389  struct drm_gem_object *obj = attach->dmabuf->priv;  in armada_gem_prime_map_dma_buf()
    419  if (dma_map_sgtable(attach->dev, sgt, dir, 0))  in armada_gem_prime_map_dma_buf()
    428  if (dma_map_sgtable(attach->dev, sgt, dir, 0))  in armada_gem_prime_map_dma_buf()
    455  struct drm_gem_object *obj = attach->dmabuf->priv;  in armada_gem_prime_unmap_dma_buf()
    460  dma_unmap_sgtable(attach->dev, sgt, dir, 0);  in armada_gem_prime_unmap_dma_buf()
    502  struct dma_buf_attachment *attach;  in armada_gem_prime_import() local
    517  attach = dma_buf_attach(buf, dev->dev);  in armada_gem_prime_import()
    518  if (IS_ERR(attach))  in armada_gem_prime_import()
    519  return ERR_CAST(attach);  in armada_gem_prime_import()
    523  dma_buf_detach(buf, attach);  in armada_gem_prime_import()
    [all …]
/linux-6.15/Documentation/bpf/map_cgroup_storage.rst
    10   attach to cgroups; the programs are made available by the same Kconfig. The
    34   ``attach_type`` is the program's attach type.
    37   When this key type is used, then all attach types of the particular cgroup and
    39   ``struct bpf_cgroup_storage_key``, then programs of different attach types
    133  multiple attach types, and each attach creates a fresh zeroed storage. The
    145  does not already contain an entry for the cgroup and attach type pair, or else
    146  the old storage is reused for the new attachment. If the map is attach type
    147  shared, then attach type is simply ignored during comparison. Storage is freed
    158  In all versions, userspace may use the attach parameters of cgroup and
    161  attach type shared storages, only the first value in the struct, cgroup inode
    [all …]
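The excerpts above describe BPF_MAP_TYPE_CGROUP_STORAGE keyed by cgroup and, optionally, attach type. A minimal illustrative program using the per-attach-type key and the bpf_get_local_storage() helper; the map and program names are arbitrary:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
            __type(key, struct bpf_cgroup_storage_key);
            __type(value, __u64);
    } cgroup_storage SEC(".maps");

    SEC("cgroup_skb/egress")
    int count_egress_packets(struct __sk_buff *skb)
    {
            /* Each cgroup/attach-type pair gets its own zeroed storage slot. */
            __u64 *count = bpf_get_local_storage(&cgroup_storage, 0);

            __sync_fetch_and_add(count, 1);
            return 1;               /* allow the packet */
    }

    char _license[] SEC("license") = "GPL";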
/linux-6.15/drivers/iio/industrialio-buffer.c
    53    struct dma_buf_attachment *attach;  member
    1562  struct dma_buf_attachment *attach = priv->attach;  in iio_buffer_dmabuf_release() local
    1572  dma_buf_detach(attach->dmabuf, attach);  in iio_buffer_dmabuf_release()
    1640  attach = priv->attach;  in iio_buffer_find_attachment()
    1645  if (attach)  in iio_buffer_find_attachment()
    1683  if (IS_ERR(attach)) {  in iio_buffer_attach_dmabuf()
    1684  err = PTR_ERR(attach);  in iio_buffer_attach_dmabuf()
    1704  priv->attach = attach;  in iio_buffer_attach_dmabuf()
    1849  if (IS_ERR(attach)) {  in iio_buffer_enqueue_dmabuf()
    1850  ret = PTR_ERR(attach);  in iio_buffer_enqueue_dmabuf()
    [all …]
/linux-6.15/include/drm/drm_prime.h
    81  struct dma_buf_attachment *attach);
    83  struct dma_buf_attachment *attach);
    84  struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
    86  void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
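These declarations are the GEM PRIME helpers a driver can plug directly into its dma_buf_ops when exporting. A hedged sketch of such a table; the drm_gem_dmabuf_* entries beyond the four helpers listed above are included as an assumption about a typical exporter, not something this header requires:

    #include <drm/drm_prime.h>
    #include <linux/dma-buf.h>

    static const struct dma_buf_ops my_gem_prime_dmabuf_ops = {
            .attach        = drm_gem_map_attach,
            .detach        = drm_gem_map_detach,
            .map_dma_buf   = drm_gem_map_dma_buf,
            .unmap_dma_buf = drm_gem_unmap_dma_buf,
            .release       = drm_gem_dmabuf_release,
            .mmap          = drm_gem_dmabuf_mmap,
            .vmap          = drm_gem_dmabuf_vmap,
            .vunmap        = drm_gem_dmabuf_vunmap,
    };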