/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2008 Keith Packard <[email protected]>
 */

#ifndef _LINUX_IO_MAPPING_H
#define _LINUX_IO_MAPPING_H

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/pgtable.h>
#include <asm/page.h>

/*
 * The io_mapping mechanism provides an abstraction for mapping
 * individual pages from an io device to the CPU in an efficient fashion.
 *
 * See Documentation/driver-api/io-mapping.rst
 */

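/*
 * Illustrative usage sketch (not part of this header; the BAR base, size,
 * offset and value below are hypothetical). A typical driver creates one
 * io_mapping per device aperture and then maps individual pages on demand:
 *
 *	struct io_mapping *map;
 *	void __iomem *vaddr;
 *
 *	map = io_mapping_create_wc(bar_base, bar_size);
 *	if (!map)
 *		return -ENOMEM;
 *
 *	vaddr = io_mapping_map_local_wc(map, page_offset);
 *	writel(value, vaddr);
 *	io_mapping_unmap_local(vaddr);
 *
 *	io_mapping_free(map);
 */
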
struct io_mapping {
	resource_size_t base;
	unsigned long size;
	pgprot_t prot;
	void __iomem *iomem;
};

#ifdef CONFIG_HAVE_ATOMIC_IOMAP

#include <linux/pfn.h>
#include <asm/iomap.h>
/*
 * For small address space machines, mapping large objects
 * into the kernel virtual space isn't practical. Where
 * available, use fixmap support to dynamically map pages
 * of the object at run time.
 */

static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping *iomap,
		   resource_size_t base,
		   unsigned long size)
{
	pgprot_t prot;

	if (iomap_create_wc(base, size, &prot))
		return NULL;

	iomap->base = base;
	iomap->size = size;
	iomap->prot = prot;
	return iomap;
}

static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iomap_free(mapping->base, mapping->size);
}

/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	else
		migrate_disable();
	pagefault_disable();
	return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}

static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
	pagefault_enable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
	else
		migrate_enable();
}
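
/*
 * Sketch of the expected call pattern for the atomic variant (offset, buf
 * and len are hypothetical). Between map and unmap the caller runs with
 * pagefaults disabled and preemption (or, on PREEMPT_RT, migration)
 * disabled, so it must not sleep; new code generally prefers the _local
 * variants below.
 *
 *	vaddr = io_mapping_map_atomic_wc(map, offset);
 *	memcpy_toio(vaddr, buf, len);
 *	io_mapping_unmap_atomic(vaddr);
 */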

static inline void __iomem *
io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;
	return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}

static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
}
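
/*
 * Minimal sketch for the _local variant (offsets are hypothetical). The
 * returned address is only valid in the calling context, and nested local
 * mappings should be released in reverse order of creation:
 *
 *	void __iomem *a = io_mapping_map_local_wc(map, off_a);
 *	void __iomem *b = io_mapping_map_local_wc(map, off_b);
 *	...
 *	io_mapping_unmap_local(b);
 *	io_mapping_unmap_local(a);
 */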

static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;

	return ioremap_wc(phys_addr, size);
}

static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	iounmap(vaddr);
}
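
/*
 * In this configuration io_mapping_map_wc() sets up a fresh ioremap_wc()
 * mapping of the requested size, so it may sleep and must be called from
 * sleepable context. A hypothetical multi-page read (buf and len are
 * illustrative) would be:
 *
 *	vaddr = io_mapping_map_wc(map, offset, 4 * PAGE_SIZE);
 *	if (vaddr) {
 *		memcpy_fromio(buf, vaddr, len);
 *		io_mapping_unmap(vaddr);
 *	}
 */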

#else  /* HAVE_ATOMIC_IOMAP */

#include <linux/uaccess.h>

/* Create the io_mapping object */
static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping *iomap,
		   resource_size_t base,
		   unsigned long size)
{
	iomap->iomem = ioremap_wc(base, size);
	if (!iomap->iomem)
		return NULL;

	iomap->base = base;
	iomap->size = size;
	iomap->prot = pgprot_writecombine(PAGE_KERNEL);

	return iomap;
}

static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iounmap(mapping->iomem);
}

/* Non-atomic map/unmap */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	return mapping->iomem + offset;
}

static inline void
io_mapping_unmap(void __iomem *vaddr)
{
}

/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	else
		migrate_disable();
	pagefault_disable();
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}

static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
	pagefault_enable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
	else
		migrate_enable();
}

static inline void __iomem *
io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
{
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}

static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
}

#endif /* !HAVE_ATOMIC_IOMAP */

static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base,
		     unsigned long size)
{
	struct io_mapping *iomap;

	iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
	if (!iomap)
		return NULL;

	if (!io_mapping_init_wc(iomap, base, size)) {
		kfree(iomap);
		return NULL;
	}

	return iomap;
}

static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}
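
/*
 * Hypothetical setup/teardown pairing for a PCI BAR (pdev, bar and priv are
 * illustrative, not part of this API); io_mapping_free() undoes the setup
 * on the remove or error-unwind path:
 *
 *	priv->iomap = io_mapping_create_wc(pci_resource_start(pdev, bar),
 *					   pci_resource_len(pdev, bar));
 *	if (!priv->iomap)
 *		return -ENOMEM;
 *	...
 *	io_mapping_free(priv->iomap);
 */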

int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn, unsigned long size);

#endif /* _LINUX_IO_MAPPING_H */