xref: /linux-6.15/drivers/pci/devres.c (revision bbaff68b)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/device.h>
3 #include <linux/pci.h>
4 #include "pci.h"
5 
6 /*
7  * On the state of PCI's devres implementation:
8  *
9  * The older devres API for PCI has two significant problems:
10  *
11  * 1. It is very strongly tied to the statically allocated mapping table in
12  *    struct pcim_iomap_devres below. This is mostly solved in the sense of the
13  *    pcim_ functions in this file providing things like ranged mapping by
14  *    bypassing this table, whereas the functions that were present in the old
15  *    API still enter the mapping addresses into the table for users of the old
16  *    API.
17  *
18  * 2. The region-request-functions in pci.c do become managed IF the device has
19  *    been enabled with pcim_enable_device() instead of pci_enable_device().
20  *    This resulted in the API becoming inconsistent: Some functions have an
21  *    obviously managed counter-part (e.g., pci_iomap() <-> pcim_iomap()),
22  *    whereas some don't and are never managed, while others don't and are
23  *    _sometimes_ managed (e.g. pci_request_region()).
24  *
25  *    Consequently, in the new API, region requests performed by the pcim_
26  *    functions are automatically cleaned up through the devres callback
27  *    pcim_addr_resource_release(), while requests performed by
28  *    pcim_enable_device() + pci_*region*() are automatically cleaned up
29  *    through the for-loop in pcim_release().
30  *
31  * TODO 1:
32  * Remove the legacy table entirely once all calls to pcim_iomap_table() in
33  * the kernel have been removed.
34  *
35  * TODO 2:
36  * Port everyone calling pcim_enable_device() + pci_*region*() to using the
37  * pcim_ functions. Then, remove all devres functionality from pci_*region*()
38  * functions and remove the associated cleanups described above in point #2.
39  */
40 
/*
 * Legacy struct storing addresses to whole mapped BARs.
 */
struct pcim_iomap_devres {
	/* Mapping address per BAR index; NULL when the BAR is not mapped. */
	void __iomem *table[PCI_STD_NUM_BARS];
};
47 
/* Kinds of address devres, distinguished in pcim_addr_resource_release(). */
enum pcim_addr_devres_type {
	/* Default initializer. */
	PCIM_ADDR_DEVRES_TYPE_INVALID,

	/* A requested region spanning an entire BAR. */
	PCIM_ADDR_DEVRES_TYPE_REGION,

	/*
	 * A requested region spanning an entire BAR, and a mapping for
	 * the entire BAR.
	 */
	PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING,

	/*
	 * A mapping within a BAR, either spanning the whole BAR or just a
	 * range.  Without a requested region.
	 */
	PCIM_ADDR_DEVRES_TYPE_MAPPING,
};
67 
/*
 * This struct envelops IO or MEM addresses, i.e., mappings and region
 * requests, because those are very frequently requested and released
 * together.
 */
struct pcim_addr_devres {
	enum pcim_addr_devres_type type;
	void __iomem *baseaddr;	/* Mapping base address (mapping types only). */
	unsigned long offset;	/* Range start, relative to the BAR's base. */
	unsigned long len;	/* Range length in bytes. */
	int bar;		/* BAR index; -1 means "not associated with a BAR". */
};
80 
/* Reset @res to its default state: all fields zeroed, BAR index invalid. */
static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res)
{
	memset(res, 0, sizeof(*res));
	res->bar = -1;	/* 0 is a valid BAR index, so -1 marks "unset". */
}
86 
87 /*
88  * The following functions, __pcim_*_region*, exist as counterparts to the
89  * versions from pci.c - which, unfortunately, can be in "hybrid mode", i.e.,
90  * sometimes managed, sometimes not.
91  *
92  * To separate the APIs cleanly, we define our own, simplified versions here.
93  */
94 
95 /**
96  * __pcim_request_region_range - Request a ranged region
97  * @pdev: PCI device the region belongs to
98  * @bar: BAR the range is within
99  * @offset: offset from the BAR's start address
100  * @maxlen: length in bytes, beginning at @offset
101  * @name: name associated with the request
102  * @req_flags: flags for the request, e.g., for kernel-exclusive requests
103  *
104  * Returns: 0 on success, a negative error code on failure.
105  *
106  * Request a range within a device's PCI BAR.  Sanity check the input.
107  */
108 static int __pcim_request_region_range(struct pci_dev *pdev, int bar,
109 				       unsigned long offset,
110 				       unsigned long maxlen,
111 				       const char *name, int req_flags)
112 {
113 	resource_size_t start = pci_resource_start(pdev, bar);
114 	resource_size_t len = pci_resource_len(pdev, bar);
115 	unsigned long dev_flags = pci_resource_flags(pdev, bar);
116 
117 	if (start == 0 || len == 0) /* Unused BAR. */
118 		return 0;
119 	if (len <= offset)
120 		return -EINVAL;
121 
122 	start += offset;
123 	len -= offset;
124 
125 	if (len > maxlen && maxlen != 0)
126 		len = maxlen;
127 
128 	if (dev_flags & IORESOURCE_IO) {
129 		if (!request_region(start, len, name))
130 			return -EBUSY;
131 	} else if (dev_flags & IORESOURCE_MEM) {
132 		if (!__request_mem_region(start, len, name, req_flags))
133 			return -EBUSY;
134 	} else {
135 		/* That's not a device we can request anything on. */
136 		return -ENODEV;
137 	}
138 
139 	return 0;
140 }
141 
142 static void __pcim_release_region_range(struct pci_dev *pdev, int bar,
143 					unsigned long offset,
144 					unsigned long maxlen)
145 {
146 	resource_size_t start = pci_resource_start(pdev, bar);
147 	resource_size_t len = pci_resource_len(pdev, bar);
148 	unsigned long flags = pci_resource_flags(pdev, bar);
149 
150 	if (len <= offset || start == 0)
151 		return;
152 
153 	if (len == 0 || maxlen == 0) /* This an unused BAR. Do nothing. */
154 		return;
155 
156 	start += offset;
157 	len -= offset;
158 
159 	if (len > maxlen)
160 		len = maxlen;
161 
162 	if (flags & IORESOURCE_IO)
163 		release_region(start, len);
164 	else if (flags & IORESOURCE_MEM)
165 		release_mem_region(start, len);
166 }
167 
/* Request the full range of a BAR (offset 0, whole length). */
static int __pcim_request_region(struct pci_dev *pdev, int bar,
				 const char *name, int flags)
{
	return __pcim_request_region_range(pdev, bar, 0,
					   pci_resource_len(pdev, bar),
					   name, flags);
}
176 
/* Release the full range of a BAR (offset 0, whole length). */
static void __pcim_release_region(struct pci_dev *pdev, int bar)
{
	__pcim_release_region_range(pdev, bar, 0,
				    pci_resource_len(pdev, bar));
}
184 
185 static void pcim_addr_resource_release(struct device *dev, void *resource_raw)
186 {
187 	struct pci_dev *pdev = to_pci_dev(dev);
188 	struct pcim_addr_devres *res = resource_raw;
189 
190 	switch (res->type) {
191 	case PCIM_ADDR_DEVRES_TYPE_REGION:
192 		__pcim_release_region(pdev, res->bar);
193 		break;
194 	case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
195 		pci_iounmap(pdev, res->baseaddr);
196 		__pcim_release_region(pdev, res->bar);
197 		break;
198 	case PCIM_ADDR_DEVRES_TYPE_MAPPING:
199 		pci_iounmap(pdev, res->baseaddr);
200 		break;
201 	default:
202 		break;
203 	}
204 }
205 
206 static struct pcim_addr_devres *pcim_addr_devres_alloc(struct pci_dev *pdev)
207 {
208 	struct pcim_addr_devres *res;
209 
210 	res = devres_alloc_node(pcim_addr_resource_release, sizeof(*res),
211 				GFP_KERNEL, dev_to_node(&pdev->dev));
212 	if (res)
213 		pcim_addr_devres_clear(res);
214 	return res;
215 }
216 
/* Just for consistency and readability. */
static inline void pcim_addr_devres_free(struct pcim_addr_devres *res)
{
	/* Counterpart to pcim_addr_devres_alloc() for never-added devres. */
	devres_free(res);
}
222 
223 /*
224  * Used by devres to identify a pcim_addr_devres.
225  */
226 static int pcim_addr_resources_match(struct device *dev,
227 				     void *a_raw, void *b_raw)
228 {
229 	struct pcim_addr_devres *a, *b;
230 
231 	a = a_raw;
232 	b = b_raw;
233 
234 	if (a->type != b->type)
235 		return 0;
236 
237 	switch (a->type) {
238 	case PCIM_ADDR_DEVRES_TYPE_REGION:
239 	case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
240 		return a->bar == b->bar;
241 	case PCIM_ADDR_DEVRES_TYPE_MAPPING:
242 		return a->baseaddr == b->baseaddr;
243 	default:
244 		return 0;
245 	}
246 }
247 
/* Devres callback: @ptr holds the resource * stored by devm_pci_remap_iospace(). */
static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
{
	pci_unmap_iospace(*(struct resource **)ptr);
}
254 
255 /**
256  * devm_pci_remap_iospace - Managed pci_remap_iospace()
257  * @dev: Generic device to remap IO address for
258  * @res: Resource describing the I/O space
259  * @phys_addr: physical address of range to be mapped
260  *
261  * Managed pci_remap_iospace().  Map is automatically unmapped on driver
262  * detach.
263  */
264 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
265 			   phys_addr_t phys_addr)
266 {
267 	const struct resource **ptr;
268 	int error;
269 
270 	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
271 	if (!ptr)
272 		return -ENOMEM;
273 
274 	error = pci_remap_iospace(res, phys_addr);
275 	if (error) {
276 		devres_free(ptr);
277 	} else	{
278 		*ptr = res;
279 		devres_add(dev, ptr);
280 	}
281 
282 	return error;
283 }
284 EXPORT_SYMBOL(devm_pci_remap_iospace);
285 
286 /**
287  * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
288  * @dev: Generic device to remap IO address for
289  * @offset: Resource address to map
290  * @size: Size of map
291  *
292  * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
293  * detach.
294  */
295 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
296 				      resource_size_t offset,
297 				      resource_size_t size)
298 {
299 	void __iomem **ptr, *addr;
300 
301 	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
302 	if (!ptr)
303 		return NULL;
304 
305 	addr = pci_remap_cfgspace(offset, size);
306 	if (addr) {
307 		*ptr = addr;
308 		devres_add(dev, ptr);
309 	} else
310 		devres_free(ptr);
311 
312 	return addr;
313 }
314 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
315 
316 /**
317  * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
318  * @dev: generic device to handle the resource for
319  * @res: configuration space resource to be handled
320  *
321  * Checks that a resource is a valid memory region, requests the memory
322  * region and ioremaps with pci_remap_cfgspace() API that ensures the
323  * proper PCI configuration space memory attributes are guaranteed.
324  *
325  * All operations are managed and will be undone on driver detach.
326  *
327  * Returns a pointer to the remapped memory or an IOMEM_ERR_PTR() encoded error
328  * code on failure. Usage example::
329  *
330  *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
331  *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
332  *	if (IS_ERR(base))
333  *		return PTR_ERR(base);
334  */
335 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
336 					  struct resource *res)
337 {
338 	resource_size_t size;
339 	const char *name;
340 	void __iomem *dest_ptr;
341 
342 	BUG_ON(!dev);
343 
344 	if (!res || resource_type(res) != IORESOURCE_MEM) {
345 		dev_err(dev, "invalid resource\n");
346 		return IOMEM_ERR_PTR(-EINVAL);
347 	}
348 
349 	size = resource_size(res);
350 
351 	if (res->name)
352 		name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
353 				      res->name);
354 	else
355 		name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
356 	if (!name)
357 		return IOMEM_ERR_PTR(-ENOMEM);
358 
359 	if (!devm_request_mem_region(dev, res->start, size, name)) {
360 		dev_err(dev, "can't request region for resource %pR\n", res);
361 		return IOMEM_ERR_PTR(-EBUSY);
362 	}
363 
364 	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
365 	if (!dest_ptr) {
366 		dev_err(dev, "ioremap failed for resource %pR\n", res);
367 		devm_release_mem_region(dev, res->start, size);
368 		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
369 	}
370 
371 	return dest_ptr;
372 }
373 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
374 
375 /**
376  * pcim_set_mwi - a device-managed pci_set_mwi()
377  * @dev: the PCI device for which MWI is enabled
378  *
379  * Managed pci_set_mwi().
380  *
381  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
382  */
383 int pcim_set_mwi(struct pci_dev *dev)
384 {
385 	struct pci_devres *dr;
386 
387 	dr = find_pci_dr(dev);
388 	if (!dr)
389 		return -ENOMEM;
390 
391 	dr->mwi = 1;
392 	return pci_set_mwi(dev);
393 }
394 EXPORT_SYMBOL(pcim_set_mwi);
395 
396 static inline bool mask_contains_bar(int mask, int bar)
397 {
398 	return mask & BIT(bar);
399 }
400 
401 static void pcim_release(struct device *gendev, void *res)
402 {
403 	struct pci_dev *dev = to_pci_dev(gendev);
404 	struct pci_devres *this = res;
405 	int i;
406 
407 	/*
408 	 * This is legacy code.
409 	 *
410 	 * All regions requested by a pcim_ function do get released through
411 	 * pcim_addr_resource_release(). Thanks to the hybrid nature of the pci_
412 	 * region-request functions, this for-loop has to release the regions
413 	 * if they have been requested by such a function.
414 	 *
415 	 * TODO: Remove this once all users of pcim_enable_device() PLUS
416 	 * pci-region-request-functions have been ported to pcim_ functions.
417 	 */
418 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
419 		if (mask_contains_bar(this->region_mask, i))
420 			pci_release_region(dev, i);
421 
422 	if (this->mwi)
423 		pci_clear_mwi(dev);
424 
425 	if (this->restore_intx)
426 		pci_intx(dev, this->orig_intx);
427 
428 	if (this->enabled && !this->pinned)
429 		pci_disable_device(dev);
430 }
431 
432 /*
433  * TODO: After the last four callers in pci.c are ported, find_pci_dr()
434  * needs to be made static again.
435  */
436 struct pci_devres *find_pci_dr(struct pci_dev *pdev)
437 {
438 	if (pci_is_managed(pdev))
439 		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
440 	return NULL;
441 }
442 
443 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
444 {
445 	struct pci_devres *dr, *new_dr;
446 
447 	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
448 	if (dr)
449 		return dr;
450 
451 	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
452 	if (!new_dr)
453 		return NULL;
454 	return devres_get(&pdev->dev, new_dr, NULL, NULL);
455 }
456 
457 /**
458  * pcim_enable_device - Managed pci_enable_device()
459  * @pdev: PCI device to be initialized
460  *
461  * Managed pci_enable_device().
462  */
463 int pcim_enable_device(struct pci_dev *pdev)
464 {
465 	struct pci_devres *dr;
466 	int rc;
467 
468 	dr = get_pci_dr(pdev);
469 	if (unlikely(!dr))
470 		return -ENOMEM;
471 	if (dr->enabled)
472 		return 0;
473 
474 	rc = pci_enable_device(pdev);
475 	if (!rc) {
476 		pdev->is_managed = 1;
477 		dr->enabled = 1;
478 	}
479 	return rc;
480 }
481 EXPORT_SYMBOL(pcim_enable_device);
482 
483 /**
484  * pcim_pin_device - Pin managed PCI device
485  * @pdev: PCI device to pin
486  *
487  * Pin managed PCI device @pdev.  Pinned device won't be disabled on
488  * driver detach.  @pdev must have been enabled with
489  * pcim_enable_device().
490  */
491 void pcim_pin_device(struct pci_dev *pdev)
492 {
493 	struct pci_devres *dr;
494 
495 	dr = find_pci_dr(pdev);
496 	WARN_ON(!dr || !dr->enabled);
497 	if (dr)
498 		dr->pinned = 1;
499 }
500 EXPORT_SYMBOL(pcim_pin_device);
501 
/*
 * Devres release callback for the legacy iomap table.
 *
 * Do nothing. This is legacy code.
 *
 * Cleanup of the mappings is now done directly through the callbacks
 * registered when creating them.  This callback only exists so the table
 * devres can be identified via devres_find() in pcim_iomap_table().
 */
static void pcim_iomap_release(struct device *gendev, void *res)
{
}
511 
512 /**
513  * pcim_iomap_table - access iomap allocation table
514  * @pdev: PCI device to access iomap table for
515  *
516  * Returns:
517  * Const pointer to array of __iomem pointers on success, NULL on failure.
518  *
519  * Access iomap allocation table for @dev.  If iomap table doesn't
520  * exist and @pdev is managed, it will be allocated.  All iomaps
521  * recorded in the iomap table are automatically unmapped on driver
522  * detach.
523  *
524  * This function might sleep when the table is first allocated but can
525  * be safely called without context and guaranteed to succeed once
526  * allocated.
527  */
528 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
529 {
530 	struct pcim_iomap_devres *dr, *new_dr;
531 
532 	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
533 	if (dr)
534 		return dr->table;
535 
536 	new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
537 				   dev_to_node(&pdev->dev));
538 	if (!new_dr)
539 		return NULL;
540 	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
541 	return dr->table;
542 }
543 EXPORT_SYMBOL(pcim_iomap_table);
544 
545 /*
546  * Fill the legacy mapping-table, so that drivers using the old API can
547  * still get a BAR's mapping address through pcim_iomap_table().
548  */
549 static int pcim_add_mapping_to_legacy_table(struct pci_dev *pdev,
550 					    void __iomem *mapping, int bar)
551 {
552 	void __iomem **legacy_iomap_table;
553 
554 	if (bar >= PCI_STD_NUM_BARS)
555 		return -EINVAL;
556 
557 	legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
558 	if (!legacy_iomap_table)
559 		return -ENOMEM;
560 
561 	/* The legacy mechanism doesn't allow for duplicate mappings. */
562 	WARN_ON(legacy_iomap_table[bar]);
563 
564 	legacy_iomap_table[bar] = mapping;
565 
566 	return 0;
567 }
568 
569 /*
570  * Remove a mapping. The table only contains whole-BAR mappings, so this will
571  * never interfere with ranged mappings.
572  */
573 static void pcim_remove_mapping_from_legacy_table(struct pci_dev *pdev,
574 						  void __iomem *addr)
575 {
576 	int bar;
577 	void __iomem **legacy_iomap_table;
578 
579 	legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
580 	if (!legacy_iomap_table)
581 		return;
582 
583 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
584 		if (legacy_iomap_table[bar] == addr) {
585 			legacy_iomap_table[bar] = NULL;
586 			return;
587 		}
588 	}
589 }
590 
591 /*
592  * The same as pcim_remove_mapping_from_legacy_table(), but identifies the
593  * mapping by its BAR index.
594  */
595 static void pcim_remove_bar_from_legacy_table(struct pci_dev *pdev, int bar)
596 {
597 	void __iomem **legacy_iomap_table;
598 
599 	if (bar >= PCI_STD_NUM_BARS)
600 		return;
601 
602 	legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
603 	if (!legacy_iomap_table)
604 		return;
605 
606 	legacy_iomap_table[bar] = NULL;
607 }
608 
609 /**
610  * pcim_iomap - Managed pcim_iomap()
611  * @pdev: PCI device to iomap for
612  * @bar: BAR to iomap
613  * @maxlen: Maximum length of iomap
614  *
615  * Returns: __iomem pointer on success, NULL on failure.
616  *
617  * Managed pci_iomap(). Map is automatically unmapped on driver detach. If
618  * desired, unmap manually only with pcim_iounmap().
619  *
620  * This SHOULD only be used once per BAR.
621  *
622  * NOTE:
623  * Contrary to the other pcim_* functions, this function does not return an
624  * IOMEM_ERR_PTR() on failure, but a simple NULL. This is done for backwards
625  * compatibility.
626  */
627 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
628 {
629 	void __iomem *mapping;
630 	struct pcim_addr_devres *res;
631 
632 	res = pcim_addr_devres_alloc(pdev);
633 	if (!res)
634 		return NULL;
635 	res->type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
636 
637 	mapping = pci_iomap(pdev, bar, maxlen);
638 	if (!mapping)
639 		goto err_iomap;
640 	res->baseaddr = mapping;
641 
642 	if (pcim_add_mapping_to_legacy_table(pdev, mapping, bar) != 0)
643 		goto err_table;
644 
645 	devres_add(&pdev->dev, res);
646 	return mapping;
647 
648 err_table:
649 	pci_iounmap(pdev, mapping);
650 err_iomap:
651 	pcim_addr_devres_free(res);
652 	return NULL;
653 }
654 EXPORT_SYMBOL(pcim_iomap);
655 
656 /**
657  * pcim_iounmap - Managed pci_iounmap()
658  * @pdev: PCI device to iounmap for
659  * @addr: Address to unmap
660  *
661  * Managed pci_iounmap(). @addr must have been mapped using a pcim_* mapping
662  * function.
663  */
664 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
665 {
666 	struct pcim_addr_devres res_searched;
667 
668 	pcim_addr_devres_clear(&res_searched);
669 	res_searched.type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
670 	res_searched.baseaddr = addr;
671 
672 	if (devres_release(&pdev->dev, pcim_addr_resource_release,
673 			pcim_addr_resources_match, &res_searched) != 0) {
674 		/* Doesn't exist. User passed nonsense. */
675 		return;
676 	}
677 
678 	pcim_remove_mapping_from_legacy_table(pdev, addr);
679 }
680 EXPORT_SYMBOL(pcim_iounmap);
681 
682 /**
683  * pcim_iomap_region - Request and iomap a PCI BAR
684  * @pdev: PCI device to map IO resources for
685  * @bar: Index of a BAR to map
686  * @name: Name associated with the request
687  *
688  * Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
689  *
690  * Mapping and region will get automatically released on driver detach. If
691  * desired, release manually only with pcim_iounmap_region().
692  */
693 static void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
694 				       const char *name)
695 {
696 	int ret;
697 	struct pcim_addr_devres *res;
698 
699 	res = pcim_addr_devres_alloc(pdev);
700 	if (!res)
701 		return IOMEM_ERR_PTR(-ENOMEM);
702 
703 	res->type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
704 	res->bar = bar;
705 
706 	ret = __pcim_request_region(pdev, bar, name, 0);
707 	if (ret != 0)
708 		goto err_region;
709 
710 	res->baseaddr = pci_iomap(pdev, bar, 0);
711 	if (!res->baseaddr) {
712 		ret = -EINVAL;
713 		goto err_iomap;
714 	}
715 
716 	devres_add(&pdev->dev, res);
717 	return res->baseaddr;
718 
719 err_iomap:
720 	__pcim_release_region(pdev, bar);
721 err_region:
722 	pcim_addr_devres_free(res);
723 
724 	return IOMEM_ERR_PTR(ret);
725 }
726 
727 /**
728  * pcim_iounmap_region - Unmap and release a PCI BAR
729  * @pdev: PCI device to operate on
730  * @bar: Index of BAR to unmap and release
731  *
732  * Unmap a BAR and release its region manually. Only pass BARs that were
733  * previously mapped by pcim_iomap_region().
734  */
735 static void pcim_iounmap_region(struct pci_dev *pdev, int bar)
736 {
737 	struct pcim_addr_devres res_searched;
738 
739 	pcim_addr_devres_clear(&res_searched);
740 	res_searched.type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
741 	res_searched.bar = bar;
742 
743 	devres_release(&pdev->dev, pcim_addr_resource_release,
744 			pcim_addr_resources_match, &res_searched);
745 }
746 
747 /**
748  * pcim_iomap_regions - Request and iomap PCI BARs
749  * @pdev: PCI device to map IO resources for
750  * @mask: Mask of BARs to request and iomap
751  * @name: Name associated with the requests
752  *
753  * Returns: 0 on success, negative error code on failure.
754  *
755  * Request and iomap regions specified by @mask.
756  */
757 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
758 {
759 	int ret;
760 	int bar;
761 	void __iomem *mapping;
762 
763 	for (bar = 0; bar < DEVICE_COUNT_RESOURCE; bar++) {
764 		if (!mask_contains_bar(mask, bar))
765 			continue;
766 
767 		mapping = pcim_iomap_region(pdev, bar, name);
768 		if (IS_ERR(mapping)) {
769 			ret = PTR_ERR(mapping);
770 			goto err;
771 		}
772 		ret = pcim_add_mapping_to_legacy_table(pdev, mapping, bar);
773 		if (ret != 0)
774 			goto err;
775 	}
776 
777 	return 0;
778 
779 err:
780 	while (--bar >= 0) {
781 		pcim_iounmap_region(pdev, bar);
782 		pcim_remove_bar_from_legacy_table(pdev, bar);
783 	}
784 
785 	return ret;
786 }
787 EXPORT_SYMBOL(pcim_iomap_regions);
788 
789 static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
790 				int request_flags)
791 {
792 	int ret;
793 	struct pcim_addr_devres *res;
794 
795 	res = pcim_addr_devres_alloc(pdev);
796 	if (!res)
797 		return -ENOMEM;
798 	res->type = PCIM_ADDR_DEVRES_TYPE_REGION;
799 	res->bar = bar;
800 
801 	ret = __pcim_request_region(pdev, bar, name, request_flags);
802 	if (ret != 0) {
803 		pcim_addr_devres_free(res);
804 		return ret;
805 	}
806 
807 	devres_add(&pdev->dev, res);
808 	return 0;
809 }
810 
/**
 * pcim_request_region - Request a PCI BAR
 * @pdev: PCI device to request region for
 * @bar: Index of BAR to request
 * @name: Name associated with the request
 *
 * Returns: 0 on success, a negative error code on failure.
 *
 * Request region specified by @bar.
 *
 * The region will automatically be released on driver detach. If desired,
 * release manually only with pcim_release_region().
 */
static int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
{
	return _pcim_request_region(pdev, bar, name, 0);
}
828 
829 /**
830  * pcim_release_region - Release a PCI BAR
831  * @pdev: PCI device to operate on
832  * @bar: Index of BAR to release
833  *
834  * Release a region manually that was previously requested by
835  * pcim_request_region().
836  */
837 static void pcim_release_region(struct pci_dev *pdev, int bar)
838 {
839 	struct pcim_addr_devres res_searched;
840 
841 	pcim_addr_devres_clear(&res_searched);
842 	res_searched.type = PCIM_ADDR_DEVRES_TYPE_REGION;
843 	res_searched.bar = bar;
844 
845 	devres_release(&pdev->dev, pcim_addr_resource_release,
846 			pcim_addr_resources_match, &res_searched);
847 }
848 
849 
850 /**
851  * pcim_release_all_regions - Release all regions of a PCI-device
852  * @pdev: the PCI device
853  *
854  * Release all regions previously requested through pcim_request_region()
855  * or pcim_request_all_regions().
856  *
857  * Can be called from any context, i.e., not necessarily as a counterpart to
858  * pcim_request_all_regions().
859  */
860 static void pcim_release_all_regions(struct pci_dev *pdev)
861 {
862 	int bar;
863 
864 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
865 		pcim_release_region(pdev, bar);
866 }
867 
868 /**
869  * pcim_request_all_regions - Request all regions
870  * @pdev: PCI device to map IO resources for
871  * @name: name associated with the request
872  *
873  * Returns: 0 on success, negative error code on failure.
874  *
875  * Requested regions will automatically be released at driver detach. If
876  * desired, release individual regions with pcim_release_region() or all of
877  * them at once with pcim_release_all_regions().
878  */
879 static int pcim_request_all_regions(struct pci_dev *pdev, const char *name)
880 {
881 	int ret;
882 	int bar;
883 
884 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
885 		ret = pcim_request_region(pdev, bar, name);
886 		if (ret != 0)
887 			goto err;
888 	}
889 
890 	return 0;
891 
892 err:
893 	pcim_release_all_regions(pdev);
894 
895 	return ret;
896 }
897 
898 /**
899  * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
900  * @pdev: PCI device to map IO resources for
901  * @mask: Mask of BARs to iomap
902  * @name: Name associated with the requests
903  *
904  * Returns: 0 on success, negative error code on failure.
905  *
906  * Request all PCI BARs and iomap regions specified by @mask.
907  *
908  * To release these resources manually, call pcim_release_region() for the
909  * regions and pcim_iounmap() for the mappings.
910  */
911 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
912 				   const char *name)
913 {
914 	int bar;
915 	int ret;
916 	void __iomem **legacy_iomap_table;
917 
918 	ret = pcim_request_all_regions(pdev, name);
919 	if (ret != 0)
920 		return ret;
921 
922 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
923 		if (!mask_contains_bar(mask, bar))
924 			continue;
925 		if (!pcim_iomap(pdev, bar, 0))
926 			goto err;
927 	}
928 
929 	return 0;
930 
931 err:
932 	/*
933 	 * If bar is larger than 0, then pcim_iomap() above has most likely
934 	 * failed because of -EINVAL. If it is equal 0, most likely the table
935 	 * couldn't be created, indicating -ENOMEM.
936 	 */
937 	ret = bar > 0 ? -EINVAL : -ENOMEM;
938 	legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
939 
940 	while (--bar >= 0)
941 		pcim_iounmap(pdev, legacy_iomap_table[bar]);
942 
943 	pcim_release_all_regions(pdev);
944 
945 	return ret;
946 }
947 EXPORT_SYMBOL(pcim_iomap_regions_request_all);
948 
949 /**
950  * pcim_iounmap_regions - Unmap and release PCI BARs
951  * @pdev: PCI device to map IO resources for
952  * @mask: Mask of BARs to unmap and release
953  *
954  * Unmap and release regions specified by @mask.
955  */
956 void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
957 {
958 	int i;
959 
960 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
961 		if (!mask_contains_bar(mask, i))
962 			continue;
963 
964 		pcim_iounmap_region(pdev, i);
965 		pcim_remove_bar_from_legacy_table(pdev, i);
966 	}
967 }
968 EXPORT_SYMBOL(pcim_iounmap_regions);
969