xref: /f-stack/dpdk/kernel/linux/igb_uio/igb_uio.c (revision 6b8a3e40)
1*6b8a3e40Sjfb8856606 // SPDX-License-Identifier: GPL-2.0
2*6b8a3e40Sjfb8856606 /*-
3*6b8a3e40Sjfb8856606  * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
4*6b8a3e40Sjfb8856606  */
5*6b8a3e40Sjfb8856606 
6*6b8a3e40Sjfb8856606 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7*6b8a3e40Sjfb8856606 
8*6b8a3e40Sjfb8856606 #include <linux/device.h>
9*6b8a3e40Sjfb8856606 #include <linux/module.h>
10*6b8a3e40Sjfb8856606 #include <linux/pci.h>
11*6b8a3e40Sjfb8856606 #include <linux/uio_driver.h>
12*6b8a3e40Sjfb8856606 #include <linux/io.h>
13*6b8a3e40Sjfb8856606 #include <linux/irq.h>
14*6b8a3e40Sjfb8856606 #include <linux/msi.h>
15*6b8a3e40Sjfb8856606 #include <linux/version.h>
16*6b8a3e40Sjfb8856606 #include <linux/slab.h>
17*6b8a3e40Sjfb8856606 
18*6b8a3e40Sjfb8856606 /**
19*6b8a3e40Sjfb8856606  * These enum and macro definitions are copied from the
20*6b8a3e40Sjfb8856606  * file rte_pci_dev_features.h
21*6b8a3e40Sjfb8856606  */
/* Interrupt delivery modes this driver can use; values and names must
 * stay in sync with DPDK's rte_pci_dev_features.h (see comment above).
 */
enum rte_intr_mode {
	RTE_INTR_MODE_NONE = 0,	/* no interrupt at all */
	RTE_INTR_MODE_LEGACY,	/* legacy INTx line interrupt */
	RTE_INTR_MODE_MSI,	/* message-signalled interrupt (MSI) */
	RTE_INTR_MODE_MSIX	/* extended message-signalled interrupt (MSI-X) */
};
/* String forms accepted by the "intr_mode" module parameter. */
#define RTE_INTR_MODE_NONE_NAME "none"
#define RTE_INTR_MODE_LEGACY_NAME "legacy"
#define RTE_INTR_MODE_MSI_NAME "msi"
#define RTE_INTR_MODE_MSIX_NAME "msix"
32*6b8a3e40Sjfb8856606 
33*6b8a3e40Sjfb8856606 
34*6b8a3e40Sjfb8856606 #include "compat.h"
35*6b8a3e40Sjfb8856606 
36*6b8a3e40Sjfb8856606 /**
37*6b8a3e40Sjfb8856606  * A structure describing the private information for a uio device.
38*6b8a3e40Sjfb8856606  */
struct rte_uio_pci_dev {
	struct uio_info info;		/* UIO device info registered with the UIO core */
	struct pci_dev *pdev;		/* backing PCI device */
	enum rte_intr_mode mode;	/* interrupt mode actually selected at open time */
	atomic_t refcnt;		/* number of userspace open()s; IRQs armed on first open */
};
45*6b8a3e40Sjfb8856606 
/* Module parameter: non-zero requests write-combining support (per
 * MODULE_PARM_DESC below); when set, BARs are not ioremap'd in the kernel. */
static int wc_activate;
/* Module parameter: requested interrupt mode string ("msix"/"msi"/"legacy"). */
static char *intr_mode;
/* Preferred IRQ mode parsed from intr_mode; defaults to MSI-X. */
static enum rte_intr_mode igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
49*6b8a3e40Sjfb8856606 /* sriov sysfs */
50*6b8a3e40Sjfb8856606 static ssize_t
show_max_vfs(struct device * dev,struct device_attribute * attr,char * buf)51*6b8a3e40Sjfb8856606 show_max_vfs(struct device *dev, struct device_attribute *attr,
52*6b8a3e40Sjfb8856606 	     char *buf)
53*6b8a3e40Sjfb8856606 {
54*6b8a3e40Sjfb8856606 	return snprintf(buf, 10, "%u\n", dev_num_vf(dev));
55*6b8a3e40Sjfb8856606 }
56*6b8a3e40Sjfb8856606 
57*6b8a3e40Sjfb8856606 static ssize_t
store_max_vfs(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)58*6b8a3e40Sjfb8856606 store_max_vfs(struct device *dev, struct device_attribute *attr,
59*6b8a3e40Sjfb8856606 	      const char *buf, size_t count)
60*6b8a3e40Sjfb8856606 {
61*6b8a3e40Sjfb8856606 	int err = 0;
62*6b8a3e40Sjfb8856606 	unsigned long max_vfs;
63*6b8a3e40Sjfb8856606 	struct pci_dev *pdev = to_pci_dev(dev);
64*6b8a3e40Sjfb8856606 
65*6b8a3e40Sjfb8856606 	if (0 != kstrtoul(buf, 0, &max_vfs))
66*6b8a3e40Sjfb8856606 		return -EINVAL;
67*6b8a3e40Sjfb8856606 
68*6b8a3e40Sjfb8856606 	if (0 == max_vfs)
69*6b8a3e40Sjfb8856606 		pci_disable_sriov(pdev);
70*6b8a3e40Sjfb8856606 	else if (0 == pci_num_vf(pdev))
71*6b8a3e40Sjfb8856606 		err = pci_enable_sriov(pdev, max_vfs);
72*6b8a3e40Sjfb8856606 	else /* do nothing if change max_vfs number */
73*6b8a3e40Sjfb8856606 		err = -EINVAL;
74*6b8a3e40Sjfb8856606 
75*6b8a3e40Sjfb8856606 	return err ? err : count;
76*6b8a3e40Sjfb8856606 }
77*6b8a3e40Sjfb8856606 
/* sysfs attribute "max_vfs": world-readable, root-writable. */
static DEVICE_ATTR(max_vfs, S_IRUGO | S_IWUSR, show_max_vfs, store_max_vfs);

/* NULL-terminated attribute list for the device's sysfs group. */
static struct attribute *dev_attrs[] = {
	&dev_attr_max_vfs.attr,
	NULL,
};

static const struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
88*6b8a3e40Sjfb8856606 
89*6b8a3e40Sjfb8856606 #ifndef HAVE_PCI_MSI_MASK_IRQ
90*6b8a3e40Sjfb8856606 /*
91*6b8a3e40Sjfb8856606  * It masks the msix on/off of generating MSI-X messages.
92*6b8a3e40Sjfb8856606  */
static void
igbuio_msix_mask_irq(struct msi_desc *desc, s32 state)
{
	u32 mask_bits = desc->masked;
	/* byte offset of this entry's Vector Control word in the MSI-X table */
	unsigned int offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
						PCI_MSIX_ENTRY_VECTOR_CTRL;

	if (state != 0)
		mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;	/* unmask vector */
	else
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;	/* mask vector */

	/* only touch the hardware when the mask bit actually changes */
	if (mask_bits != desc->masked) {
		writel(mask_bits, desc->mask_base + offset);
		/* read back — presumably to flush the posted write; confirm */
		readl(desc->mask_base);
		desc->masked = mask_bits;
	}
}
111*6b8a3e40Sjfb8856606 
112*6b8a3e40Sjfb8856606 /*
113*6b8a3e40Sjfb8856606  * It masks the msi on/off of generating MSI messages.
114*6b8a3e40Sjfb8856606  */
static void
igbuio_msi_mask_irq(struct pci_dev *pdev, struct msi_desc *desc, int32_t state)
{
	u32 mask_bits = desc->masked;
	/* vector index of this IRQ relative to the device's base IRQ */
	u32 offset = desc->irq - pdev->irq;
	u32 mask = 1 << offset;

	/* device advertises no per-vector mask capability: nothing to do */
	if (!desc->msi_attrib.maskbit)
		return;

	if (state != 0)
		mask_bits &= ~mask;	/* unmask this vector */
	else
		mask_bits |= mask;	/* mask this vector */

	/* write the MSI mask register in config space only on change */
	if (mask_bits != desc->masked) {
		pci_write_config_dword(pdev, desc->mask_pos, mask_bits);
		desc->masked = mask_bits;
	}
}
135*6b8a3e40Sjfb8856606 
/*
 * Mask (irq_state == 0) or unmask (non-zero) every MSI/MSI-X descriptor
 * of @pdev, for kernels without pci_msi_mask_irq()/pci_msi_unmask_irq().
 */
static void
igbuio_mask_irq(struct pci_dev *pdev, enum rte_intr_mode mode, s32 irq_state)
{
	struct msi_desc *desc;
	struct list_head *msi_list;

	/* the msi_list moved from struct pci_dev into struct device in
	 * newer kernels; compat.h decides which one this kernel has */
#ifdef HAVE_MSI_LIST_IN_GENERIC_DEVICE
	msi_list = &pdev->dev.msi_list;
#else
	msi_list = &pdev->msi_list;
#endif

	if (mode == RTE_INTR_MODE_MSIX) {
		list_for_each_entry(desc, msi_list, list)
			igbuio_msix_mask_irq(desc, irq_state);
	} else if (mode == RTE_INTR_MODE_MSI) {
		list_for_each_entry(desc, msi_list, list)
			igbuio_msi_mask_irq(pdev, desc, irq_state);
	}
}
156*6b8a3e40Sjfb8856606 #endif
157*6b8a3e40Sjfb8856606 
158*6b8a3e40Sjfb8856606 /**
159*6b8a3e40Sjfb8856606  * This is the irqcontrol callback to be registered to uio_info.
160*6b8a3e40Sjfb8856606  * It can be used to disable/enable interrupt from user space processes.
161*6b8a3e40Sjfb8856606  *
162*6b8a3e40Sjfb8856606  * @param info
163*6b8a3e40Sjfb8856606  *  pointer to uio_info.
164*6b8a3e40Sjfb8856606  * @param irq_state
165*6b8a3e40Sjfb8856606  *  state value. 1 to enable interrupt, 0 to disable interrupt.
166*6b8a3e40Sjfb8856606  *
167*6b8a3e40Sjfb8856606  * @return
168*6b8a3e40Sjfb8856606  *  - On success, 0.
169*6b8a3e40Sjfb8856606  *  - On failure, a negative value.
170*6b8a3e40Sjfb8856606  */
static int
igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *pdev = udev->pdev;

#ifdef HAVE_PCI_MSI_MASK_IRQ
	struct irq_data *irq = irq_get_irq_data(udev->info.irq);
#endif

	/* serialize against concurrent config-space access */
	pci_cfg_access_lock(pdev);

	if (udev->mode == RTE_INTR_MODE_MSIX || udev->mode == RTE_INTR_MODE_MSI) {
#ifdef HAVE_PCI_MSI_MASK_IRQ
		/* newer kernels: use the core MSI mask/unmask helpers */
		if (irq_state == 1)
			pci_msi_unmask_irq(irq);
		else
			pci_msi_mask_irq(irq);
#else
		/* older kernels: manipulate the mask bits by hand */
		igbuio_mask_irq(pdev, udev->mode, irq_state);
#endif
	}

	/* legacy INTx: toggle the line enable directly */
	if (udev->mode == RTE_INTR_MODE_LEGACY)
		pci_intx(pdev, !!irq_state);

	pci_cfg_access_unlock(pdev);

	return 0;
}
201*6b8a3e40Sjfb8856606 
202*6b8a3e40Sjfb8856606 /**
203*6b8a3e40Sjfb8856606  * This is interrupt handler which will check if the interrupt is for the right device.
204*6b8a3e40Sjfb8856606  * If yes, disable it here and will be enable later.
205*6b8a3e40Sjfb8856606  */
static irqreturn_t
igbuio_pci_irqhandler(int irq, void *dev_id)
{
	struct rte_uio_pci_dev *udev = (struct rte_uio_pci_dev *)dev_id;
	struct uio_info *info = &udev->info;

	/* Legacy mode need to mask in hardware; the check also tells us
	 * whether this (potentially shared) interrupt is really ours */
	if (udev->mode == RTE_INTR_MODE_LEGACY &&
	    !pci_check_and_mask_intx(udev->pdev))
		return IRQ_NONE;

	/* wake up any process blocked on the uio device file */
	uio_event_notify(info);

	/* Message signal mode, no share IRQ and automasked */
	return IRQ_HANDLED;
}
222*6b8a3e40Sjfb8856606 
/*
 * Select and arm an interrupt source for @udev, trying the preferred
 * mode first and deliberately falling through the switch to weaker
 * modes (MSI-X -> MSI -> INTx -> none) until one succeeds.
 * The compat.h macro HAVE_ALLOC_IRQ_VECTORS chooses between the old
 * pci_enable_msix()/pci_enable_msi() API and pci_alloc_irq_vectors().
 * Returns 0 on success or a negative errno.
 */
static int
igbuio_pci_enable_interrupts(struct rte_uio_pci_dev *udev)
{
	int err = 0;
#ifndef HAVE_ALLOC_IRQ_VECTORS
	struct msix_entry msix_entry;
#endif

	switch (igbuio_intr_mode_preferred) {
	case RTE_INTR_MODE_MSIX:
		/* Only 1 msi-x vector needed */
#ifndef HAVE_ALLOC_IRQ_VECTORS
		msix_entry.entry = 0;
		if (pci_enable_msix(udev->pdev, &msix_entry, 1) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = msix_entry.vector;
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSIX) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#endif

	/* falls through - to MSI */
	case RTE_INTR_MODE_MSI:
#ifndef HAVE_ALLOC_IRQ_VECTORS
		if (pci_enable_msi(udev->pdev) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#endif
	/* falls through - to INTX */
	case RTE_INTR_MODE_LEGACY:
		/* INTx is only usable when the device supports masking it,
		 * because the IRQ handler masks in hardware */
		if (pci_intx_mask_supported(udev->pdev)) {
			dev_dbg(&udev->pdev->dev, "using INTX");
			udev->info.irq_flags = IRQF_SHARED | IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_LEGACY;
			break;
		}
		dev_notice(&udev->pdev->dev, "PCI INTX mask not supported\n");
	/* falls through - to no IRQ */
	case RTE_INTR_MODE_NONE:
		udev->mode = RTE_INTR_MODE_NONE;
		udev->info.irq = UIO_IRQ_NONE;
		break;

	default:
		dev_err(&udev->pdev->dev, "invalid IRQ mode %u",
			igbuio_intr_mode_preferred);
		udev->info.irq = UIO_IRQ_NONE;
		err = -EINVAL;
	}

	/* register the handler unless we ended up with no IRQ at all */
	if (udev->info.irq != UIO_IRQ_NONE)
		err = request_irq(udev->info.irq, igbuio_pci_irqhandler,
				  udev->info.irq_flags, udev->info.name,
				  udev);
	dev_info(&udev->pdev->dev, "uio device registered with irq %ld\n",
		 udev->info.irq);

	return err;
}
304*6b8a3e40Sjfb8856606 
/*
 * Undo igbuio_pci_enable_interrupts(): free the registered IRQ handler
 * and release the MSI/MSI-X vectors using whichever PCI API this kernel
 * provides (selected via HAVE_ALLOC_IRQ_VECTORS from compat.h).
 */
static void
igbuio_pci_disable_interrupts(struct rte_uio_pci_dev *udev)
{
	if (udev->info.irq) {
		free_irq(udev->info.irq, udev);
		udev->info.irq = 0;
	}

#ifndef HAVE_ALLOC_IRQ_VECTORS
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(udev->pdev);
	if (udev->mode == RTE_INTR_MODE_MSI)
		pci_disable_msi(udev->pdev);
#else
	if (udev->mode == RTE_INTR_MODE_MSIX ||
	    udev->mode == RTE_INTR_MODE_MSI)
		pci_free_irq_vectors(udev->pdev);
#endif
}
324*6b8a3e40Sjfb8856606 
325*6b8a3e40Sjfb8856606 
326*6b8a3e40Sjfb8856606 /**
327*6b8a3e40Sjfb8856606  * This gets called while opening uio device file.
328*6b8a3e40Sjfb8856606  */
static int
igbuio_pci_open(struct uio_info *info, struct inode *inode)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *dev = udev->pdev;
	int err;

	/* only the first opener arms the device; later opens are no-ops */
	if (atomic_inc_return(&udev->refcnt) != 1)
		return 0;

	/* set bus master, which was cleared by the reset function */
	pci_set_master(dev);

	/* enable interrupts */
	err = igbuio_pci_enable_interrupts(udev);
	if (err) {
		/* roll back the refcount so a retry starts fresh */
		atomic_dec(&udev->refcnt);
		dev_err(&dev->dev, "Enable interrupt fails\n");
	}
	return err;
}
350*6b8a3e40Sjfb8856606 
/*
 * Called on close of the uio device file. The last closer tears down
 * interrupts and stops bus mastering (halting further DMA).
 */
static int
igbuio_pci_release(struct uio_info *info, struct inode *inode)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *dev = udev->pdev;

	if (atomic_dec_and_test(&udev->refcnt)) {
		/* disable interrupts */
		igbuio_pci_disable_interrupts(udev);

		/* stop the device from further DMA */
		pci_clear_master(dev);
	}

	return 0;
}
367*6b8a3e40Sjfb8856606 
368*6b8a3e40Sjfb8856606 /* Remap pci resources described by bar #pci_bar in uio resource n. */
369*6b8a3e40Sjfb8856606 static int
igbuio_pci_setup_iomem(struct pci_dev * dev,struct uio_info * info,int n,int pci_bar,const char * name)370*6b8a3e40Sjfb8856606 igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
371*6b8a3e40Sjfb8856606 		       int n, int pci_bar, const char *name)
372*6b8a3e40Sjfb8856606 {
373*6b8a3e40Sjfb8856606 	unsigned long addr, len;
374*6b8a3e40Sjfb8856606 	void *internal_addr;
375*6b8a3e40Sjfb8856606 
376*6b8a3e40Sjfb8856606 	if (n >= ARRAY_SIZE(info->mem))
377*6b8a3e40Sjfb8856606 		return -EINVAL;
378*6b8a3e40Sjfb8856606 
379*6b8a3e40Sjfb8856606 	addr = pci_resource_start(dev, pci_bar);
380*6b8a3e40Sjfb8856606 	len = pci_resource_len(dev, pci_bar);
381*6b8a3e40Sjfb8856606 	if (addr == 0 || len == 0)
382*6b8a3e40Sjfb8856606 		return -1;
383*6b8a3e40Sjfb8856606 	if (wc_activate == 0) {
384*6b8a3e40Sjfb8856606 		internal_addr = ioremap(addr, len);
385*6b8a3e40Sjfb8856606 		if (internal_addr == NULL)
386*6b8a3e40Sjfb8856606 			return -1;
387*6b8a3e40Sjfb8856606 	} else {
388*6b8a3e40Sjfb8856606 		internal_addr = NULL;
389*6b8a3e40Sjfb8856606 	}
390*6b8a3e40Sjfb8856606 	info->mem[n].name = name;
391*6b8a3e40Sjfb8856606 	info->mem[n].addr = addr;
392*6b8a3e40Sjfb8856606 	info->mem[n].internal_addr = internal_addr;
393*6b8a3e40Sjfb8856606 	info->mem[n].size = len;
394*6b8a3e40Sjfb8856606 	info->mem[n].memtype = UIO_MEM_PHYS;
395*6b8a3e40Sjfb8856606 	return 0;
396*6b8a3e40Sjfb8856606 }
397*6b8a3e40Sjfb8856606 
398*6b8a3e40Sjfb8856606 /* Get pci port io resources described by bar #pci_bar in uio resource n. */
399*6b8a3e40Sjfb8856606 static int
igbuio_pci_setup_ioport(struct pci_dev * dev,struct uio_info * info,int n,int pci_bar,const char * name)400*6b8a3e40Sjfb8856606 igbuio_pci_setup_ioport(struct pci_dev *dev, struct uio_info *info,
401*6b8a3e40Sjfb8856606 		int n, int pci_bar, const char *name)
402*6b8a3e40Sjfb8856606 {
403*6b8a3e40Sjfb8856606 	unsigned long addr, len;
404*6b8a3e40Sjfb8856606 
405*6b8a3e40Sjfb8856606 	if (n >= ARRAY_SIZE(info->port))
406*6b8a3e40Sjfb8856606 		return -EINVAL;
407*6b8a3e40Sjfb8856606 
408*6b8a3e40Sjfb8856606 	addr = pci_resource_start(dev, pci_bar);
409*6b8a3e40Sjfb8856606 	len = pci_resource_len(dev, pci_bar);
410*6b8a3e40Sjfb8856606 	if (addr == 0 || len == 0)
411*6b8a3e40Sjfb8856606 		return -EINVAL;
412*6b8a3e40Sjfb8856606 
413*6b8a3e40Sjfb8856606 	info->port[n].name = name;
414*6b8a3e40Sjfb8856606 	info->port[n].start = addr;
415*6b8a3e40Sjfb8856606 	info->port[n].size = len;
416*6b8a3e40Sjfb8856606 	info->port[n].porttype = UIO_PORT_X86;
417*6b8a3e40Sjfb8856606 
418*6b8a3e40Sjfb8856606 	return 0;
419*6b8a3e40Sjfb8856606 }
420*6b8a3e40Sjfb8856606 
421*6b8a3e40Sjfb8856606 /* Unmap previously ioremap'd resources */
422*6b8a3e40Sjfb8856606 static void
igbuio_pci_release_iomem(struct uio_info * info)423*6b8a3e40Sjfb8856606 igbuio_pci_release_iomem(struct uio_info *info)
424*6b8a3e40Sjfb8856606 {
425*6b8a3e40Sjfb8856606 	int i;
426*6b8a3e40Sjfb8856606 
427*6b8a3e40Sjfb8856606 	for (i = 0; i < MAX_UIO_MAPS; i++) {
428*6b8a3e40Sjfb8856606 		if (info->mem[i].internal_addr)
429*6b8a3e40Sjfb8856606 			iounmap(info->mem[i].internal_addr);
430*6b8a3e40Sjfb8856606 	}
431*6b8a3e40Sjfb8856606 }
432*6b8a3e40Sjfb8856606 
433*6b8a3e40Sjfb8856606 static int
igbuio_setup_bars(struct pci_dev * dev,struct uio_info * info)434*6b8a3e40Sjfb8856606 igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info)
435*6b8a3e40Sjfb8856606 {
436*6b8a3e40Sjfb8856606 	int i, iom, iop, ret;
437*6b8a3e40Sjfb8856606 	unsigned long flags;
438*6b8a3e40Sjfb8856606 	static const char *bar_names[PCI_STD_RESOURCE_END + 1]  = {
439*6b8a3e40Sjfb8856606 		"BAR0",
440*6b8a3e40Sjfb8856606 		"BAR1",
441*6b8a3e40Sjfb8856606 		"BAR2",
442*6b8a3e40Sjfb8856606 		"BAR3",
443*6b8a3e40Sjfb8856606 		"BAR4",
444*6b8a3e40Sjfb8856606 		"BAR5",
445*6b8a3e40Sjfb8856606 	};
446*6b8a3e40Sjfb8856606 
447*6b8a3e40Sjfb8856606 	iom = 0;
448*6b8a3e40Sjfb8856606 	iop = 0;
449*6b8a3e40Sjfb8856606 
450*6b8a3e40Sjfb8856606 	for (i = 0; i < ARRAY_SIZE(bar_names); i++) {
451*6b8a3e40Sjfb8856606 		if (pci_resource_len(dev, i) != 0 &&
452*6b8a3e40Sjfb8856606 				pci_resource_start(dev, i) != 0) {
453*6b8a3e40Sjfb8856606 			flags = pci_resource_flags(dev, i);
454*6b8a3e40Sjfb8856606 			if (flags & IORESOURCE_MEM) {
455*6b8a3e40Sjfb8856606 				ret = igbuio_pci_setup_iomem(dev, info, iom,
456*6b8a3e40Sjfb8856606 							     i, bar_names[i]);
457*6b8a3e40Sjfb8856606 				if (ret != 0)
458*6b8a3e40Sjfb8856606 					return ret;
459*6b8a3e40Sjfb8856606 				iom++;
460*6b8a3e40Sjfb8856606 			} else if (flags & IORESOURCE_IO) {
461*6b8a3e40Sjfb8856606 				ret = igbuio_pci_setup_ioport(dev, info, iop,
462*6b8a3e40Sjfb8856606 							      i, bar_names[i]);
463*6b8a3e40Sjfb8856606 				if (ret != 0)
464*6b8a3e40Sjfb8856606 					return ret;
465*6b8a3e40Sjfb8856606 				iop++;
466*6b8a3e40Sjfb8856606 			}
467*6b8a3e40Sjfb8856606 		}
468*6b8a3e40Sjfb8856606 	}
469*6b8a3e40Sjfb8856606 
470*6b8a3e40Sjfb8856606 	return (iom != 0 || iop != 0) ? ret : -ENOENT;
471*6b8a3e40Sjfb8856606 }
472*6b8a3e40Sjfb8856606 
/*
 * Probe: enable the PCI device, map its BARs into uio resources, set
 * 64-bit DMA masks, expose the sriov sysfs group, and register the uio
 * device. Error paths unwind via the goto labels at the bottom.
 * __devinit is required on pre-3.8 kernels and gone afterwards.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
static int __devinit
#else
static int
#endif
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct rte_uio_pci_dev *udev;
	dma_addr_t map_dma_addr;
	void *map_addr;
	int err;

#ifdef HAVE_PCI_IS_BRIDGE_API
	/* binding a bridge to this driver makes no sense; refuse early */
	if (pci_is_bridge(dev)) {
		dev_warn(&dev->dev, "Ignoring PCI bridge device\n");
		return -ENODEV;
	}
#endif

	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	/*
	 * enable device: ask low-level code to enable I/O and
	 * memory
	 */
	err = pci_enable_device(dev);
	if (err != 0) {
		dev_err(&dev->dev, "Cannot enable PCI device\n");
		goto fail_free;
	}

	/* enable bus mastering on the device */
	pci_set_master(dev);

	/* remap IO memory */
	err = igbuio_setup_bars(dev, &udev->info);
	if (err != 0)
		goto fail_release_iomem;

	/* set 64-bit DMA mask */
	err = pci_set_dma_mask(dev,  DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set DMA mask\n");
		goto fail_release_iomem;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
		goto fail_release_iomem;
	}

	/* fill uio infos */
	udev->info.name = "igb_uio";
	udev->info.version = "0.1";
	udev->info.irqcontrol = igbuio_pci_irqcontrol;
	udev->info.open = igbuio_pci_open;
	udev->info.release = igbuio_pci_release;
	udev->info.priv = udev;
	udev->pdev = dev;
	atomic_set(&udev->refcnt, 0);

	err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
	if (err != 0)
		goto fail_release_iomem;

	/* register uio driver */
	err = uio_register_device(&dev->dev, &udev->info);
	if (err != 0)
		goto fail_remove_group;

	pci_set_drvdata(dev, udev);

	/*
	 * Doing a harmless dma mapping for attaching the device to
	 * the iommu identity mapping if kernel boots with iommu=pt.
	 * Note this is not a problem if no IOMMU at all.
	 */
	map_addr = dma_alloc_coherent(&dev->dev, 1024, &map_dma_addr,
			GFP_KERNEL);
	if (map_addr)
		memset(map_addr, 0, 1024);

	/* failure here is only logged: the device is already usable */
	if (!map_addr)
		dev_info(&dev->dev, "dma mapping failed\n");
	else {
		dev_info(&dev->dev, "mapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);

		dma_free_coherent(&dev->dev, 1024, map_addr, map_dma_addr);
		dev_info(&dev->dev, "unmapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);
	}

	return 0;

fail_remove_group:
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
fail_release_iomem:
	igbuio_pci_release_iomem(&udev->info);
	pci_disable_device(dev);
fail_free:
	kfree(udev);

	return err;
}
581*6b8a3e40Sjfb8856606 
/*
 * Remove: force a release (tearing down IRQs / bus mastering if still
 * open), then unwind everything probe() set up, in reverse order.
 */
static void
igbuio_pci_remove(struct pci_dev *dev)
{
	struct rte_uio_pci_dev *udev = pci_get_drvdata(dev);

	igbuio_pci_release(&udev->info, NULL);

	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
	uio_unregister_device(&udev->info);
	igbuio_pci_release_iomem(&udev->info);
	pci_disable_device(dev);
	pci_set_drvdata(dev, NULL);
	kfree(udev);
}
596*6b8a3e40Sjfb8856606 
597*6b8a3e40Sjfb8856606 static int
igbuio_config_intr_mode(char * intr_str)598*6b8a3e40Sjfb8856606 igbuio_config_intr_mode(char *intr_str)
599*6b8a3e40Sjfb8856606 {
600*6b8a3e40Sjfb8856606 	if (!intr_str) {
601*6b8a3e40Sjfb8856606 		pr_info("Use MSIX interrupt by default\n");
602*6b8a3e40Sjfb8856606 		return 0;
603*6b8a3e40Sjfb8856606 	}
604*6b8a3e40Sjfb8856606 
605*6b8a3e40Sjfb8856606 	if (!strcmp(intr_str, RTE_INTR_MODE_MSIX_NAME)) {
606*6b8a3e40Sjfb8856606 		igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
607*6b8a3e40Sjfb8856606 		pr_info("Use MSIX interrupt\n");
608*6b8a3e40Sjfb8856606 	} else if (!strcmp(intr_str, RTE_INTR_MODE_MSI_NAME)) {
609*6b8a3e40Sjfb8856606 		igbuio_intr_mode_preferred = RTE_INTR_MODE_MSI;
610*6b8a3e40Sjfb8856606 		pr_info("Use MSI interrupt\n");
611*6b8a3e40Sjfb8856606 	} else if (!strcmp(intr_str, RTE_INTR_MODE_LEGACY_NAME)) {
612*6b8a3e40Sjfb8856606 		igbuio_intr_mode_preferred = RTE_INTR_MODE_LEGACY;
613*6b8a3e40Sjfb8856606 		pr_info("Use legacy interrupt\n");
614*6b8a3e40Sjfb8856606 	} else {
615*6b8a3e40Sjfb8856606 		pr_info("Error: bad parameter - %s\n", intr_str);
616*6b8a3e40Sjfb8856606 		return -EINVAL;
617*6b8a3e40Sjfb8856606 	}
618*6b8a3e40Sjfb8856606 
619*6b8a3e40Sjfb8856606 	return 0;
620*6b8a3e40Sjfb8856606 }
621*6b8a3e40Sjfb8856606 
/*
 * PCI driver descriptor. id_table is NULL, so the driver matches no
 * device IDs by itself; devices are presumably bound explicitly (e.g.
 * via sysfs new_id/bind) — confirm against deployment tooling.
 */
static struct pci_driver igbuio_pci_driver = {
	.name = "igb_uio",
	.id_table = NULL,
	.probe = igbuio_pci_probe,
	.remove = igbuio_pci_remove,
};
628*6b8a3e40Sjfb8856606 
/*
 * Module init: refuse to load on locked-down kernels (check provided by
 * compat.h), parse the intr_mode parameter, then register the driver.
 */
static int __init
igbuio_pci_init_module(void)
{
	int ret;

	if (igbuio_kernel_is_locked_down()) {
		pr_err("Not able to use module, kernel lock down is enabled\n");
		return -EINVAL;
	}

	if (wc_activate != 0)
		pr_info("wc_activate is set\n");

	ret = igbuio_config_intr_mode(intr_mode);
	if (ret < 0)
		return ret;

	return pci_register_driver(&igbuio_pci_driver);
}
648*6b8a3e40Sjfb8856606 
/* Module exit: unregister the PCI driver (remove() runs per device). */
static void __exit
igbuio_pci_exit_module(void)
{
	pci_unregister_driver(&igbuio_pci_driver);
}
654*6b8a3e40Sjfb8856606 
module_init(igbuio_pci_init_module);
module_exit(igbuio_pci_exit_module);

/* "intr_mode": string parameter, read-only in sysfs after load. */
module_param(intr_mode, charp, S_IRUGO);
MODULE_PARM_DESC(intr_mode,
"igb_uio interrupt mode (default=msix):\n"
"    " RTE_INTR_MODE_MSIX_NAME "       Use MSIX interrupt\n"
"    " RTE_INTR_MODE_MSI_NAME "        Use MSI interrupt\n"
"    " RTE_INTR_MODE_LEGACY_NAME "     Use Legacy interrupt\n"
"\n");

/* "wc_activate": int parameter, not exposed in sysfs (perm 0). */
module_param(wc_activate, int, 0);
MODULE_PARM_DESC(wc_activate,
"Activate support for write combining (WC) (default=0)\n"
"    0 - disable\n"
"    other - enable\n");

MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
675