// SPDX-License-Identifier: GPL-2.0
/*-
 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/uio_driver.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/version.h>
#include <linux/slab.h>

/**
 * These enum and macro definitions are copied from the
 * file rte_pci_dev_features.h
 */
enum rte_intr_mode {
	RTE_INTR_MODE_NONE = 0,
	RTE_INTR_MODE_LEGACY,
	RTE_INTR_MODE_MSI,
	RTE_INTR_MODE_MSIX
};
#define RTE_INTR_MODE_NONE_NAME "none"
#define RTE_INTR_MODE_LEGACY_NAME "legacy"
#define RTE_INTR_MODE_MSI_NAME "msi"
#define RTE_INTR_MODE_MSIX_NAME "msix"


#include "compat.h"

/**
 * A structure describing the private information for a uio device.
 */
struct rte_uio_pci_dev {
	struct uio_info info;
	struct pci_dev *pdev;
	enum rte_intr_mode mode;
	atomic_t refcnt;
};

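/*
 * Module parameters; defaults and descriptions are given with
 * MODULE_PARM_DESC() at the bottom of this file.
 */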
static int wc_activate;
static char *intr_mode;
static enum rte_intr_mode igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;

/* sriov sysfs */
static ssize_t
show_max_vfs(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	return snprintf(buf, 10, "%u\n", dev_num_vf(dev));
}

static ssize_t
store_max_vfs(struct device *dev, struct device_attribute *attr,
	      const char *buf, size_t count)
{
	int err = 0;
	unsigned long max_vfs;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (kstrtoul(buf, 0, &max_vfs) != 0)
		return -EINVAL;

	if (max_vfs == 0)
		pci_disable_sriov(pdev);
	else if (pci_num_vf(pdev) == 0)
		err = pci_enable_sriov(pdev, max_vfs);
	else /* changing max_vfs while VFs are enabled is not allowed */
		err = -EINVAL;

	return err ? err : count;
}

static DEVICE_ATTR(max_vfs, S_IRUGO | S_IWUSR, show_max_vfs, store_max_vfs);

static struct attribute *dev_attrs[] = {
	&dev_attr_max_vfs.attr,
	NULL,
};

static const struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};

#ifndef HAVE_PCI_MSI_MASK_IRQ
/*
 * Mask (state == 0) or unmask (state != 0) the generation of MSI-X
 * messages for the given vector.
 */
static void
igbuio_msix_mask_irq(struct msi_desc *desc, s32 state)
{
	u32 mask_bits = desc->masked;
	unsigned int offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
						PCI_MSIX_ENTRY_VECTOR_CTRL;

	if (state != 0)
		mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	else
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;

	if (mask_bits != desc->masked) {
		writel(mask_bits, desc->mask_base + offset);
		readl(desc->mask_base);
		desc->masked = mask_bits;
	}
}

/*
 * Mask (state == 0) or unmask (state != 0) the generation of MSI
 * messages for the given vector.
 */
static void
igbuio_msi_mask_irq(struct pci_dev *pdev, struct msi_desc *desc, int32_t state)
{
	u32 mask_bits = desc->masked;
	u32 offset = desc->irq - pdev->irq;
	u32 mask = 1 << offset;

	if (!desc->msi_attrib.maskbit)
		return;

	if (state != 0)
		mask_bits &= ~mask;
	else
		mask_bits |= mask;

	if (mask_bits != desc->masked) {
		pci_write_config_dword(pdev, desc->mask_pos, mask_bits);
		desc->masked = mask_bits;
	}
}

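/*
 * Apply the requested mask state to every MSI/MSI-X descriptor of the
 * device, for kernels without pci_msi_mask_irq()/pci_msi_unmask_irq().
 */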
static void
igbuio_mask_irq(struct pci_dev *pdev, enum rte_intr_mode mode, s32 irq_state)
{
	struct msi_desc *desc;
	struct list_head *msi_list;

#ifdef HAVE_MSI_LIST_IN_GENERIC_DEVICE
	msi_list = &pdev->dev.msi_list;
#else
	msi_list = &pdev->msi_list;
#endif

	if (mode == RTE_INTR_MODE_MSIX) {
		list_for_each_entry(desc, msi_list, list)
			igbuio_msix_mask_irq(desc, irq_state);
	} else if (mode == RTE_INTR_MODE_MSI) {
		list_for_each_entry(desc, msi_list, list)
			igbuio_msi_mask_irq(pdev, desc, irq_state);
	}
}
#endif

/**
 * This is the irqcontrol callback to be registered to uio_info.
 * It can be used to enable or disable interrupts from user space
 * processes.
 *
 * @param info
 *  pointer to uio_info.
 * @param irq_state
 *  state value. 1 to enable interrupt, 0 to disable interrupt.
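 *
 * Note: user space reaches this callback by write()ing a 4-byte integer
 * to the /dev/uioX file descriptor; this is standard UIO behaviour.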
 *
 * @return
 *  - On success, 0.
 *  - On failure, a negative value.
 */
static int
igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *pdev = udev->pdev;

#ifdef HAVE_PCI_MSI_MASK_IRQ
	struct irq_data *irq = irq_get_irq_data(udev->info.irq);
#endif

	pci_cfg_access_lock(pdev);

	if (udev->mode == RTE_INTR_MODE_MSIX ||
	    udev->mode == RTE_INTR_MODE_MSI) {
#ifdef HAVE_PCI_MSI_MASK_IRQ
		if (irq_state == 1)
			pci_msi_unmask_irq(irq);
		else
			pci_msi_mask_irq(irq);
#else
		igbuio_mask_irq(pdev, udev->mode, irq_state);
#endif
	}

	if (udev->mode == RTE_INTR_MODE_LEGACY)
		pci_intx(pdev, !!irq_state);

	pci_cfg_access_unlock(pdev);

	return 0;
}

/**
 * This is the interrupt handler. It checks whether the interrupt belongs
 * to this device; if so, the interrupt is disabled here and re-enabled
 * later from user space.
 */
static irqreturn_t
igbuio_pci_irqhandler(int irq, void *dev_id)
{
	struct rte_uio_pci_dev *udev = (struct rte_uio_pci_dev *)dev_id;
	struct uio_info *info = &udev->info;

	/* Legacy mode needs to be masked in hardware */
	if (udev->mode == RTE_INTR_MODE_LEGACY &&
	    !pci_check_and_mask_intx(udev->pdev))
		return IRQ_NONE;

	uio_event_notify(info);

	/* Message-signalled mode: the IRQ is not shared and is automasked */
	return IRQ_HANDLED;
}

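/*
 * Set up the preferred interrupt mode, falling back from MSI-X to MSI
 * to legacy INTx when the preferred mode is unavailable, and request
 * the resulting IRQ.
 */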
static int
igbuio_pci_enable_interrupts(struct rte_uio_pci_dev *udev)
{
	int err = 0;
#ifndef HAVE_ALLOC_IRQ_VECTORS
	struct msix_entry msix_entry;
#endif

	switch (igbuio_intr_mode_preferred) {
	case RTE_INTR_MODE_MSIX:
		/* Only 1 msi-x vector needed */
#ifndef HAVE_ALLOC_IRQ_VECTORS
		msix_entry.entry = 0;
		if (pci_enable_msix(udev->pdev, &msix_entry, 1) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = msix_entry.vector;
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSIX) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#endif

		/* falls through - to MSI */
	case RTE_INTR_MODE_MSI:
#ifndef HAVE_ALLOC_IRQ_VECTORS
		if (pci_enable_msi(udev->pdev) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#endif
		/* falls through - to INTX */
	case RTE_INTR_MODE_LEGACY:
		if (pci_intx_mask_supported(udev->pdev)) {
			dev_dbg(&udev->pdev->dev, "using INTX");
			udev->info.irq_flags = IRQF_SHARED | IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_LEGACY;
			break;
		}
		dev_notice(&udev->pdev->dev, "PCI INTX mask not supported\n");
		/* falls through - to no IRQ */
	case RTE_INTR_MODE_NONE:
		udev->mode = RTE_INTR_MODE_NONE;
		udev->info.irq = UIO_IRQ_NONE;
		break;

	default:
		dev_err(&udev->pdev->dev, "invalid IRQ mode %u",
			igbuio_intr_mode_preferred);
		udev->info.irq = UIO_IRQ_NONE;
		err = -EINVAL;
	}

	if (udev->info.irq != UIO_IRQ_NONE)
		err = request_irq(udev->info.irq, igbuio_pci_irqhandler,
				  udev->info.irq_flags, udev->info.name,
				  udev);
	dev_info(&udev->pdev->dev, "uio device registered with irq %ld\n",
		 udev->info.irq);

	return err;
}

static void
igbuio_pci_disable_interrupts(struct rte_uio_pci_dev *udev)
{
	if (udev->info.irq) {
		free_irq(udev->info.irq, udev);
		udev->info.irq = 0;
	}

#ifndef HAVE_ALLOC_IRQ_VECTORS
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(udev->pdev);
	if (udev->mode == RTE_INTR_MODE_MSI)
		pci_disable_msi(udev->pdev);
#else
	if (udev->mode == RTE_INTR_MODE_MSIX ||
	    udev->mode == RTE_INTR_MODE_MSI)
		pci_free_irq_vectors(udev->pdev);
#endif
}


/**
 * This gets called when the uio device file is opened; the first open
 * enables bus mastering and interrupts.
 */
static int
igbuio_pci_open(struct uio_info *info, struct inode *inode)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *dev = udev->pdev;
	int err;

	if (atomic_inc_return(&udev->refcnt) != 1)
		return 0;

	/* set bus master, which was cleared by the reset function */
	pci_set_master(dev);

	/* enable interrupts */
	err = igbuio_pci_enable_interrupts(udev);
	if (err) {
		atomic_dec(&udev->refcnt);
		dev_err(&dev->dev, "Enabling interrupts failed\n");
	}
	return err;
}

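/*
 * This gets called when the uio device file is closed; the last close
 * disables interrupts and bus mastering.
 */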
static int
igbuio_pci_release(struct uio_info *info, struct inode *inode)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *dev = udev->pdev;

	if (atomic_dec_and_test(&udev->refcnt)) {
		/* disable interrupts */
		igbuio_pci_disable_interrupts(udev);

		/* stop the device from further DMA */
		pci_clear_master(dev);
	}

	return 0;
}

/* Remap pci resources described by bar #pci_bar in uio resource n. */
static int
igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
		       int n, int pci_bar, const char *name)
{
	unsigned long addr, len;
	void *internal_addr;

	if (n >= ARRAY_SIZE(info->mem))
		return -EINVAL;

	addr = pci_resource_start(dev, pci_bar);
	len = pci_resource_len(dev, pci_bar);
	if (addr == 0 || len == 0)
		return -1;
	if (wc_activate == 0) {
		internal_addr = ioremap(addr, len);
		if (internal_addr == NULL)
			return -1;
	} else {
		internal_addr = NULL;
	}
	info->mem[n].name = name;
	info->mem[n].addr = addr;
	info->mem[n].internal_addr = internal_addr;
	info->mem[n].size = len;
	info->mem[n].memtype = UIO_MEM_PHYS;
	return 0;
}

/* Get pci port io resources described by bar #pci_bar in uio resource n. */
static int
igbuio_pci_setup_ioport(struct pci_dev *dev, struct uio_info *info,
			int n, int pci_bar, const char *name)
{
	unsigned long addr, len;

	if (n >= ARRAY_SIZE(info->port))
		return -EINVAL;

	addr = pci_resource_start(dev, pci_bar);
	len = pci_resource_len(dev, pci_bar);
	if (addr == 0 || len == 0)
		return -EINVAL;

	info->port[n].name = name;
	info->port[n].start = addr;
	info->port[n].size = len;
	info->port[n].porttype = UIO_PORT_X86;

	return 0;
}

/* Unmap previously ioremap'd resources */
static void
igbuio_pci_release_iomem(struct uio_info *info)
{
	int i;

	for (i = 0; i < MAX_UIO_MAPS; i++) {
		if (info->mem[i].internal_addr)
			iounmap(info->mem[i].internal_addr);
	}
}

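/*
 * Walk the standard PCI BARs and expose each valid one as a uio memory
 * or port resource.
 */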
static int
igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info)
{
	int i, iom, iop, ret;
	unsigned long flags;
	static const char *bar_names[PCI_STD_RESOURCE_END + 1] = {
		"BAR0",
		"BAR1",
		"BAR2",
		"BAR3",
		"BAR4",
		"BAR5",
	};

	iom = 0;
	iop = 0;

	for (i = 0; i < ARRAY_SIZE(bar_names); i++) {
		if (pci_resource_len(dev, i) != 0 &&
		    pci_resource_start(dev, i) != 0) {
			flags = pci_resource_flags(dev, i);
			if (flags & IORESOURCE_MEM) {
				ret = igbuio_pci_setup_iomem(dev, info, iom,
							     i, bar_names[i]);
				if (ret != 0)
					return ret;
				iom++;
			} else if (flags & IORESOURCE_IO) {
				ret = igbuio_pci_setup_ioport(dev, info, iop,
							      i, bar_names[i]);
				if (ret != 0)
					return ret;
				iop++;
			}
		}
	}

	/* any setup failure returned early, so success here means 0 */
	return (iom != 0 || iop != 0) ? 0 : -ENOENT;
}

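/*
 * PCI probe: enable the device, map its BARs, set the DMA mask,
 * register the uio device, and undo everything on failure.
 */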
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
static int __devinit
#else
static int
#endif
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct rte_uio_pci_dev *udev;
	dma_addr_t map_dma_addr;
	void *map_addr;
	int err;

#ifdef HAVE_PCI_IS_BRIDGE_API
	if (pci_is_bridge(dev)) {
		dev_warn(&dev->dev, "Ignoring PCI bridge device\n");
		return -ENODEV;
	}
#endif

	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	/*
	 * enable device: ask low-level code to enable I/O and
	 * memory
	 */
	err = pci_enable_device(dev);
	if (err != 0) {
		dev_err(&dev->dev, "Cannot enable PCI device\n");
		goto fail_free;
	}

	/* enable bus mastering on the device */
	pci_set_master(dev);

	/* remap IO memory */
	err = igbuio_setup_bars(dev, &udev->info);
	if (err != 0)
		goto fail_release_iomem;

	/* set 64-bit DMA mask */
	err = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set DMA mask\n");
		goto fail_release_iomem;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
		goto fail_release_iomem;
	}

	/* fill uio infos */
	udev->info.name = "igb_uio";
	udev->info.version = "0.1";
	udev->info.irqcontrol = igbuio_pci_irqcontrol;
	udev->info.open = igbuio_pci_open;
	udev->info.release = igbuio_pci_release;
	udev->info.priv = udev;
	udev->pdev = dev;
	atomic_set(&udev->refcnt, 0);

	err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
	if (err != 0)
		goto fail_release_iomem;

	/* register uio driver */
	err = uio_register_device(&dev->dev, &udev->info);
	if (err != 0)
		goto fail_remove_group;

	pci_set_drvdata(dev, udev);

	/*
	 * Do a harmless dma mapping to attach the device to the IOMMU
	 * identity mapping when the kernel boots with iommu=pt. Note
	 * this is not a problem if there is no IOMMU at all.
	 */
	map_addr = dma_alloc_coherent(&dev->dev, 1024, &map_dma_addr,
				      GFP_KERNEL);
	if (!map_addr) {
		dev_info(&dev->dev, "dma mapping failed\n");
	} else {
		memset(map_addr, 0, 1024);
		dev_info(&dev->dev, "mapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);

		dma_free_coherent(&dev->dev, 1024, map_addr, map_dma_addr);
		dev_info(&dev->dev, "unmapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);
	}

	return 0;

fail_remove_group:
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
fail_release_iomem:
	igbuio_pci_release_iomem(&udev->info);
	pci_disable_device(dev);
fail_free:
	kfree(udev);

	return err;
}

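/*
 * PCI remove: force a final release in case the uio file is still open,
 * then tear down everything set up in probe.
 */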
static void
igbuio_pci_remove(struct pci_dev *dev)
{
	struct rte_uio_pci_dev *udev = pci_get_drvdata(dev);

	igbuio_pci_release(&udev->info, NULL);

	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
	uio_unregister_device(&udev->info);
	igbuio_pci_release_iomem(&udev->info);
	pci_disable_device(dev);
	pci_set_drvdata(dev, NULL);
	kfree(udev);
}

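/* Parse the intr_mode module parameter into igbuio_intr_mode_preferred. */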
static int
igbuio_config_intr_mode(char *intr_str)
{
	if (!intr_str) {
		pr_info("Use MSIX interrupt by default\n");
		return 0;
	}

	if (!strcmp(intr_str, RTE_INTR_MODE_MSIX_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
		pr_info("Use MSIX interrupt\n");
	} else if (!strcmp(intr_str, RTE_INTR_MODE_MSI_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_MSI;
		pr_info("Use MSI interrupt\n");
	} else if (!strcmp(intr_str, RTE_INTR_MODE_LEGACY_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_LEGACY;
		pr_info("Use legacy interrupt\n");
	} else {
		pr_info("Error: bad parameter - %s\n", intr_str);
		return -EINVAL;
	}

	return 0;
}

static struct pci_driver igbuio_pci_driver = {
	.name = "igb_uio",
	.id_table = NULL,
	.probe = igbuio_pci_probe,
	.remove = igbuio_pci_remove,
};
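/*
 * Note: id_table is NULL, so this driver never auto-binds; devices are
 * attached explicitly from user space (e.g. via the driver's sysfs
 * new_id/bind files or a tool such as dpdk-devbind.py).
 */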

static int __init
igbuio_pci_init_module(void)
{
	int ret;

	if (igbuio_kernel_is_locked_down()) {
		pr_err("Unable to use module: kernel lockdown is enabled\n");
		return -EINVAL;
	}

	if (wc_activate != 0)
		pr_info("wc_activate is set\n");

	ret = igbuio_config_intr_mode(intr_mode);
	if (ret < 0)
		return ret;

	return pci_register_driver(&igbuio_pci_driver);
}

static void __exit
igbuio_pci_exit_module(void)
{
	pci_unregister_driver(&igbuio_pci_driver);
}

module_init(igbuio_pci_init_module);
module_exit(igbuio_pci_exit_module);

module_param(intr_mode, charp, S_IRUGO);
MODULE_PARM_DESC(intr_mode,
"igb_uio interrupt mode (default=msix):\n"
"    " RTE_INTR_MODE_MSIX_NAME "       Use MSIX interrupt\n"
"    " RTE_INTR_MODE_MSI_NAME "        Use MSI interrupt\n"
"    " RTE_INTR_MODE_LEGACY_NAME "     Use Legacy interrupt\n"
"\n");

module_param(wc_activate, int, 0);
MODULE_PARM_DESC(wc_activate,
"Activate support for write combining (WC) (default=0)\n"
"   0 - disable\n"
"   other - enable\n");

MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");