/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright(c) 2010-2014 Intel Corporation.
 */

#ifndef _KNI_DEV_H_
#define _KNI_DEV_H_

#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define KNI_VERSION "1.0"

#include "compat.h"

#include <linux/if.h>
#include <linux/wait.h>
#ifdef HAVE_SIGNAL_FUNCTIONS_OWN_HEADER
#include <linux/sched/signal.h>
#else
#include <linux/sched.h>
#endif
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/list.h>

#include <rte_kni_common.h>
#define KNI_KTHREAD_RESCHEDULE_INTERVAL 5 /* us */

#define MBUF_BURST_SZ 32

/* Default carrier state for created KNI network interfaces */
extern uint32_t kni_dflt_carrier;

/**
 * A structure describing the private information for a kni device.
 */
struct kni_dev {
	/* kni list */
	struct list_head list;

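	/* set when the interface was created in IOVA=VA mode; addresses
	 * passed from userspace are then translated through the user
	 * task's page tables (see iova_to_kva() below)
	 */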
	uint8_t iova_mode;

	uint32_t core_id;            /* Core ID to bind */
	char name[RTE_KNI_NAMESIZE]; /* Network device name */
	struct task_struct *pthread; /* per-device RX kernel thread */

	/* wait queue for req/resp */
	wait_queue_head_t wq;
	struct mutex sync_lock;

	/* kni device */
	struct net_device *net_dev;

	/* queue for packets to be sent out */
	struct rte_kni_fifo *tx_q;

	/* queue for the packets received */
	struct rte_kni_fifo *rx_q;

	/* queue of allocated mbufs that can be used to store sk_buff data */
	struct rte_kni_fifo *alloc_q;

	/* free queue for the mbufs to be freed */
	struct rte_kni_fifo *free_q;

	/* request queue */
	struct rte_kni_fifo *req_q;

	/* response queue */
	struct rte_kni_fifo *resp_q;

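	/* kernel and userspace virtual addresses of the shared sync buffer
	 * used for request/response exchange with the application */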
	void *sync_kva;
	void *sync_va;

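	/* kernel and userspace virtual addresses of the mbuf memory region */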
	void *mbuf_kva;
	void *mbuf_va;

	/* mbuf size */
	uint32_t mbuf_size;

	/* per-burst scratch arrays of mbuf addresses */
	void *pa[MBUF_BURST_SZ];
	void *va[MBUF_BURST_SZ];
	void *alloc_pa[MBUF_BURST_SZ];
	void *alloc_va[MBUF_BURST_SZ];

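	/* userspace task that created the device; its memory mappings are
	 * used for IOVA-to-KVA translation when iova_mode is set */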
	struct task_struct *usr_tsk;
};

#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
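/*
 * Translate a userspace IOVA (a user virtual address in IOVA=VA mode) to a
 * physical address by pinning the backing page of the given task.
 * Returns 0 if the page cannot be pinned.
 */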
static inline phys_addr_t iova_to_phys(struct task_struct *tsk,
				       unsigned long iova)
{
	phys_addr_t offset, phys_addr;
	struct page *page = NULL;
	long ret;

	offset = iova & (PAGE_SIZE - 1);

	/* Read one page struct info */
#ifdef HAVE_TSK_IN_GUP
	ret = get_user_pages_remote(tsk, tsk->mm, iova, 1,
				    FOLL_TOUCH, &page, NULL, NULL);
#else
	ret = get_user_pages_remote(tsk->mm, iova, 1,
				    FOLL_TOUCH, &page, NULL, NULL);
#endif
	if (ret < 0)
		return 0;

	phys_addr = page_to_phys(page) | offset;
	put_page(page);

	return phys_addr;
}

static inline void *iova_to_kva(struct task_struct *tsk, unsigned long iova)
{
	return phys_to_virt(iova_to_phys(tsk, iova));
}
#endif

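/* Exported by the KNI network device implementation (kni_net.c) */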
void kni_net_release_fifo_phy(struct kni_dev *kni);
void kni_net_rx(struct kni_dev *kni);
void kni_net_init(struct net_device *dev);
void kni_net_config_lo_mode(char *lo_str);
void kni_net_poll_resp(struct kni_dev *kni);

#endif