/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright(c) 2010-2014 Intel Corporation.
 */

#ifndef _KNI_DEV_H_
#define _KNI_DEV_H_

#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define KNI_VERSION	"1.0"

#include "compat.h"

#include <linux/if.h>
#include <linux/wait.h>
#ifdef HAVE_SIGNAL_FUNCTIONS_OWN_HEADER
#include <linux/sched/signal.h>
#else
#include <linux/sched.h>
#endif
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/list.h>

#include <rte_kni_common.h>

/* Hard upper bound, in microseconds, on the RX kthread reschedule interval */
#define KNI_KTHREAD_MAX_RESCHEDULE_INTERVAL 1000000 /* us */

/* Number of mbufs handled per burst; sizes the scratch arrays in kni_dev */
#define MBUF_BURST_SZ 32

/* Default carrier state for created KNI network interfaces */
extern uint32_t kni_dflt_carrier;

/* Request processing support for bifurcated drivers. */
extern uint32_t bifurcated_support;

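/*
 * Both flags above are exposed as rte_kni module parameters (see
 * kni_misc.c). Illustrative load command, assuming the stock option
 * names carrier= and enable_bifurcated=:
 *
 *	insmod rte_kni.ko carrier=on enable_bifurcated=on
 */
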
/**
 * A structure describing the private information for a KNI device.
 */
struct kni_dev {
	/* entry in the per-netns KNI device list */
	struct list_head list;

	/* non-zero when mbuf addresses are IOVAs that must be translated */
	uint8_t iova_mode;

	uint32_t core_id;            /* Core ID to bind the kthread to */
	char name[RTE_KNI_NAMESIZE]; /* Network device name */

	/* per-device RX kthread (multiple-kthread mode only) */
	struct task_struct *pthread;

	/* wait queue for req/resp */
	wait_queue_head_t wq;
	struct mutex sync_lock;

	/* kni device */
	struct net_device *net_dev;

	/* queue for packets to be sent out */
	struct rte_kni_fifo *tx_q;

	/* queue for the packets received */
	struct rte_kni_fifo *rx_q;

	/* queue of allocated mbufs that can be used to store sk_buff data */
	struct rte_kni_fifo *alloc_q;

	/* free queue for the mbufs to be freed */
	struct rte_kni_fifo *free_q;

	/* request queue */
	struct rte_kni_fifo *req_q;

	/* response queue */
	struct rte_kni_fifo *resp_q;

	/* kernel and user virtual addresses of the sync buffer */
	void *sync_kva;
	void *sync_va;

	void *mbuf_kva;
	void *mbuf_va;

	/* mbuf size */
	uint32_t mbuf_size;

	/* scratch arrays for bursts of mbuf physical/virtual addresses */
	void *pa[MBUF_BURST_SZ];
	void *va[MBUF_BURST_SZ];
	void *alloc_pa[MBUF_BURST_SZ];
	void *alloc_va[MBUF_BURST_SZ];

	/* userspace task owning this device (used for IOVA translation) */
	struct task_struct *usr_tsk;
};
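
/*
 * How the FIFOs cooperate on transmit (kernel -> user), as a simplified
 * sketch of the kni_net.c logic built on the kni_fifo_get()/kni_fifo_put()
 * helpers from kni_fifo.h; address conversion, error paths and bursting
 * are omitted:
 *
 *	void *mbuf;
 *
 *	if (kni_fifo_get(kni->alloc_q, &mbuf, 1) == 1) {
 *		// copy the sk_buff payload into the user-allocated mbuf,
 *		// then hand it over to the DPDK application
 *		kni_fifo_put(kni->tx_q, &mbuf, 1);
 *	}
 *
 * The reverse path drains rx_q into netif_rx() and returns consumed mbufs
 * through free_q for the application to release.
 */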

#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
/* Translate a userspace IOVA into a physical address by pinning the
 * backing page of the owning task; returns 0 if the page cannot be pinned.
 */
static inline phys_addr_t iova_to_phys(struct task_struct *tsk,
				       unsigned long iova)
{
	phys_addr_t offset, phys_addr;
	struct page *page = NULL;
	long ret;

	/* Byte offset of the address within its page */
	offset = iova & (PAGE_SIZE - 1);

	/* Pin the single page backing the IOVA; kernels >= 5.9 dropped the
	 * task_struct argument from get_user_pages_remote(), hence the
	 * HAVE_TSK_IN_GUP compatibility switch.
	 */
#ifdef HAVE_TSK_IN_GUP
	ret = get_user_pages_remote(tsk, tsk->mm, iova, 1,
				    FOLL_TOUCH, &page, NULL, NULL);
#else
	ret = get_user_pages_remote(tsk->mm, iova, 1,
				    FOLL_TOUCH, &page, NULL, NULL);
#endif
	/* Bail out unless exactly one page was pinned; ret == 0 would leave
	 * page NULL and crash page_to_phys() below.
	 */
	if (ret < 1)
		return 0;

	phys_addr = page_to_phys(page) | offset;
	put_page(page);

	return phys_addr;
}

static inline void *iova_to_kva(struct task_struct *tsk, unsigned long iova)
{
	return phys_to_virt(iova_to_phys(tsk, iova));
}
#endif
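
/*
 * When kni->iova_mode is non-zero, the addresses popped from the FIFOs are
 * userspace IOVAs rather than physical addresses, so they must be run
 * through the owning task before the kernel touches them. Illustrative
 * sketch only (mbuf_iova stands in for a value taken from a FIFO):
 *
 *	void *kva = iova_to_kva(kni->usr_tsk, (unsigned long)mbuf_iova);
 */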

void kni_net_release_fifo_phy(struct kni_dev *kni);
void kni_net_rx(struct kni_dev *kni);
void kni_net_init(struct net_device *dev);
void kni_net_config_lo_mode(char *lo_str);
void kni_net_poll_resp(struct kni_dev *kni);
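
/*
 * kni_net_config_lo_mode() parses the "lo_mode" module parameter.
 * Illustrative invocation, assuming the stock rte_kni option values
 * ("lo_mode_fifo" loops mbufs back through the FIFOs; "lo_mode_fifo_skb"
 * additionally round-trips them through an sk_buff):
 *
 *	insmod rte_kni.ko lo_mode=lo_mode_fifo_skb
 */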

#endif /* _KNI_DEV_H_ */