/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright(c) 2010-2014 Intel Corporation.
 */
5d30ea906Sjfb8856606
6d30ea906Sjfb8856606 #ifndef _KNI_DEV_H_
7d30ea906Sjfb8856606 #define _KNI_DEV_H_
8d30ea906Sjfb8856606
9d30ea906Sjfb8856606 #ifdef pr_fmt
10d30ea906Sjfb8856606 #undef pr_fmt
11d30ea906Sjfb8856606 #endif
12d30ea906Sjfb8856606 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13d30ea906Sjfb8856606
144418919fSjohnjiang #define KNI_VERSION "1.0"
154418919fSjohnjiang
16d30ea906Sjfb8856606 #include "compat.h"
17d30ea906Sjfb8856606
18d30ea906Sjfb8856606 #include <linux/if.h>
19d30ea906Sjfb8856606 #include <linux/wait.h>
20d30ea906Sjfb8856606 #ifdef HAVE_SIGNAL_FUNCTIONS_OWN_HEADER
21d30ea906Sjfb8856606 #include <linux/sched/signal.h>
22d30ea906Sjfb8856606 #else
23d30ea906Sjfb8856606 #include <linux/sched.h>
24d30ea906Sjfb8856606 #endif
25d30ea906Sjfb8856606 #include <linux/netdevice.h>
26d30ea906Sjfb8856606 #include <linux/spinlock.h>
27d30ea906Sjfb8856606 #include <linux/list.h>
28d30ea906Sjfb8856606
294418919fSjohnjiang #include <rte_kni_common.h>
30d30ea906Sjfb8856606 #define KNI_KTHREAD_RESCHEDULE_INTERVAL 5 /* us */
31d30ea906Sjfb8856606
32d30ea906Sjfb8856606 #define MBUF_BURST_SZ 32
33d30ea906Sjfb8856606
34d30ea906Sjfb8856606 /* Default carrier state for created KNI network interfaces */
354418919fSjohnjiang extern uint32_t kni_dflt_carrier;
36d30ea906Sjfb8856606
37d30ea906Sjfb8856606 /**
38d30ea906Sjfb8856606 * A structure describing the private information for a kni device.
39d30ea906Sjfb8856606 */
/**
 * Private per-interface state for a KNI device.
 *
 * One instance exists for each KNI network interface created by this
 * module; instances are chained together through @list.
 */
struct kni_dev {
	/* Node in the module-wide list of KNI devices */
	struct list_head list;

	/* Non-zero when addresses in the FIFOs are IOVAs rather than
	 * physical addresses (see iova_to_kva() below) */
	uint8_t iova_mode;

	uint32_t core_id; /* Core ID to bind */
	char name[RTE_KNI_NAMESIZE]; /* Network device name */
	/* Kernel thread servicing this device — NOTE(review): presumably
	 * only used in per-device (multi-kthread) mode; confirm in kni_misc */
	struct task_struct *pthread;

	/* wait queue for req/resp */
	wait_queue_head_t wq;
	struct mutex sync_lock;

	/* kni device */
	struct net_device *net_dev;

	/* queue for packets to be sent out */
	struct rte_kni_fifo *tx_q;

	/* queue for the packets received */
	struct rte_kni_fifo *rx_q;

	/* queue for the allocated mbufs those can be used to save sk buffs */
	struct rte_kni_fifo *alloc_q;

	/* free queue for the mbufs to be freed */
	struct rte_kni_fifo *free_q;

	/* request queue */
	struct rte_kni_fifo *req_q;

	/* response queue */
	struct rte_kni_fifo *resp_q;

	/* Kernel / user virtual addresses of the request-response sync
	 * buffer — NOTE(review): presumably shared with the userspace
	 * librte_kni side; confirm against rte_kni */
	void *sync_kva;
	void *sync_va;

	/* Kernel / user virtual base addresses of the mbuf memory zone */
	void *mbuf_kva;
	void *mbuf_va;

	/* mbuf size */
	uint32_t mbuf_size;

	/* Scratch arrays (physical and virtual addresses) for burst
	 * processing of up to MBUF_BURST_SZ mbufs at a time */
	void *pa[MBUF_BURST_SZ];
	void *va[MBUF_BURST_SZ];
	void *alloc_pa[MBUF_BURST_SZ];
	void *alloc_va[MBUF_BURST_SZ];

	/* Userspace task whose mm is used for IOVA-to-KVA translation */
	struct task_struct *usr_tsk;
};
92d30ea906Sjfb8856606
934418919fSjohnjiang #ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
iova_to_phys(struct task_struct * tsk,unsigned long iova)944418919fSjohnjiang static inline phys_addr_t iova_to_phys(struct task_struct *tsk,
954418919fSjohnjiang unsigned long iova)
964418919fSjohnjiang {
974418919fSjohnjiang phys_addr_t offset, phys_addr;
984418919fSjohnjiang struct page *page = NULL;
994418919fSjohnjiang long ret;
1004418919fSjohnjiang
1014418919fSjohnjiang offset = iova & (PAGE_SIZE - 1);
1024418919fSjohnjiang
1034418919fSjohnjiang /* Read one page struct info */
104*0c6bd470Sfengbojiang #ifdef HAVE_TSK_IN_GUP
1054418919fSjohnjiang ret = get_user_pages_remote(tsk, tsk->mm, iova, 1,
1064418919fSjohnjiang FOLL_TOUCH, &page, NULL, NULL);
107*0c6bd470Sfengbojiang #else
108*0c6bd470Sfengbojiang ret = get_user_pages_remote(tsk->mm, iova, 1,
109*0c6bd470Sfengbojiang FOLL_TOUCH, &page, NULL, NULL);
110*0c6bd470Sfengbojiang #endif
1114418919fSjohnjiang if (ret < 0)
1124418919fSjohnjiang return 0;
1134418919fSjohnjiang
1144418919fSjohnjiang phys_addr = page_to_phys(page) | offset;
1154418919fSjohnjiang put_page(page);
1164418919fSjohnjiang
1174418919fSjohnjiang return phys_addr;
1184418919fSjohnjiang }
1194418919fSjohnjiang
iova_to_kva(struct task_struct * tsk,unsigned long iova)1204418919fSjohnjiang static inline void *iova_to_kva(struct task_struct *tsk, unsigned long iova)
1214418919fSjohnjiang {
1224418919fSjohnjiang return phys_to_virt(iova_to_phys(tsk, iova));
1234418919fSjohnjiang }
1244418919fSjohnjiang #endif
1254418919fSjohnjiang
/*
 * Entry points implemented elsewhere in the module (kni_net.c).
 * NOTE(review): one-line descriptions inferred from the names — confirm
 * against the definitions.
 */
void kni_net_release_fifo_phy(struct kni_dev *kni); /* release FIFO mappings */
void kni_net_rx(struct kni_dev *kni);               /* receive-path processing */
void kni_net_init(struct net_device *dev);          /* net_device setup */
void kni_net_config_lo_mode(char *lo_str);          /* configure loopback mode */
void kni_net_poll_resp(struct kni_dev *kni);        /* wake req/resp waiters */
131d30ea906Sjfb8856606
132d30ea906Sjfb8856606 #endif
133