/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef __DLB2_OSDEP_H
#define __DLB2_OSDEP_H

#include <string.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>

#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_log.h>
#include <rte_spinlock.h>
#include "../dlb2_main.h"

#include "dlb2_resource.h"

#include "../../dlb2_log.h"
#include "../../dlb2_user.h"

#define DLB2_PCI_REG_READ(addr)        rte_read32((void *)(addr))
#define DLB2_PCI_REG_WRITE(reg, value) rte_write32((value), (void *)(reg))

/* Read/write register 'reg' in the CSR BAR space */
#define DLB2_CSR_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->csr_kva + (reg)))
#define DLB2_CSR_RD(hw, reg) \
	DLB2_PCI_REG_READ(DLB2_CSR_REG_ADDR((hw), (reg)))
#define DLB2_CSR_WR(hw, reg, value) \
	DLB2_PCI_REG_WRITE(DLB2_CSR_REG_ADDR((hw), (reg)), (value))

/* Read/write register 'reg' in the func BAR space */
#define DLB2_FUNC_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->func_kva + (reg)))
#define DLB2_FUNC_RD(hw, reg) \
	DLB2_PCI_REG_READ(DLB2_FUNC_REG_ADDR((hw), (reg)))
#define DLB2_FUNC_WR(hw, reg, value) \
	DLB2_PCI_REG_WRITE(DLB2_FUNC_REG_ADDR((hw), (reg)), (value))
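
/* Illustrative use of the register helpers (EXAMPLE_CSR_OFFSET is a
 * hypothetical placeholder, not a real DLB2 define). Both macros expand to
 * 32-bit MMIO accesses at a fixed offset from the mapped BAR:
 *
 *	u32 val;
 *
 *	val = DLB2_CSR_RD(hw, EXAMPLE_CSR_OFFSET);
 *	DLB2_CSR_WR(hw, EXAMPLE_CSR_OFFSET, val | 0x1);
 */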

/* Map to the PMD's logging interface */
#define DLB2_ERR(dev, fmt, args...) \
	DLB2_LOG_ERR(fmt, ## args)

#define DLB2_INFO(dev, fmt, args...) \
	DLB2_LOG_INFO(fmt, ## args)

#define DLB2_DEBUG(dev, fmt, args...) \
	DLB2_LOG_DBG(fmt, ## args)

/**
 * os_udelay() - busy-wait for a number of microseconds
 * @usecs: delay duration.
 */
static inline void os_udelay(int usecs)
{
	rte_delay_us(usecs);
}

/**
 * os_msleep() - sleep for a number of milliseconds
 * @msecs: delay duration.
 */
static inline void os_msleep(int msecs)
{
	rte_delay_ms(msecs);
}

#define DLB2_PP_BASE(__is_ldb) \
	((__is_ldb) ? DLB2_LDB_PP_BASE : DLB2_DIR_PP_BASE)

/**
 * os_map_producer_port() - map a producer port into the caller's address space
 * @hw: dlb2_hw handle for a particular device.
 * @port_id: port ID
 * @is_ldb: true for a load-balanced port, false for a directed port
 *
 * This function maps the requested producer port memory into the caller's
 * address space.
 *
 * Return:
 * Returns the base address at which the PP memory was mapped, else NULL.
 */
static inline void *os_map_producer_port(struct dlb2_hw *hw,
					 u8 port_id,
					 bool is_ldb)
{
	uint64_t addr;
	uint64_t pp_dma_base;

	pp_dma_base = (uintptr_t)hw->func_kva + DLB2_PP_BASE(is_ldb);
	addr = (pp_dma_base + (rte_mem_page_size() * port_id));

	return (void *)(uintptr_t)addr;
}
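
/* Address math sketch (derived from the code above, not an additional API):
 * each producer port occupies one page of the func BAR, so port N resolves
 * to func_kva + DLB2_{LDB,DIR}_PP_BASE + N * page_size. With 4 KiB pages,
 * for example, LDB port 3 maps at func_kva + DLB2_LDB_PP_BASE + 0x3000.
 */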

/**
 * os_unmap_producer_port() - unmap a producer port
 * @hw: dlb2_hw handle for a particular device.
 * @addr: mapped producer port address
 *
 * This function undoes os_map_producer_port() by unmapping the producer port
 * memory from the caller's address space. In this implementation it is a
 * no-op.
 */
static inline void os_unmap_producer_port(struct dlb2_hw *hw, void *addr)
{
	RTE_SET_USED(hw);
	RTE_SET_USED(addr);
}

/**
 * os_fence_hcw() - fence an HCW to ensure it arrives at the device
 * @hw: dlb2_hw handle for a particular device.
 * @pp_addr: producer port address
 */
static inline void os_fence_hcw(struct dlb2_hw *hw, u64 *pp_addr)
{
	RTE_SET_USED(hw);

	/* To ensure outstanding HCWs reach the device, read the PP address. IA
	 * memory ordering prevents reads from passing older writes, and the
	 * mfence also ensures this.
	 */
	rte_mb();

	*(volatile u64 *)pp_addr;
}
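
/* Typical enqueue-then-fence sequence (a hedged sketch; dlb2_pp_write() is a
 * hypothetical stand-in for whatever 64-byte store posts the HCW):
 *
 *	dlb2_pp_write(pp_addr, &hcw);	// post the hardware control word
 *	os_fence_hcw(hw, pp_addr);	// barrier + PP read to flush it
 */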

/**
 * DLB2_HW_ERR() - log an error message
 * @dlb2: dlb2_hw handle for a particular device.
 * @...: variable string args.
 */
#define DLB2_HW_ERR(dlb2, ...) do {	\
	RTE_SET_USED(dlb2);		\
	DLB2_ERR(dlb2, __VA_ARGS__);	\
} while (0)

/**
 * DLB2_HW_DBG() - log a debug message
 * @dlb2: dlb2_hw handle for a particular device.
 * @...: variable string args.
 */
#define DLB2_HW_DBG(dlb2, ...) do {	\
	RTE_SET_USED(dlb2);		\
	DLB2_DEBUG(dlb2, __VA_ARGS__);	\
} while (0)

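/* Usage sketch for the wrappers above (the message text and arguments are
 * illustrative, not taken from the driver):
 *
 *	DLB2_HW_ERR(hw, "%s: invalid port ID %u\n", __func__, port_id);
 */
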
/* The callback runs until it completes all outstanding QID->CQ
 * map and unmap requests. To prevent deadlock, this function gives other
 * threads a chance to grab the resource mutex and configure hardware.
 */
static void *dlb2_complete_queue_map_unmap(void *__args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)__args;
	int ret;

	while (1) {
		rte_spinlock_lock(&dlb2_dev->resource_mutex);

		ret = dlb2_finish_unmap_qid_procedures(&dlb2_dev->hw);
		ret += dlb2_finish_map_qid_procedures(&dlb2_dev->hw);

		if (ret != 0) {
			rte_spinlock_unlock(&dlb2_dev->resource_mutex);
			/* Relinquish the CPU so the application can process
			 * its CQs, so this function doesn't deadlock.
			 */
			sched_yield();
		} else {
			break;
		}
	}

	dlb2_dev->worker_launched = false;

	rte_spinlock_unlock(&dlb2_dev->resource_mutex);

	return NULL;
}


/**
 * os_schedule_work() - launch a thread to process pending map and unmap work
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function launches a control thread that will run until all pending
 * map and unmap procedures are complete.
 */
static inline void os_schedule_work(struct dlb2_hw *hw)
{
	struct dlb2_dev *dlb2_dev;
	pthread_t complete_queue_map_unmap_thread;
	int ret;

	dlb2_dev = container_of(hw, struct dlb2_dev, hw);

	ret = rte_ctrl_thread_create(&complete_queue_map_unmap_thread,
				     "dlb_queue_unmap_waiter",
				     NULL,
				     dlb2_complete_queue_map_unmap,
				     dlb2_dev);
	if (ret)
		DLB2_ERR(dlb2_dev,
			 "Could not create queue complete map/unmap thread, err=%d\n",
			 ret);
	else
		dlb2_dev->worker_launched = true;
}

/**
 * os_worker_active() - query whether the map/unmap worker thread is active
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function returns a boolean indicating whether a thread (launched by
 * os_schedule_work()) is active. This function is used to determine
 * whether or not to launch a worker thread.
 */
static inline bool os_worker_active(struct dlb2_hw *hw)
{
	struct dlb2_dev *dlb2_dev;

	dlb2_dev = container_of(hw, struct dlb2_dev, hw);

	return dlb2_dev->worker_launched;
}
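
/* Typical caller pattern implied by the comment above (a sketch, not a
 * verbatim excerpt from the driver):
 *
 *	if (!os_worker_active(hw))
 *		os_schedule_work(hw);
 */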

#endif /* __DLB2_OSDEP_H */