/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef __DLB_OSDEP_H__
#define __DLB_OSDEP_H__

#include <string.h>
#include <time.h>
#include <unistd.h>
#include <cpuid.h>
#include <pthread.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_log.h>
#include <rte_spinlock.h>
#include "../dlb_main.h"
#include "dlb_resource.h"
#include "../../dlb_log.h"
#include "../../dlb_user.h"

#define DLB_PCI_REG_READ(reg)        rte_read32((void *)(reg))
#define DLB_PCI_REG_WRITE(reg, val)  rte_write32((val), (void *)(reg))

#define DLB_CSR_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->csr_kva + (reg)))
#define DLB_CSR_RD(hw, reg) \
	DLB_PCI_REG_READ(DLB_CSR_REG_ADDR((hw), (reg)))
#define DLB_CSR_WR(hw, reg, val) \
	DLB_PCI_REG_WRITE(DLB_CSR_REG_ADDR((hw), (reg)), (val))

#define DLB_FUNC_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->func_kva + (reg)))
#define DLB_FUNC_RD(hw, reg) \
	DLB_PCI_REG_READ(DLB_FUNC_REG_ADDR((hw), (reg)))
#define DLB_FUNC_WR(hw, reg, val) \
	DLB_PCI_REG_WRITE(DLB_FUNC_REG_ADDR((hw), (reg)), (val))
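
/*
 * Usage sketch (illustrative only; DLB_EXAMPLE_REG is a placeholder offset,
 * not a real register name). CSR accesses resolve to rte_read32()/
 * rte_write32() against the BAR mapping held in the dlb_hw handle:
 *
 *	u32 r = DLB_CSR_RD(hw, DLB_EXAMPLE_REG);
 *	DLB_CSR_WR(hw, DLB_EXAMPLE_REG, r | 0x1);
 */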

extern unsigned int dlb_unregister_timeout_s;
/**
 * os_queue_unregister_timeout_s() - timeout (in seconds) to wait for queue
 *                                   unregister acknowledgments.
 */
static inline unsigned int os_queue_unregister_timeout_s(void)
{
	return dlb_unregister_timeout_s;
}

static inline size_t os_strlcpy(char *dst, const char *src, size_t sz)
{
	return rte_strlcpy(dst, src, sz);
}

/**
 * os_udelay() - busy-wait for a number of microseconds
 * @usecs: delay duration.
 */
static inline void os_udelay(int usecs)
{
	rte_delay_us(usecs);
}

/**
 * os_msleep() - sleep for a number of milliseconds
 * @msecs: delay duration.
 */
static inline void os_msleep(int msecs)
{
	rte_delay_ms(msecs);
}

#define DLB_PP_BASE(__is_ldb) ((__is_ldb) ? DLB_LDB_PP_BASE : DLB_DIR_PP_BASE)
/**
 * os_map_producer_port() - map a producer port into the caller's address space
 * @hw: dlb_hw handle for a particular device.
 * @port_id: port ID
 * @is_ldb: true for load-balanced port, false for a directed port
 *
 * This function maps the requested producer port memory into the caller's
 * address space.
 *
 * Return:
 * Returns the base address at which the PP memory was mapped, else NULL.
 */
static inline void *os_map_producer_port(struct dlb_hw *hw,
					 u8 port_id,
					 bool is_ldb)
{
	uint64_t addr;
	uint64_t pp_dma_base;

	pp_dma_base = (uintptr_t)hw->func_kva + DLB_PP_BASE(is_ldb);
	addr = (pp_dma_base + (PAGE_SIZE * port_id));

	return (void *)(uintptr_t)addr;
}
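
/*
 * Usage sketch (hypothetical port ID; illustrative only). The PF PMD does no
 * real mmap here: the returned pointer is an offset into the BAR already
 * mapped at hw->func_kva, one PAGE_SIZE stride per port.
 *
 *	void *pp_addr = os_map_producer_port(hw, 0, true);
 *	...
 *	os_unmap_producer_port(hw, pp_addr);
 */
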
/**
 * os_unmap_producer_port() - unmap a producer port
 * @hw: dlb_hw handle for a particular device.
 * @addr: mapped producer port address
 *
 * This function undoes os_map_producer_port() by unmapping the producer port
 * memory from the caller's address space.
 */

/* PFPMD - Nothing to do here, since memory was not actually mapped by us */
static inline void os_unmap_producer_port(struct dlb_hw *hw, void *addr)
{
	RTE_SET_USED(hw);
	RTE_SET_USED(addr);
}

/**
 * os_fence_hcw() - fence an HCW to ensure it arrives at the device
 * @hw: dlb_hw handle for a particular device.
 * @pp_addr: producer port address
 */
static inline void os_fence_hcw(struct dlb_hw *hw, u64 *pp_addr)
{
	RTE_SET_USED(hw);

	/* To ensure outstanding HCWs reach the device, read the PP address. IA
	 * memory ordering prevents reads from passing older writes, and the
	 * mfence also ensures this.
	 */
	rte_mb();

	*(volatile u64 *)pp_addr;
}

/* Map to the PMD's logging interface */
#define DLB_ERR(dev, fmt, args...) \
	DLB_LOG_ERR(fmt, ## args)

#define DLB_INFO(dev, fmt, args...) \
	DLB_LOG_INFO(fmt, ## args)

#define DLB_DEBUG(dev, fmt, args...) \
	DLB_LOG_DEBUG(fmt, ## args)

/**
 * DLB_HW_ERR() - log an error message
 * @dlb: dlb_hw handle for a particular device.
 * @...: variable string args.
 */
#define DLB_HW_ERR(dlb, ...) do {	\
	RTE_SET_USED(dlb);		\
	DLB_ERR(dlb, __VA_ARGS__);	\
} while (0)

/**
 * DLB_HW_INFO() - log an info message
 * @dlb: dlb_hw handle for a particular device.
 * @...: variable string args.
 */
#define DLB_HW_INFO(dlb, ...) do {	\
	RTE_SET_USED(dlb);		\
	DLB_INFO(dlb, __VA_ARGS__);	\
} while (0)
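
/*
 * Usage sketch (hypothetical message): the dlb/dev argument is only consumed
 * by RTE_SET_USED(); the format string and arguments are forwarded to the
 * PMD's DLB_LOG_* macros.
 *
 *	DLB_HW_ERR(hw, "%s: invalid domain ID %u\n", __func__, domain_id);
 */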

/*** scheduling functions ***/

/* The callback runs until it completes all outstanding QID->CQ
 * map and unmap requests. To prevent deadlock, this function gives other
 * threads a chance to grab the resource mutex and configure hardware.
 */
static void *dlb_complete_queue_map_unmap(void *__args)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)__args;
	int ret;

	while (1) {
		rte_spinlock_lock(&dlb_dev->resource_mutex);

		ret = dlb_finish_unmap_qid_procedures(&dlb_dev->hw);
		ret += dlb_finish_map_qid_procedures(&dlb_dev->hw);

		if (ret != 0) {
			rte_spinlock_unlock(&dlb_dev->resource_mutex);
			/* Relinquish the CPU so the application can process
			 * its CQs, so this function does not deadlock.
			 */
			sched_yield();
		} else
			break;
	}

	dlb_dev->worker_launched = false;

	rte_spinlock_unlock(&dlb_dev->resource_mutex);

	return NULL;
}

/**
 * os_schedule_work() - launch a thread to process pending map and unmap work
 * @hw: dlb_hw handle for a particular device.
 *
 * This function launches a thread that will run until all pending
 * map and unmap procedures are complete.
 */
static inline void os_schedule_work(struct dlb_hw *hw)
{
	struct dlb_dev *dlb_dev;
	pthread_t complete_queue_map_unmap_thread;
	int ret;

	dlb_dev = container_of(hw, struct dlb_dev, hw);

	ret = rte_ctrl_thread_create(&complete_queue_map_unmap_thread,
				     "dlb_queue_unmap_waiter",
				     NULL,
				     dlb_complete_queue_map_unmap,
				     dlb_dev);
	if (ret)
		DLB_ERR(dlb_dev,
			"Could not create queue complete map/unmap thread, err=%d\n",
			ret);
	else
		dlb_dev->worker_launched = true;
}

/**
 * os_worker_active() - query whether the map/unmap worker thread is active
 * @hw: dlb_hw handle for a particular device.
 *
 * This function returns a boolean indicating whether a thread (launched by
 * os_schedule_work()) is active. This function is used to determine
 * whether or not to launch a worker thread.
 */
static inline bool os_worker_active(struct dlb_hw *hw)
{
	struct dlb_dev *dlb_dev;

	dlb_dev = container_of(hw, struct dlb_dev, hw);

	return dlb_dev->worker_launched;
}

/**
 * os_notify_user_space() - notify user space
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: ID of domain to notify.
 * @alert_id: alert ID.
 * @aux_alert_data: additional alert data.
 *
 * This function notifies user space of an alert (such as a remote queue
 * unregister or hardware alarm).
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 */
static inline int os_notify_user_space(struct dlb_hw *hw,
				       u32 domain_id,
				       u64 alert_id,
				       u64 aux_alert_data)
{
	RTE_SET_USED(hw);
	RTE_SET_USED(domain_id);
	RTE_SET_USED(alert_id);
	RTE_SET_USED(aux_alert_data);

	/* Not called for PF PMD */
	return -1;
}

enum dlb_dev_revision {
	DLB_A0,
	DLB_A1,
	DLB_A2,
	DLB_A3,
	DLB_B0,
};

/**
 * os_get_dev_revision() - query the device revision
 * @hw: dlb_hw handle for a particular device.
 */
static inline enum dlb_dev_revision os_get_dev_revision(struct dlb_hw *hw)
{
	uint32_t a, b, c, d, stepping;

	RTE_SET_USED(hw);

	__cpuid(0x1, a, b, c, d);

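	/* CPUID leaf 0x1: EAX bits [3:0] hold the processor stepping ID. */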
	stepping = a & 0xf;

	switch (stepping) {
	case 0:
		return DLB_A0;
	case 1:
		return DLB_A1;
	case 2:
		return DLB_A2;
	case 3:
		return DLB_A3;
	default:
		/* Treat all revisions >= 4 as B0 */
		return DLB_B0;
	}
}

#endif /* __DLB_OSDEP_H__ */