/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef __DLB2_OSDEP_H
#define __DLB2_OSDEP_H

#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sched.h>
#include <pthread.h>

#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_log.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
#include "../dlb2_main.h"
#include "dlb2_resource.h"
#include "../../dlb2_log.h"
#include "../../dlb2_user.h"

#define DLB2_PCI_REG_READ(addr) rte_read32((void *)(addr))
#define DLB2_PCI_REG_WRITE(reg, value) rte_write32((value), (void *)(reg))

/* Read/write register 'reg' in the CSR BAR space */
#define DLB2_CSR_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->csr_kva + (reg)))
#define DLB2_CSR_RD(hw, reg) \
	DLB2_PCI_REG_READ(DLB2_CSR_REG_ADDR((hw), (reg)))
#define DLB2_CSR_WR(hw, reg, value) \
	DLB2_PCI_REG_WRITE(DLB2_CSR_REG_ADDR((hw), (reg)), (value))

/* Read/write register 'reg' in the func BAR space */
#define DLB2_FUNC_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->func_kva + (reg)))
#define DLB2_FUNC_RD(hw, reg) \
	DLB2_PCI_REG_READ(DLB2_FUNC_REG_ADDR((hw), (reg)))
#define DLB2_FUNC_WR(hw, reg, value) \
	DLB2_PCI_REG_WRITE(DLB2_FUNC_REG_ADDR((hw), (reg)), (value))
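
/*
 * Usage sketch (DLB2_EXAMPLE_REG is a hypothetical offset; real register
 * offsets come from the device's register definitions): a read-modify-write
 * of a CSR-space register.
 *
 *	u32 val = DLB2_CSR_RD(hw, DLB2_EXAMPLE_REG);
 *	DLB2_CSR_WR(hw, DLB2_EXAMPLE_REG, val | 0x1);
 */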

/* Map to the PMD's logging interface */
#define DLB2_ERR(dev, fmt, args...) \
	DLB2_LOG_ERR(fmt, ## args)

#define DLB2_INFO(dev, fmt, args...) \
	DLB2_LOG_INFO(fmt, ## args)

#define DLB2_DEBUG(dev, fmt, args...) \
	DLB2_LOG_DBG(fmt, ## args)
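
/*
 * Example use (illustrative message; the dev argument is accepted but unused
 * by these wrappers):
 *
 *	DLB2_ERR(dev, "invalid port ID %d\n", port_id);
 */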

/**
 * os_udelay() - busy-wait for a number of microseconds
 * @usecs: delay duration.
 */
static inline void os_udelay(int usecs)
{
	rte_delay_us(usecs);
}

/**
 * os_msleep() - sleep for a number of milliseconds
 * @msecs: delay duration.
 */
static inline void os_msleep(int msecs)
{
	rte_delay_ms(msecs);
}

#define DLB2_PP_BASE(__is_ldb) \
	((__is_ldb) ? DLB2_LDB_PP_BASE : DLB2_DIR_PP_BASE)

/**
 * os_map_producer_port() - map a producer port into the caller's address space
 * @hw: dlb2_hw handle for a particular device.
 * @port_id: port ID
 * @is_ldb: true for load-balanced port, false for a directed port
 *
 * This function maps the requested producer port memory into the caller's
 * address space.
 *
 * Return:
 * Returns the base address at which the PP memory was mapped, else NULL.
 */
static inline void *os_map_producer_port(struct dlb2_hw *hw,
					 u8 port_id,
					 bool is_ldb)
{
	uint64_t addr;
	uint64_t pp_dma_base;

	pp_dma_base = (uintptr_t)hw->func_kva + DLB2_PP_BASE(is_ldb);
	addr = (pp_dma_base + (PAGE_SIZE * port_id));

	return (void *)(uintptr_t)addr;
}
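
/*
 * Usage sketch (the port ID is illustrative): compute the mapped base
 * address of load-balanced producer port 0.
 *
 *	u64 *pp_addr = os_map_producer_port(hw, 0, true);
 */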

/**
 * os_unmap_producer_port() - unmap a producer port
 * @hw: dlb2_hw handle for a particular device.
 * @addr: mapped producer port address
 *
 * This function undoes os_map_producer_port() by unmapping the producer port
 * memory from the caller's address space. In this environment the function
 * is a no-op.
 */
static inline void os_unmap_producer_port(struct dlb2_hw *hw, void *addr)
{
	RTE_SET_USED(hw);
	RTE_SET_USED(addr);
}

/**
 * os_fence_hcw() - fence an HCW to ensure it arrives at the device
 * @hw: dlb2_hw handle for a particular device.
 * @pp_addr: producer port address
 */
static inline void os_fence_hcw(struct dlb2_hw *hw, u64 *pp_addr)
{
	RTE_SET_USED(hw);

	/* To ensure outstanding HCWs reach the device, read the PP address. IA
	 * memory ordering prevents reads from passing older writes, and the
	 * mfence also ensures this.
	 */
	rte_mb();

	*(volatile u64 *)pp_addr;
}
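
/*
 * Usage sketch: pair the mapping above with a fence once HCWs have been
 * written, so the posted writes reach the device before software relies on
 * their effects.
 *
 *	... write HCW(s) to pp_addr ...
 *	os_fence_hcw(hw, pp_addr);
 */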

/**
 * DLB2_HW_ERR() - log an error message
 * @dlb2: dlb2_hw handle for a particular device.
 * @...: variable string args.
 */
#define DLB2_HW_ERR(dlb2, ...) do {	\
	RTE_SET_USED(dlb2);		\
	DLB2_ERR(dlb2, __VA_ARGS__);	\
} while (0)

/**
 * DLB2_HW_DBG() - log a debug message
 * @dlb2: dlb2_hw handle for a particular device.
 * @...: variable string args.
 */
#define DLB2_HW_DBG(dlb2, ...) do {	\
	RTE_SET_USED(dlb2);		\
	DLB2_DEBUG(dlb2, __VA_ARGS__);	\
} while (0)

/* The callback runs until it completes all outstanding QID->CQ
 * map and unmap requests. To prevent deadlock, this function gives other
 * threads a chance to grab the resource mutex and configure hardware.
 */
static void *dlb2_complete_queue_map_unmap(void *__args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)__args;
	int ret;

	while (1) {
		rte_spinlock_lock(&dlb2_dev->resource_mutex);

		ret = dlb2_finish_unmap_qid_procedures(&dlb2_dev->hw);
		ret += dlb2_finish_map_qid_procedures(&dlb2_dev->hw);

		if (ret != 0) {
			rte_spinlock_unlock(&dlb2_dev->resource_mutex);
			/* Relinquish the CPU so the application can process
			 * its CQs, so this function doesn't deadlock.
			 */
			sched_yield();
		} else {
			break;
		}
	}

	dlb2_dev->worker_launched = false;

	rte_spinlock_unlock(&dlb2_dev->resource_mutex);

	return NULL;
}

/**
 * os_schedule_work() - launch a thread to process pending map and unmap work
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function launches a control thread that will run until all pending
 * map and unmap procedures are complete.
 */
static inline void os_schedule_work(struct dlb2_hw *hw)
{
	struct dlb2_dev *dlb2_dev;
	pthread_t complete_queue_map_unmap_thread;
	int ret;

	dlb2_dev = container_of(hw, struct dlb2_dev, hw);

	ret = rte_ctrl_thread_create(&complete_queue_map_unmap_thread,
				     "dlb_queue_unmap_waiter",
				     NULL,
				     dlb2_complete_queue_map_unmap,
				     dlb2_dev);
	if (ret)
		DLB2_ERR(dlb2_dev,
			 "Could not create queue complete map/unmap thread, err=%d\n",
			 ret);
	else
		dlb2_dev->worker_launched = true;
}

/**
 * os_worker_active() - query whether the map/unmap worker thread is active
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function returns a boolean indicating whether a thread (launched by
 * os_schedule_work()) is active. This function is used to determine
 * whether or not to launch a worker thread.
 */
static inline bool os_worker_active(struct dlb2_hw *hw)
{
	struct dlb2_dev *dlb2_dev;

	dlb2_dev = container_of(hw, struct dlb2_dev, hw);

	return dlb2_dev->worker_launched;
}
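
/*
 * Typical caller pattern (sketch, per the doc comments above): launch the
 * worker only if one isn't already running.
 *
 *	if (!os_worker_active(hw))
 *		os_schedule_work(hw);
 */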

#endif /* __DLB2_OSDEP_H */