/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright 2017-2022 NXP
 *
 */
#ifndef __RTE_DPAA_BUS_H__
#define __RTE_DPAA_BUS_H__

#include <rte_bus.h>
#include <rte_mbuf_dyn.h>
#include <rte_mempool.h>
#include <dpaax_iova_table.h>

#include <dpaa_of.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <netcfg.h>

#ifdef __cplusplus
extern "C" {
#endif

/* This sequence number field is used to store the event entry index for
 * driver-specific usage. For parallel mode queues, an invalid index is
 * set; for atomic mode queues, a valid value in the range 1 to 16 is set.
 */
#define DPAA_INVALID_MBUF_SEQN	0

typedef uint32_t dpaa_seqn_t;
extern int dpaa_seqn_dynfield_offset;

/**
 * Read dpaa sequence number from mbuf.
 *
 * @param mbuf Structure to read from.
 * @return pointer to dpaa sequence number.
 */
__rte_internal
static inline dpaa_seqn_t *
dpaa_seqn(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf, dpaa_seqn_dynfield_offset,
			dpaa_seqn_t *);
}
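
/*
 * Illustrative usage sketch (not part of the API): code with access to this
 * internal header can stash and later read back a per-mbuf sequence number
 * through the dynamic field accessor above. The mbuf "m" and pool "pool"
 * below are placeholders.
 *
 *	struct rte_mbuf *m = rte_pktmbuf_alloc(pool);
 *
 *	*dpaa_seqn(m) = DPAA_INVALID_MBUF_SEQN;	// parallel mode: no index
 *	...
 *	if (*dpaa_seqn(m) != DPAA_INVALID_MBUF_SEQN) {
 *		// atomic mode: an entry index in 1..16 was recorded
 *	}
 */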

#define DPAA_MEMPOOL_OPS_NAME	"dpaa"
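
/*
 * Illustrative sketch: an application can request the hardware-backed
 * "dpaa" mempool handler by name when creating a packet pool. The pool
 * name and sizing parameters below are placeholders.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create_by_ops("pkt_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id(),
 *			DPAA_MEMPOOL_OPS_NAME);
 */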

#define DEV_TO_DPAA_DEVICE(ptr) \
		container_of(ptr, struct rte_dpaa_device, device)

/* DPAA SoC identifier; if this file is not available, the board can be
 * concluded to be non-DPAA. A single slot is currently supported.
 */
#define DPAA_SOC_ID_FILE	"/sys/devices/soc0/soc_id"

#define SVR_LS1043A_FAMILY	0x87920000
#define SVR_LS1046A_FAMILY	0x87070000
#define SVR_MASK		0xffff0000

/** Device driver supports link state interrupt */
#define RTE_DPAA_DRV_INTR_LSC	0x0008

/** Number of supported QDMA devices */
#define RTE_DPAA_QDMA_DEVICES	1

#define RTE_DEV_TO_DPAA_CONST(ptr) \
	container_of(ptr, const struct rte_dpaa_device, device)

extern unsigned int dpaa_svr_family;
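
/*
 * Illustrative sketch: drivers typically mask the SVR value with SVR_MASK
 * and compare it against a family constant to apply SoC-specific behaviour.
 * Only the comparison pattern is shown here.
 *
 *	if ((dpaa_svr_family & SVR_MASK) == SVR_LS1046A_FAMILY) {
 *		// LS1046A-specific tuning would go here
 *	}
 */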

struct rte_dpaa_device;
struct rte_dpaa_driver;

/* DPAA Device and Driver lists for DPAA bus */
TAILQ_HEAD(rte_dpaa_device_list, rte_dpaa_device);
TAILQ_HEAD(rte_dpaa_driver_list, rte_dpaa_driver);

enum rte_dpaa_type {
	FSL_DPAA_ETH = 1,
	FSL_DPAA_CRYPTO,
	FSL_DPAA_QDMA
};

struct rte_dpaa_bus {
	struct rte_bus bus;
	struct rte_dpaa_device_list device_list;
	struct rte_dpaa_driver_list driver_list;
	int device_count;
	int detected;
};

struct dpaa_device_id {
	uint8_t fman_id; /**< Fman interface ID, for ETH type device */
	uint8_t mac_id; /**< Fman MAC interface ID, for ETH type device */
	uint16_t dev_id; /**< Device Identifier from DPDK */
};

struct rte_dpaa_device {
	TAILQ_ENTRY(rte_dpaa_device) next;
	struct rte_device device;
	union {
		struct rte_eth_dev *eth_dev;
		struct rte_cryptodev *crypto_dev;
		struct rte_dma_dev *dmadev;
	};
	struct rte_dpaa_driver *driver;
	struct dpaa_device_id id;
	struct rte_intr_handle *intr_handle;
	enum rte_dpaa_type device_type; /**< Ethernet, crypto or QDMA device */
	char name[RTE_ETH_NAME_MAX_LEN];
};

typedef int (*rte_dpaa_probe_t)(struct rte_dpaa_driver *dpaa_drv,
				struct rte_dpaa_device *dpaa_dev);
typedef int (*rte_dpaa_remove_t)(struct rte_dpaa_device *dpaa_dev);

struct rte_dpaa_driver {
	TAILQ_ENTRY(rte_dpaa_driver) next;
	struct rte_driver driver;
	struct rte_dpaa_bus *dpaa_bus;
	enum rte_dpaa_type drv_type;
	rte_dpaa_probe_t probe;
	rte_dpaa_remove_t remove;
	uint32_t drv_flags; /**< Flags for controlling device. */
};

/* Create storage for dqrr entries per lcore */
#define DPAA_PORTAL_DEQUEUE_DEPTH	16
struct dpaa_portal_dqrr {
	void *mbuf[DPAA_PORTAL_DEQUEUE_DEPTH];
	uint64_t dqrr_held;
	uint8_t dqrr_size;
};

struct dpaa_portal {
	uint32_t bman_idx; /**< BMAN Portal ID */
	uint32_t qman_idx; /**< QMAN Portal ID */
	struct dpaa_portal_dqrr dpaa_held_bufs;
	uint64_t tid; /**< Parent thread id for this portal */
};

RTE_DECLARE_PER_LCORE(struct dpaa_portal *, dpaa_io);

#define DPAA_PER_LCORE_PORTAL \
	RTE_PER_LCORE(dpaa_io)
#define DPAA_PER_LCORE_DQRR_SIZE \
	RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.dqrr_size
#define DPAA_PER_LCORE_DQRR_HELD \
	RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.dqrr_held
#define DPAA_PER_LCORE_DQRR_MBUF(i) \
	RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.mbuf[i]
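
/*
 * Illustrative sketch: dqrr_held is a bitmask of DQRR entries the current
 * lcore is still holding (atomic queues), with the matching mbufs stored in
 * the per-lcore array. A driver-side consumer could walk the held slots
 * roughly like this; names other than the macros are placeholders.
 *
 *	uint8_t i;
 *
 *	for (i = 0; i < DPAA_PORTAL_DEQUEUE_DEPTH; i++) {
 *		if (DPAA_PER_LCORE_DQRR_HELD & (1ULL << i)) {
 *			struct rte_mbuf *m = DPAA_PER_LCORE_DQRR_MBUF(i);
 *			// process or release the entry held at index i
 *		}
 *	}
 */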

/* Various structures representing contiguous memory maps */
struct dpaa_memseg {
	TAILQ_ENTRY(dpaa_memseg) next;
	char *vaddr;
	rte_iova_t iova;
	size_t len;
};

TAILQ_HEAD(dpaa_memseg_list, dpaa_memseg);
extern struct dpaa_memseg_list rte_dpaa_memsegs;

/* Either iterate over the list of internal memseg references or fall back
 * to the EAL memseg based iova2virt lookup.
 */
static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
{
	struct dpaa_memseg *ms;
	void *va;

	va = dpaax_iova_table_get_va(paddr);
	if (likely(va != NULL))
		return va;

	/* Check if the address is already part of the memseg list internally
	 * maintained by the dpaa driver.
	 */
	TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
		if (paddr >= ms->iova && paddr <
			ms->iova + ms->len)
			return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
	}

	/* If not, fall back to a full memseg list search */
	va = rte_mem_iova2virt(paddr);

	dpaax_iova_table_update(paddr, va, RTE_CACHE_LINE_SIZE);

	return va;
}

static inline rte_iova_t
rte_dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms)
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);

	return (size_t)NULL;
}
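
/*
 * Illustrative sketch: the two helpers above translate between IOVA
 * (physical) and virtual addresses for buffers exchanged with the hardware.
 * "buf" is assumed to point into hugepage-backed DPDK memory.
 *
 *	rte_iova_t iova = rte_dpaa_mem_vtop(buf);
 *	void *va = rte_dpaa_mem_ptov(iova);
 *	// va is expected to equal buf for memory known to the memseg lists
 */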

/**
 * Register a DPAA driver.
 *
 * @param driver
 *	A pointer to a rte_dpaa_driver structure describing the driver
 *	to be registered.
 */
__rte_internal
void rte_dpaa_driver_register(struct rte_dpaa_driver *driver);

/**
 * Unregister a DPAA driver.
 *
 * @param driver
 *	A pointer to a rte_dpaa_driver structure describing the driver
 *	to be unregistered.
 */
__rte_internal
void rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver);

/**
 * Initialize a DPAA portal
 *
 * @param arg
 *	Per thread ID
 *
 * @return
 *	0 in case of success, error otherwise
 */
__rte_internal
int rte_dpaa_portal_init(void *arg);

__rte_internal
int rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq);

__rte_internal
int rte_dpaa_portal_fq_close(struct qman_fq *fq);

/**
 * Cleanup a DPAA Portal
 */
void dpaa_portal_finish(void *arg);
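
/*
 * Illustrative sketch: a worker thread attaches to a QMAN/BMAN portal before
 * touching frame queues and cleans up on exit. Error handling is abbreviated,
 * the (void *)0 thread-id argument and the frame queue "fq" (configured
 * elsewhere) are placeholders.
 *
 *	struct qman_fq fq;
 *
 *	if (rte_dpaa_portal_init((void *)0) != 0)
 *		return -1;
 *	if (rte_dpaa_portal_fq_init((void *)0, &fq) != 0)
 *		return -1;
 *	...
 *	rte_dpaa_portal_fq_close(&fq);
 *	dpaa_portal_finish((void *)DPAA_PER_LCORE_PORTAL);
 */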

/** Helper for DPAA device registration from driver (eth, crypto) instance */
#define RTE_PMD_REGISTER_DPAA(nm, dpaa_drv) \
RTE_INIT(dpaainitfn_ ##nm) \
{\
	(dpaa_drv).driver.name = RTE_STR(nm);\
	rte_dpaa_driver_register(&dpaa_drv); \
} \
RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
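
/*
 * Illustrative sketch: a PMD built on this bus typically fills in a static
 * rte_dpaa_driver and registers it with the macro above. The probe/remove
 * callbacks and driver name below are placeholders, not an existing PMD.
 *
 *	static int my_dpaa_probe(struct rte_dpaa_driver *drv,
 *				 struct rte_dpaa_device *dev)
 *	{
 *		// set up the device (eth/crypto/qdma specific)
 *		return 0;
 *	}
 *
 *	static int my_dpaa_remove(struct rte_dpaa_device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static struct rte_dpaa_driver my_dpaa_driver = {
 *		.drv_type = FSL_DPAA_ETH,
 *		.probe = my_dpaa_probe,
 *		.remove = my_dpaa_remove,
 *	};
 *
 *	RTE_PMD_REGISTER_DPAA(net_my_dpaa, my_dpaa_driver);
 */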

__rte_internal
struct fm_eth_port_cfg *dpaa_get_eth_port_cfg(int dev_id);

#ifdef __cplusplus
}
#endif

#endif /* __RTE_DPAA_BUS_H__ */