/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright 2017-2020 NXP
 *
 */
#ifndef __RTE_DPAA_BUS_H__
#define __RTE_DPAA_BUS_H__

#include <rte_bus.h>
#include <rte_mbuf_dyn.h>
#include <rte_mempool.h>
#include <dpaax_iova_table.h>

#include <dpaa_of.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <netcfg.h>

#ifdef __cplusplus
extern "C" {
#endif

/* This sequence number field is used to store the event entry index for
 * driver-specific usage. For parallel mode queues, an invalid index is
 * set; for atomic mode queues, a valid value in the range 1 to 16 is
 * used.
 */
#define DPAA_INVALID_MBUF_SEQN	0

typedef uint32_t dpaa_seqn_t;
extern int dpaa_seqn_dynfield_offset;

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Read dpaa sequence number from mbuf.
 *
 * @param mbuf Structure to read from.
 * @return pointer to dpaa sequence number.
 */
__rte_experimental
static inline dpaa_seqn_t *
dpaa_seqn(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf, dpaa_seqn_dynfield_offset,
			dpaa_seqn_t *);
}
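
/*
 * Usage sketch (illustrative only, loosely following the dpaa PMD pattern;
 * "m" is an assumed mbuf received through the DPAA event/Rx path and
 * "handle_held_dqrr_entry" is a hypothetical helper):
 *
 *	dpaa_seqn_t seqn = *dpaa_seqn(m);
 *
 *	if (seqn != DPAA_INVALID_MBUF_SEQN)
 *		handle_held_dqrr_entry(seqn - 1);
 */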

#define DPAA_MEMPOOL_OPS_NAME	"dpaa"
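
/*
 * Illustrative mempool setup (a sketch; the pool name and sizes below are
 * placeholders, not prescribed by this header): selecting the DPAA buffer
 * pool backend for an application-created mempool.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("pkt_pool", 8192,
 *			RTE_MBUF_DEFAULT_BUF_SIZE, 256,
 *			sizeof(struct rte_pktmbuf_pool_private),
 *			rte_socket_id(), 0);
 *	if (mp != NULL)
 *		rte_mempool_set_ops_byname(mp, DPAA_MEMPOOL_OPS_NAME, NULL);
 */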

#define DEV_TO_DPAA_DEVICE(ptr) \
		container_of(ptr, struct rte_dpaa_device, device)
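
/*
 * Example (hypothetical callback, for illustration only): recovering the
 * DPAA device from a generic rte_device pointer with the helper above.
 * device_type and FSL_DPAA_ETH are defined further below in this header.
 *
 *	static int
 *	example_dev_cb(struct rte_device *dev)
 *	{
 *		struct rte_dpaa_device *dpaa_dev = DEV_TO_DPAA_DEVICE(dev);
 *
 *		return dpaa_dev->device_type == FSL_DPAA_ETH ? 0 : -ENODEV;
 *	}
 */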

/* DPAA SoC identifier; if this is not available, it can be concluded
 * that the board is non-DPAA. Single slot is currently supported.
 */
#define DPAA_SOC_ID_FILE	"/sys/devices/soc0/soc_id"

#define SVR_LS1043A_FAMILY	0x87920000
#define SVR_LS1046A_FAMILY	0x87070000
#define SVR_MASK		0xffff0000

/** Device driver supports link state interrupt */
#define RTE_DPAA_DRV_INTR_LSC	0x0008

#define RTE_DEV_TO_DPAA_CONST(ptr) \
	container_of(ptr, const struct rte_dpaa_device, device)

extern unsigned int dpaa_svr_family;
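
/*
 * Illustrative SoC check (a sketch of how the PMDs typically use these
 * values; dpaa_svr_family is assumed to hold the SVR already masked with
 * SVR_MASK):
 *
 *	if (dpaa_svr_family == SVR_LS1046A_FAMILY) {
 *		// Apply LS1046A-specific tuning here.
 *	}
 */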

struct rte_dpaa_device;
struct rte_dpaa_driver;

/* DPAA Device and Driver lists for DPAA bus */
TAILQ_HEAD(rte_dpaa_device_list, rte_dpaa_device);
TAILQ_HEAD(rte_dpaa_driver_list, rte_dpaa_driver);

enum rte_dpaa_type {
	FSL_DPAA_ETH = 1,
	FSL_DPAA_CRYPTO,
};

struct rte_dpaa_bus {
	struct rte_bus bus;
	struct rte_dpaa_device_list device_list;
	struct rte_dpaa_driver_list driver_list;
	int device_count;
	int detected;
};

struct dpaa_device_id {
	uint8_t fman_id; /**< Fman interface ID, for ETH type device */
	uint8_t mac_id; /**< Fman MAC interface ID, for ETH type device */
	uint16_t dev_id; /**< Device Identifier from DPDK */
};

struct rte_dpaa_device {
	TAILQ_ENTRY(rte_dpaa_device) next;
	struct rte_device device;
	union {
		struct rte_eth_dev *eth_dev;
		struct rte_cryptodev *crypto_dev;
	};
	struct rte_dpaa_driver *driver;
	struct dpaa_device_id id;
	struct rte_intr_handle intr_handle;
	enum rte_dpaa_type device_type; /**< Ethernet or crypto type device */
	char name[RTE_ETH_NAME_MAX_LEN];
};

typedef int (*rte_dpaa_probe_t)(struct rte_dpaa_driver *dpaa_drv,
				struct rte_dpaa_device *dpaa_dev);
typedef int (*rte_dpaa_remove_t)(struct rte_dpaa_device *dpaa_dev);

struct rte_dpaa_driver {
	TAILQ_ENTRY(rte_dpaa_driver) next;
	struct rte_driver driver;
	struct rte_dpaa_bus *dpaa_bus;
	enum rte_dpaa_type drv_type;
	rte_dpaa_probe_t probe;
	rte_dpaa_remove_t remove;
	uint32_t drv_flags; /**< Flags for controlling device. */
};

/* Create storage for dqrr entries per lcore */
#define DPAA_PORTAL_DEQUEUE_DEPTH	16
struct dpaa_portal_dqrr {
	void *mbuf[DPAA_PORTAL_DEQUEUE_DEPTH];
	uint64_t dqrr_held;
	uint8_t dqrr_size;
};

struct dpaa_portal {
	uint32_t bman_idx; /**< BMAN Portal ID */
	uint32_t qman_idx; /**< QMAN Portal ID */
	struct dpaa_portal_dqrr dpaa_held_bufs;
	struct rte_crypto_op **dpaa_sec_ops;
	int dpaa_sec_op_nb;
	uint64_t tid; /**< Parent thread ID for this portal */
};

RTE_DECLARE_PER_LCORE(struct dpaa_portal *, dpaa_io);

#define DPAA_PER_LCORE_PORTAL \
	RTE_PER_LCORE(dpaa_io)
#define DPAA_PER_LCORE_DQRR_SIZE \
	RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.dqrr_size
#define DPAA_PER_LCORE_DQRR_HELD \
	RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.dqrr_held
#define DPAA_PER_LCORE_DQRR_MBUF(i) \
	RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.mbuf[i]
#define DPAA_PER_LCORE_RTE_CRYPTO_OP \
	RTE_PER_LCORE(dpaa_io)->dpaa_sec_ops
#define DPAA_PER_LCORE_DPAA_SEC_OP_NB \
	RTE_PER_LCORE(dpaa_io)->dpaa_sec_op_nb
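
/*
 * Sketch of the atomic-queue bookkeeping these macros support (loosely based
 * on the dpaa PMD Rx/Tx pattern; "m" and "index" are illustrative locals):
 *
 *	// On dequeue: remember the DQRR entry held for this lcore.
 *	DPAA_PER_LCORE_DQRR_MBUF(index) = m;
 *	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
 *	DPAA_PER_LCORE_DQRR_SIZE++;
 *
 *	// Once the entry is consumed/released: drop the hold.
 *	DPAA_PER_LCORE_DQRR_HELD &= ~(1 << index);
 *	DPAA_PER_LCORE_DQRR_SIZE--;
 */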

/* Various structures representing contiguous memory maps */
struct dpaa_memseg {
	TAILQ_ENTRY(dpaa_memseg) next;
	char *vaddr;
	rte_iova_t iova;
	size_t len;
};

TAILQ_HEAD(dpaa_memseg_list, dpaa_memseg);
extern struct dpaa_memseg_list rte_dpaa_memsegs;

/* Either iterate over the list of internal memseg references or fall back to
 * EAL memseg based iova2virt.
 */
static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
{
	struct dpaa_memseg *ms;
	void *va;

	va = dpaax_iova_table_get_va(paddr);
	if (likely(va != NULL))
		return va;

	/* Check if the address is already part of the memseg list internally
	 * maintained by the dpaa driver.
	 */
	TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
		if (paddr >= ms->iova && paddr < ms->iova + ms->len)
			return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
	}

	/* If not, fall back to a full memseg list search */
	va = rte_mem_iova2virt(paddr);

	dpaax_iova_table_update(paddr, va, RTE_CACHE_LINE_SIZE);

	return va;
}
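
/*
 * Usage sketch (illustrative): translating a hardware-visible (IOVA/physical)
 * buffer address, e.g. one returned by BMAN, back to a virtual address.
 * "bm_buf_addr" and "process_buffer" are assumed names, not part of this API.
 *
 *	void *va = rte_dpaa_mem_ptov((phys_addr_t)bm_buf_addr);
 *
 *	if (va != NULL)
 *		process_buffer(va);
 */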

static inline rte_iova_t
rte_dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms)
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);

	return (size_t)NULL;
}

/**
 * Register a DPAA driver.
 *
 * @param driver
 *	A pointer to a rte_dpaa_driver structure describing the driver
 *	to be registered.
 */
__rte_internal
void rte_dpaa_driver_register(struct rte_dpaa_driver *driver);

/**
 * Unregister a DPAA driver.
 *
 * @param driver
 *	A pointer to a rte_dpaa_driver structure describing the driver
 *	to be unregistered.
 */
__rte_internal
void rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver);

/**
 * Initialize a DPAA portal
 *
 * @param arg
 *	Per thread ID
 *
 * @return
 *	0 in case of success, error otherwise
 */
__rte_internal
int rte_dpaa_portal_init(void *arg);
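
/*
 * Typical per-thread usage (a sketch of the dpaa PMD datapath pattern):
 * lazily affine a portal to the calling thread before issuing QMAN/BMAN
 * operations.
 *
 *	if (unlikely(DPAA_PER_LCORE_PORTAL == NULL)) {
 *		if (rte_dpaa_portal_init(NULL) != 0) {
 *			// Portal affinity failed; DPAA unusable on this thread.
 *			return -1;
 *		}
 *	}
 */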

__rte_internal
int rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq);

__rte_internal
int rte_dpaa_portal_fq_close(struct qman_fq *fq);

/**
 * Cleanup a DPAA Portal
 */
void dpaa_portal_finish(void *arg);

/** Helper for DPAA device registration from driver (eth, crypto) instance */
#define RTE_PMD_REGISTER_DPAA(nm, dpaa_drv) \
RTE_INIT(dpaainitfn_ ##nm) \
{\
	(dpaa_drv).driver.name = RTE_STR(nm);\
	rte_dpaa_driver_register(&dpaa_drv); \
} \
RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
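
/*
 * Registration sketch for a hypothetical PMD (all "example" names below are
 * placeholders, not part of this bus API):
 *
 *	static struct rte_dpaa_driver example_dpaa_pmd = {
 *		.drv_type = FSL_DPAA_ETH,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	RTE_PMD_REGISTER_DPAA(net_example_dpaa, example_dpaa_pmd);
 */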

__rte_internal
struct fm_eth_port_cfg *dpaa_get_eth_port_cfg(int dev_id);

#ifdef __cplusplus
}
#endif

#endif /* __RTE_DPAA_BUS_H__ */