1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2020 NXP
3  */
4 
5 #ifndef __RTE_PMD_DPAA2_QDMA_H__
6 #define __RTE_PMD_DPAA2_QDMA_H__
7 
8 #include <rte_rawdev.h>
9 
10 /**
11  * @file
12  *
13  * NXP dpaa2 QDMA specific structures.
14  *
15  */
16 
/** Maximum qdma burst size (upper bound on jobs per enqueue/dequeue burst) */
#define RTE_QDMA_BURST_NB_MAX 256
19 
/**
 * Determines the mode of operation.
 * Selected at device configuration time; see 'RTE_QDMA_VQ_EXCLUSIVE_PQ'
 * for a per-VQ override in virtual mode.
 */
enum {
	/**
	 * Allocate a H/W queue per VQ i.e. Exclusive hardware queue for a VQ.
	 * This mode will have best performance.
	 */
	RTE_QDMA_MODE_HW,
	/**
	 * A VQ shall not have an exclusive associated H/W queue.
	 * Rather a H/W Queue will be shared by multiple Virtual Queues.
	 * This mode will have intermediate data structures to support
	 * multi VQ to PQ mappings thus having some performance implications.
	 * Note: Even in this mode there is an option to allocate a H/W
	 * queue for a VQ. Please see 'RTE_QDMA_VQ_EXCLUSIVE_PQ' flag.
	 */
	RTE_QDMA_MODE_VIRTUAL
};
37 
/** Determines the format of FD (frame descriptor) used per virtual queue. */
enum {
	/** Long FD format; see also RTE_QDMA_VQ_FD_LONG_FORMAT. */
	RTE_QDMA_LONG_FORMAT,
	/** Ultra-short FD format (compact descriptor). */
	RTE_QDMA_ULTRASHORT_FORMAT,
};
43 
44 /**
45  * If user has configured a Virtual Queue mode, but for some particular VQ
46  * user needs an exclusive H/W queue associated (for better performance
47  * on that particular VQ), then user can pass this flag while creating the
48  * Virtual Queue. A H/W queue will be allocated corresponding to
49  * VQ which uses this flag.
50  */
51 #define RTE_QDMA_VQ_EXCLUSIVE_PQ	(1ULL)
52 
53 #define RTE_QDMA_VQ_FD_LONG_FORMAT		(1ULL << 1)
54 
55 #define RTE_QDMA_VQ_FD_SG_FORMAT		(1ULL << 2)
56 
57 #define RTE_QDMA_VQ_NO_RESPONSE			(1ULL << 3)
58 
/** States if the source address is physical. */
#define RTE_QDMA_JOB_SRC_PHY		(1ULL << 0)

/** States if the destination address is physical. */
#define RTE_QDMA_JOB_DEST_PHY		(1ULL << 1)
64 
/** Provides QDMA device attributes, filled in by the driver on query. */
struct rte_qdma_attr {
	/** Total number of hw QDMA queues present on the device. */
	uint16_t num_hw_queues;
};
70 
/** QDMA device configuration structure, passed via rte_qdma_configure(). */
struct rte_qdma_config {
	/** Number of maximum hw queues to allocate per core. */
	uint16_t max_hw_queues_per_core;
	/** Maximum number of VQ's to be used. */
	uint16_t max_vqs;
	/**
	 * User provides this as input to the driver as a size of the FLE pool.
	 * FLE's (and corresponding source/destination descriptors) are
	 * allocated by the driver at enqueue time to store src/dest and
	 * other data and are freed at the dequeue time. This determines the
	 * maximum number of inflight jobs on the QDMA device. This should
	 * be power of 2.
	 */
	int fle_queue_pool_cnt;
};
87 
/**
 * Route-by-port (RBP) configuration for a virtual queue.
 * NOTE(review): field semantics below are inferred from names where no
 * original comment exists — confirm against the DPAA2 QDMA hardware spec.
 */
struct rte_qdma_rbp {
	/** Use the ultra-short FD format (see RTE_QDMA_ULTRASHORT_FORMAT). */
	uint32_t use_ultrashort:1;
	/** Enable route-by-port for this VQ. */
	uint32_t enable:1;
	/**
	 * dportid:
	 * 0000 PCI-Express 1
	 * 0001 PCI-Express 2
	 * 0010 PCI-Express 3
	 * 0011 PCI-Express 4
	 * 0100 PCI-Express 5
	 * 0101 PCI-Express 6
	 */
	uint32_t dportid:4;
	/** Destination PF id — TODO confirm. */
	uint32_t dpfid:2;
	/** Destination VF id — TODO confirm. */
	uint32_t dvfid:6;
	/** Using route by port for destination. */
	uint32_t drbp:1;
	/**
	 * sportid:
	 * 0000 PCI-Express 1
	 * 0001 PCI-Express 2
	 * 0010 PCI-Express 3
	 * 0011 PCI-Express 4
	 * 0100 PCI-Express 5
	 * 0101 PCI-Express 6
	 */
	uint32_t sportid:4;
	/** Source PF id — TODO confirm. */
	uint32_t spfid:2;
	/** Source VF id — TODO confirm. */
	uint32_t svfid:6;
	/** Using route by port for source. */
	uint32_t srbp:1;
	/** Reserved bits; keep zero. */
	uint32_t rsv:4;
};
121 
/** Provides per-virtual-queue statistics (see rte_qdma_vq_stats() below). */
struct rte_qdma_vq_stats {
	/** States if this vq has exclusively associated hw queue. */
	uint8_t exclusive_hw_queue;
	/** Associated lcore id. */
	uint32_t lcore_id;
	/** Total number of enqueues on this VQ. */
	uint64_t num_enqueues;
	/** Total number of dequeues from this VQ. */
	uint64_t num_dequeues;
	/** Total number of pending jobs in this VQ (enqueued minus dequeued). */
	uint64_t num_pending_jobs;
};
135 
/** Determines a QDMA job: one DMA copy descriptor, enqueued and
 *  returned (with status filled in) on dequeue. */
struct rte_qdma_job {
	/** Source Address from where DMA is (to be) performed. */
	uint64_t src;
	/** Destination Address where DMA is (to be) done. */
	uint64_t dest;
	/** Length of the DMA operation in bytes. */
	uint32_t len;
	/** See RTE_QDMA_JOB_ flags (SRC_PHY / DEST_PHY). */
	uint32_t flags;
	/**
	 * User can specify a context which will be maintained
	 * on the dequeue operation.
	 */
	uint64_t cnxt;
	/**
	 * Status of the transaction.
	 * This is filled in the dequeue operation by the driver.
	 * upper 8bits acc_err for route by port.
	 * lower 8bits fd error
	 */
	uint16_t status;
	/** Virtual queue this job belongs to. */
	uint16_t vq_id;
	/**
	 * FLE pool element maintained by user, in case no qDMA response
	 * (i.e. the VQ was created with RTE_QDMA_VQ_NO_RESPONSE).
	 * Note: the address must be allocated from DPDK memory pool.
	 */
	void *usr_elem;
};
165 
/** Context passed to rte_qdma_enqueue_buffers()/rte_qdma_dequeue_buffers(). */
struct rte_qdma_enqdeq {
	/** Virtual queue to enqueue to / dequeue from. */
	uint16_t vq_id;
	/** Array of job pointers to submit or to be filled on dequeue. */
	struct rte_qdma_job **job;
};
170 
/** Per-queue configuration, passed to rte_qdma_queue_setup(). */
struct rte_qdma_queue_config {
	/** lcore this queue will be serviced from. */
	uint32_t lcore_id;
	/** RTE_QDMA_VQ_* flags for this queue. */
	uint32_t flags;
	/** Optional route-by-port configuration; NULL if unused — TODO confirm. */
	struct rte_qdma_rbp *rbp;
};
176 
/*
 * The QDMA control/data-path API is a thin alias layer over the generic
 * rawdev API: each rte_qdma_* name maps one-to-one onto the corresponding
 * rte_rawdev_* call with identical arguments.
 */
#define rte_qdma_info rte_rawdev_info
#define rte_qdma_start(id) rte_rawdev_start(id)
#define rte_qdma_reset(id) rte_rawdev_reset(id)
#define rte_qdma_configure(id, cf) rte_rawdev_configure(id, cf)
#define rte_qdma_dequeue_buffers(id, buf, num, ctxt) \
	rte_rawdev_dequeue_buffers(id, buf, num, ctxt)
#define rte_qdma_enqueue_buffers(id, buf, num, ctxt) \
	rte_rawdev_enqueue_buffers(id, buf, num, ctxt)
#define rte_qdma_queue_setup(id, qid, cfg) \
	rte_rawdev_queue_setup(id, qid, cfg)
187 
/* TODO: introduce a per-queue stats API in rawdev. */
/**
 * Get a Virtual Queue statistics.
 *
 * @param rawdev
 *   Raw Device.
 * @param vq_id
 *   Virtual Queue ID.
 * @param vq_stats
 *   VQ statistics structure which will be filled in by the driver.
 */
void
rte_qdma_vq_stats(struct rte_rawdev *rawdev,
		uint16_t vq_id,
		struct rte_qdma_vq_stats *vq_stats);
203 
204 #endif /* __RTE_PMD_DPAA2_QDMA_H__*/
205