/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <rte_rawdev_pmd.h>
#include <rte_memzone.h>
#include <rte_common.h>
#include <rte_string_fns.h>

#include "ioat_private.h"

static const char * const xstat_names[] = {
		"failed_enqueues", "successful_enqueues",
		"copies_started", "copies_completed"
};

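/*
 * Report the xstat value for each requested id; ids beyond the
 * xstats array report a value of 0.
 */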
int
ioat_xstats_get(const struct rte_rawdev *dev, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	const struct rte_ioat_rawdev *ioat = dev->dev_private;
	const uint64_t *stats = (const void *)&ioat->xstats;
	unsigned int i;

	for (i = 0; i < n; i++) {
		/* use >= so an id equal to the stat count cannot read
		 * past the end of the xstats array
		 */
		if (ids[i] >= sizeof(ioat->xstats)/sizeof(*stats))
			values[i] = 0;
		else
			values[i] = stats[ids[i]];
	}
	return n;
}

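/*
 * Fill in the name for each xstat slot; if "size" is too small,
 * just return the required count so the caller can resize and retry.
 */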
int
ioat_xstats_get_names(const struct rte_rawdev *dev,
		struct rte_rawdev_xstats_name *names,
		unsigned int size)
{
	unsigned int i;

	RTE_SET_USED(dev);
	if (size < RTE_DIM(xstat_names))
		return RTE_DIM(xstat_names);

	for (i = 0; i < RTE_DIM(xstat_names); i++)
		strlcpy(names[i].name, xstat_names[i], sizeof(names[i]));

	return RTE_DIM(xstat_names);
}

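/*
 * Zero the selected xstats, or all of them when no id array is given.
 */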
int
ioat_xstats_reset(struct rte_rawdev *dev, const uint32_t *ids, uint32_t nb_ids)
{
	struct rte_ioat_rawdev *ioat = dev->dev_private;
	uint64_t *stats = (void *)&ioat->xstats;
	unsigned int i;

	if (!ids) {
		memset(&ioat->xstats, 0, sizeof(ioat->xstats));
		return 0;
	}

	for (i = 0; i < nb_ids; i++)
		if (ids[i] < sizeof(ioat->xstats)/sizeof(*stats))
			stats[ids[i]] = 0;

	return 0;
}

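/* Nothing to release on close. */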
int
idxd_rawdev_close(struct rte_rawdev *dev __rte_unused)
{
	return 0;
}

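/*
 * Dump the ring configuration and the state of each batch descriptor
 * to the given file, for debugging.
 */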
int
idxd_dev_dump(struct rte_rawdev *dev, FILE *f)
{
	struct idxd_rawdev *idxd = dev->dev_private;
	struct rte_idxd_rawdev *rte_idxd = &idxd->public;
	int i;

	fprintf(f, "Raw Device #%d\n", dev->dev_id);
	fprintf(f, "Driver: %s\n\n", dev->driver_name);

	fprintf(f, "Portal: %p\n", rte_idxd->portal);
	fprintf(f, "Batch Ring size: %u\n", rte_idxd->batch_ring_sz);
	fprintf(f, "Comp Handle Ring size: %u\n\n", rte_idxd->hdl_ring_sz);

	fprintf(f, "Next batch: %u\n", rte_idxd->next_batch);
	fprintf(f, "Next batch to be completed: %u\n", rte_idxd->next_completed);
	for (i = 0; i < rte_idxd->batch_ring_sz; i++) {
		struct rte_idxd_desc_batch *b = &rte_idxd->batch_ring[i];
		fprintf(f, "Batch %u @%p: submitted=%u, op_count=%u, hdl_end=%u\n",
				i, b, b->submitted, b->op_count, b->hdl_end);
	}

	fprintf(f, "\n");
	fprintf(f, "Next free hdl: %u\n", rte_idxd->next_free_hdl);
	fprintf(f, "Last completed hdl: %u\n", rte_idxd->last_completed_hdl);
	fprintf(f, "Next returned hdl: %u\n", rte_idxd->next_ret_hdl);

	return 0;
}

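/*
 * Return the current configuration (ring size, handle tracking) in the
 * caller-supplied rte_ioat_rawdev_config structure.
 */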
int
idxd_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info,
		size_t info_size)
{
	struct rte_ioat_rawdev_config *cfg = dev_info;
	struct idxd_rawdev *idxd = dev->dev_private;
	struct rte_idxd_rawdev *rte_idxd = &idxd->public;

	if (info_size != sizeof(*cfg))
		return -EINVAL;

	if (cfg != NULL) {
		cfg->ring_size = rte_idxd->hdl_ring_sz;
		cfg->hdls_disable = rte_idxd->hdls_disable;
	}
	return 0;
}

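/*
 * Size the batch and completion-handle rings from the requested ring
 * size - clamped to the number of batches the hardware can store and
 * rounded up to a power of two - then pre-fill each batch descriptor.
 */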
int
idxd_dev_configure(const struct rte_rawdev *dev,
		rte_rawdev_obj_t config, size_t config_size)
{
	struct idxd_rawdev *idxd = dev->dev_private;
	struct rte_idxd_rawdev *rte_idxd = &idxd->public;
	struct rte_ioat_rawdev_config *cfg = config;
	uint16_t max_desc = cfg->ring_size;
	uint16_t max_batches = max_desc / BATCH_SIZE;
	uint16_t i;

	if (config_size != sizeof(*cfg))
		return -EINVAL;

	if (dev->started) {
		IOAT_PMD_ERR("%s: Error, device is started.", __func__);
		return -EAGAIN;
	}

	rte_idxd->hdls_disable = cfg->hdls_disable;

	/* limit the batches to what can be stored in hardware */
	if (max_batches > idxd->max_batches) {
		IOAT_PMD_DEBUG("Ring size of %u is too large for this device, need to limit to %u batches of %u",
				max_desc, idxd->max_batches, BATCH_SIZE);
		max_batches = idxd->max_batches;
		max_desc = max_batches * BATCH_SIZE;
	}
	if (!rte_is_power_of_2(max_desc))
		max_desc = rte_align32pow2(max_desc);
	IOAT_PMD_DEBUG("Rawdev %u using %u descriptors in %u batches",
			dev->dev_id, max_desc, max_batches);

	/* in case we are reconfiguring a device, free any existing memory */
	rte_free(rte_idxd->batch_ring);
	rte_free(rte_idxd->hdl_ring);

	rte_idxd->batch_ring = rte_zmalloc(NULL,
			sizeof(*rte_idxd->batch_ring) * max_batches, 0);
	if (rte_idxd->batch_ring == NULL)
		return -ENOMEM;

	rte_idxd->hdl_ring = rte_zmalloc(NULL,
			sizeof(*rte_idxd->hdl_ring) * max_desc, 0);
	if (rte_idxd->hdl_ring == NULL) {
		rte_free(rte_idxd->batch_ring);
		rte_idxd->batch_ring = NULL;
		return -ENOMEM;
	}
	rte_idxd->batch_ring_sz = max_batches;
	rte_idxd->hdl_ring_sz = max_desc;

	for (i = 0; i < rte_idxd->batch_ring_sz; i++) {
		struct rte_idxd_desc_batch *b = &rte_idxd->batch_ring[i];
		b->batch_desc.completion = rte_mem_virt2iova(&b->comp);
		b->batch_desc.desc_addr = rte_mem_virt2iova(&b->null_desc);
		b->batch_desc.op_flags = (idxd_op_batch << IDXD_CMD_OP_SHIFT) |
				IDXD_FLAG_COMPLETION_ADDR_VALID |
				IDXD_FLAG_REQUEST_COMPLETION;
	}

	return 0;
}

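/*
 * Allocate and initialise a new rawdev instance for an idxd device,
 * copying the hardware details from base_idxd into private data held
 * in an IOVA-contiguous memzone.
 */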
int
idxd_rawdev_create(const char *name, struct rte_device *dev,
		   const struct idxd_rawdev *base_idxd,
		   const struct rte_rawdev_ops *ops)
{
	struct idxd_rawdev *idxd;
	struct rte_rawdev *rawdev = NULL;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	int ret = 0;

	RTE_BUILD_BUG_ON(sizeof(struct rte_idxd_hw_desc) != 64);
	RTE_BUILD_BUG_ON(offsetof(struct rte_idxd_hw_desc, size) != 32);
	RTE_BUILD_BUG_ON(sizeof(struct rte_idxd_completion) != 32);

	if (!name) {
		IOAT_PMD_ERR("Invalid name of the device!");
		ret = -EINVAL;
		goto cleanup;
	}

	/* Allocate device structure */
	rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct idxd_rawdev),
					 dev->numa_node);
	if (rawdev == NULL) {
		IOAT_PMD_ERR("Unable to allocate raw device");
		ret = -ENOMEM;
		goto cleanup;
	}

	snprintf(mz_name, sizeof(mz_name), "rawdev%u_private", rawdev->dev_id);
	mz = rte_memzone_reserve(mz_name, sizeof(struct idxd_rawdev),
			dev->numa_node, RTE_MEMZONE_IOVA_CONTIG);
	if (mz == NULL) {
		IOAT_PMD_ERR("Unable to reserve memzone for private data");
		ret = -ENOMEM;
		goto cleanup;
	}
	rawdev->dev_private = mz->addr;
	rawdev->dev_ops = ops;
	rawdev->device = dev;
	rawdev->driver_name = IOAT_PMD_RAWDEV_NAME_STR;

	idxd = rawdev->dev_private;
	*idxd = *base_idxd; /* copy over the main fields already passed in */
	idxd->public.type = RTE_IDXD_DEV;
	idxd->rawdev = rawdev;
	idxd->mz = mz;

	return 0;

cleanup:
	if (rawdev)
		rte_rawdev_pmd_release(rawdev);

	return ret;
}