xref: /dpdk/drivers/net/cnxk/cnxk_ethdev_sec.c (revision 29fd052d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include <cnxk_ethdev.h>
6 
7 #define CNXK_NIX_INL_SELFTEST	      "selftest"
8 #define CNXK_NIX_INL_IPSEC_IN_MIN_SPI "ipsec_in_min_spi"
9 #define CNXK_NIX_INL_IPSEC_IN_MAX_SPI "ipsec_in_max_spi"
10 #define CNXK_INL_CPT_CHANNEL	      "inl_cpt_channel"
11 
/* Inline CPT channel/mask pair parsed from the "inl_cpt_channel" devarg */
struct inl_cpt_channel {
	bool is_multi_channel;	/* Set when devargs supplied an explicit channel/mask */
	uint16_t channel;	/* Channel number (hex in devargs) */
	uint16_t mask;		/* Channel mask (hex in devargs) */
};
17 
18 #define CNXK_NIX_INL_DEV_NAME RTE_STR(cnxk_nix_inl_dev_)
19 #define CNXK_NIX_INL_DEV_NAME_LEN                                              \
20 	(sizeof(CNXK_NIX_INL_DEV_NAME) + PCI_PRI_STR_SIZE)
21 
/* Count trailing zeros of a bitmap slab.
 * __builtin_ctzll() is undefined for a zero argument, so guard that case
 * and report position 0 for an empty slab.
 */
static inline int
bitmap_ctzll(uint64_t slab)
{
	return (slab == 0) ? 0 : __builtin_ctzll(slab);
}
30 
31 int
32 cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p)
33 {
34 	uint32_t pos, idx;
35 	uint64_t slab;
36 	int rc;
37 
38 	if (!dev->outb.sa_bmap)
39 		return -ENOTSUP;
40 
41 	pos = 0;
42 	slab = 0;
43 	/* Scan from the beginning */
44 	plt_bitmap_scan_init(dev->outb.sa_bmap);
45 	/* Scan bitmap to get the free sa index */
46 	rc = plt_bitmap_scan(dev->outb.sa_bmap, &pos, &slab);
47 	/* Empty bitmap */
48 	if (rc == 0) {
49 		plt_err("Outbound SA' exhausted, use 'ipsec_out_max_sa' "
50 			"devargs to increase");
51 		return -ERANGE;
52 	}
53 
54 	/* Get free SA index */
55 	idx = pos + bitmap_ctzll(slab);
56 	plt_bitmap_clear(dev->outb.sa_bmap, idx);
57 	*idx_p = idx;
58 	return 0;
59 }
60 
61 int
62 cnxk_eth_outb_sa_idx_put(struct cnxk_eth_dev *dev, uint32_t idx)
63 {
64 	if (idx >= dev->outb.max_sa)
65 		return -EINVAL;
66 
67 	/* Check if it is already free */
68 	if (plt_bitmap_get(dev->outb.sa_bmap, idx))
69 		return -EINVAL;
70 
71 	/* Mark index as free */
72 	plt_bitmap_set(dev->outb.sa_bmap, idx);
73 	return 0;
74 }
75 
76 struct cnxk_eth_sec_sess *
77 cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev, uint32_t spi, bool inb)
78 {
79 	struct cnxk_eth_sec_sess_list *list;
80 	struct cnxk_eth_sec_sess *eth_sec;
81 
82 	list = inb ? &dev->inb.list : &dev->outb.list;
83 	TAILQ_FOREACH(eth_sec, list, entry) {
84 		if (eth_sec->spi == spi)
85 			return eth_sec;
86 	}
87 
88 	return NULL;
89 }
90 
91 struct cnxk_eth_sec_sess *
92 cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
93 			      struct rte_security_session *sess)
94 {
95 	struct cnxk_eth_sec_sess *eth_sec = NULL;
96 
97 	/* Search in inbound list */
98 	TAILQ_FOREACH(eth_sec, &dev->inb.list, entry) {
99 		if (eth_sec->sess == sess)
100 			return eth_sec;
101 	}
102 
103 	/* Search in outbound list */
104 	TAILQ_FOREACH(eth_sec, &dev->outb.list, entry) {
105 		if (eth_sec->sess == sess)
106 			return eth_sec;
107 	}
108 
109 	return NULL;
110 }
111 
112 static unsigned int
113 cnxk_eth_sec_session_get_size(void *device __rte_unused)
114 {
115 	return sizeof(struct cnxk_eth_sec_sess);
116 }
117 
/* Security ops shared by cnxk ethdevs; only the session-size query is
 * provided here.
 */
struct rte_security_ops cnxk_eth_sec_ops = {
	.session_get_size = cnxk_eth_sec_session_get_size
};
121 
/* kvargs callback: parse an SPI bound (base auto-detected by strtoul).
 * On conversion error the value falls back to 0. Always returns 0 so that
 * kvargs processing continues.
 */
static int
parse_ipsec_in_spi_range(const char *key, const char *value, void *extra_args)
{
	uint32_t spi;

	(void)key; /* key is implied by the kvargs match, unused here */

	errno = 0;
	spi = strtoul(value, NULL, 0);
	if (errno != 0)
		spi = 0;

	*(uint32_t *)extra_args = spi;

	return 0;
}
137 
/* kvargs callback: enable selftest only for the exact value "1".
 * Uses strtol instead of atoi: atoi has undefined behavior on overflow and
 * gives no error indication. Always returns 0 so kvargs processing continues.
 */
static int
parse_selftest(const char *key, const char *value, void *extra_args)
{
	long val;

	(void)key; /* key is implied by the kvargs match, unused here */

	errno = 0;
	val = strtol(value, NULL, 10); /* base 10, matching old atoi() */
	if (errno != 0)
		val = 0;

	/* (val == 1) already yields 0/1; no '!!' needed */
	*(uint8_t *)extra_args = (uint8_t)(val == 1);
	return 0;
}
149 
150 static int
151 parse_inl_cpt_channel(const char *key, const char *value, void *extra_args)
152 {
153 	RTE_SET_USED(key);
154 	uint16_t chan = 0, mask = 0;
155 	char *next = 0;
156 
157 	/* next will point to the separator '/' */
158 	chan = strtol(value, &next, 16);
159 	mask = strtol(++next, 0, 16);
160 
161 	if (chan > GENMASK(12, 0) || mask > GENMASK(12, 0))
162 		return -EINVAL;
163 
164 	((struct inl_cpt_channel *)extra_args)->channel = chan;
165 	((struct inl_cpt_channel *)extra_args)->mask = mask;
166 	((struct inl_cpt_channel *)extra_args)->is_multi_channel = true;
167 
168 	return 0;
169 }
170 
171 static int
172 nix_inl_parse_devargs(struct rte_devargs *devargs,
173 		      struct roc_nix_inl_dev *inl_dev)
174 {
175 	uint32_t ipsec_in_max_spi = BIT(8) - 1;
176 	uint32_t ipsec_in_min_spi = 0;
177 	struct inl_cpt_channel cpt_channel;
178 	struct rte_kvargs *kvlist;
179 	uint8_t selftest = 0;
180 
181 	memset(&cpt_channel, 0, sizeof(cpt_channel));
182 
183 	if (devargs == NULL)
184 		goto null_devargs;
185 
186 	kvlist = rte_kvargs_parse(devargs->args, NULL);
187 	if (kvlist == NULL)
188 		goto exit;
189 
190 	rte_kvargs_process(kvlist, CNXK_NIX_INL_SELFTEST, &parse_selftest,
191 			   &selftest);
192 	rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MIN_SPI,
193 			   &parse_ipsec_in_spi_range, &ipsec_in_min_spi);
194 	rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MAX_SPI,
195 			   &parse_ipsec_in_spi_range, &ipsec_in_max_spi);
196 	rte_kvargs_process(kvlist, CNXK_INL_CPT_CHANNEL, &parse_inl_cpt_channel,
197 			   &cpt_channel);
198 	rte_kvargs_free(kvlist);
199 
200 null_devargs:
201 	inl_dev->ipsec_in_min_spi = ipsec_in_min_spi;
202 	inl_dev->ipsec_in_max_spi = ipsec_in_max_spi;
203 	inl_dev->selftest = selftest;
204 	inl_dev->channel = cpt_channel.channel;
205 	inl_dev->chan_mask = cpt_channel.mask;
206 	inl_dev->is_multi_channel = cpt_channel.is_multi_channel;
207 	return 0;
208 exit:
209 	return -EINVAL;
210 }
211 
212 static inline char *
213 nix_inl_dev_to_name(struct rte_pci_device *pci_dev, char *name)
214 {
215 	snprintf(name, CNXK_NIX_INL_DEV_NAME_LEN,
216 		 CNXK_NIX_INL_DEV_NAME PCI_PRI_FMT, pci_dev->addr.domain,
217 		 pci_dev->addr.bus, pci_dev->addr.devid,
218 		 pci_dev->addr.function);
219 
220 	return name;
221 }
222 
223 static int
224 cnxk_nix_inl_dev_remove(struct rte_pci_device *pci_dev)
225 {
226 	char name[CNXK_NIX_INL_DEV_NAME_LEN];
227 	const struct rte_memzone *mz;
228 	struct roc_nix_inl_dev *dev;
229 	int rc;
230 
231 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
232 		return 0;
233 
234 	mz = rte_memzone_lookup(nix_inl_dev_to_name(pci_dev, name));
235 	if (!mz)
236 		return 0;
237 
238 	dev = mz->addr;
239 
240 	/* Cleanup inline dev */
241 	rc = roc_nix_inl_dev_fini(dev);
242 	if (rc) {
243 		plt_err("Failed to cleanup inl dev, rc=%d(%s)", rc,
244 			roc_error_msg_get(rc));
245 		return rc;
246 	}
247 
248 	rte_memzone_free(mz);
249 	return 0;
250 }
251 
252 static int
253 cnxk_nix_inl_dev_probe(struct rte_pci_driver *pci_drv,
254 		       struct rte_pci_device *pci_dev)
255 {
256 	char name[CNXK_NIX_INL_DEV_NAME_LEN];
257 	struct roc_nix_inl_dev *inl_dev;
258 	const struct rte_memzone *mz;
259 	int rc = -ENOMEM;
260 
261 	RTE_SET_USED(pci_drv);
262 
263 	rc = roc_plt_init();
264 	if (rc) {
265 		plt_err("Failed to initialize platform model, rc=%d", rc);
266 		return rc;
267 	}
268 
269 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
270 		return 0;
271 
272 	mz = rte_memzone_reserve_aligned(nix_inl_dev_to_name(pci_dev, name),
273 					 sizeof(*inl_dev), SOCKET_ID_ANY, 0,
274 					 RTE_CACHE_LINE_SIZE);
275 	if (mz == NULL)
276 		return rc;
277 
278 	inl_dev = mz->addr;
279 	inl_dev->pci_dev = pci_dev;
280 
281 	/* Parse devargs string */
282 	rc = nix_inl_parse_devargs(pci_dev->device.devargs, inl_dev);
283 	if (rc) {
284 		plt_err("Failed to parse devargs rc=%d", rc);
285 		goto free_mem;
286 	}
287 
288 	inl_dev->attach_cptlf = true;
289 	/* WQE skip is one for DPDK */
290 	inl_dev->wqe_skip = true;
291 	inl_dev->set_soft_exp_poll = true;
292 	rc = roc_nix_inl_dev_init(inl_dev);
293 	if (rc) {
294 		plt_err("Failed to init nix inl device, rc=%d(%s)", rc,
295 			roc_error_msg_get(rc));
296 		goto free_mem;
297 	}
298 
299 	return 0;
300 free_mem:
301 	rte_memzone_free(mz);
302 	return rc;
303 }
304 
/* PCI IDs handled by this driver: RVU NIX inline device PF and VF.
 * The zero vendor_id entry terminates the table.
 */
static const struct rte_pci_id cnxk_nix_inl_pci_map[] = {
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_RVU_NIX_INL_PF)},
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_RVU_NIX_INL_VF)},
	{
		.vendor_id = 0,
	},
};
312 
/* Inline device PCI driver: requires BAR mapping and IOVA-as-VA mode */
static struct rte_pci_driver cnxk_nix_inl_pci = {
	.id_table = cnxk_nix_inl_pci_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = cnxk_nix_inl_dev_probe,
	.remove = cnxk_nix_inl_dev_remove,
};
319 
320 RTE_PMD_REGISTER_PCI(cnxk_nix_inl, cnxk_nix_inl_pci);
321 RTE_PMD_REGISTER_PCI_TABLE(cnxk_nix_inl, cnxk_nix_inl_pci_map);
322 RTE_PMD_REGISTER_KMOD_DEP(cnxk_nix_inl, "vfio-pci");
323 
324 RTE_PMD_REGISTER_PARAM_STRING(cnxk_nix_inl,
325 			      CNXK_NIX_INL_SELFTEST "=1"
326 			      CNXK_NIX_INL_IPSEC_IN_MAX_SPI "=<1-65535>"
327 			      CNXK_INL_CPT_CHANNEL "=<1-4095>/<1-4095>");
328