xref: /dpdk/drivers/net/cnxk/cnxk_ethdev_sec.c (revision 8efa348e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include <cnxk_ethdev.h>
6 
7 #define CNXK_NIX_INL_SELFTEST	      "selftest"
8 #define CNXK_NIX_INL_IPSEC_IN_MIN_SPI "ipsec_in_min_spi"
9 #define CNXK_NIX_INL_IPSEC_IN_MAX_SPI "ipsec_in_max_spi"
10 #define CNXK_INL_CPT_CHANNEL	      "inl_cpt_channel"
11 
/* Parsed "inl_cpt_channel" devarg: a channel/mask pair given as
 * "<channel>/<mask>" (hex) used for inline CPT channel steering.
 */
struct inl_cpt_channel {
	bool is_multi_channel; /* true when the devarg was supplied */
	uint16_t channel;      /* channel value parsed from devargs */
	uint16_t mask;         /* channel mask parsed from devargs */
};
17 
18 #define CNXK_NIX_INL_DEV_NAME RTE_STR(cnxk_nix_inl_dev_)
19 #define CNXK_NIX_INL_DEV_NAME_LEN                                              \
20 	(sizeof(CNXK_NIX_INL_DEV_NAME) + PCI_PRI_STR_SIZE)
21 
/* Position of the least-significant set bit in @slab, or 0 when the
 * slab is empty (guard needed: __builtin_ctzll(0) is undefined).
 */
static inline int
bitmap_ctzll(uint64_t slab)
{
	return slab == 0 ? 0 : __builtin_ctzll(slab);
}
30 
31 int
cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev * dev,uint32_t * idx_p,uint32_t spi)32 cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,
33 			 uint32_t spi)
34 {
35 	uint32_t pos, idx;
36 	uint64_t slab;
37 	int rc;
38 
39 	if (!dev->outb.sa_bmap)
40 		return -ENOTSUP;
41 
42 	pos = 0;
43 	slab = 0;
44 	/* Scan from the beginning */
45 	plt_bitmap_scan_init(dev->outb.sa_bmap);
46 
47 	if (dev->nix.custom_sa_action) {
48 		if (spi > dev->outb.max_sa)
49 			return -ENOTSUP;
50 		idx = spi;
51 	} else {
52 		/* Scan bitmap to get the free sa index */
53 		rc = plt_bitmap_scan(dev->outb.sa_bmap, &pos, &slab);
54 		/* Empty bitmap */
55 		if (rc == 0) {
56 			plt_err("Outbound SA' exhausted, use 'ipsec_out_max_sa' "
57 				"devargs to increase");
58 			return -ERANGE;
59 		}
60 
61 		/* Get free SA index */
62 		idx = pos + bitmap_ctzll(slab);
63 	}
64 	plt_bitmap_clear(dev->outb.sa_bmap, idx);
65 	*idx_p = idx;
66 	return 0;
67 }
68 
69 int
cnxk_eth_outb_sa_idx_put(struct cnxk_eth_dev * dev,uint32_t idx)70 cnxk_eth_outb_sa_idx_put(struct cnxk_eth_dev *dev, uint32_t idx)
71 {
72 	if (idx >= dev->outb.max_sa)
73 		return -EINVAL;
74 
75 	/* Check if it is already free */
76 	if (plt_bitmap_get(dev->outb.sa_bmap, idx))
77 		return -EINVAL;
78 
79 	/* Mark index as free */
80 	plt_bitmap_set(dev->outb.sa_bmap, idx);
81 	return 0;
82 }
83 
84 struct cnxk_eth_sec_sess *
cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev * dev,uint32_t spi,bool inb)85 cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev, uint32_t spi, bool inb)
86 {
87 	struct cnxk_eth_sec_sess_list *list;
88 	struct cnxk_eth_sec_sess *eth_sec;
89 
90 	list = inb ? &dev->inb.list : &dev->outb.list;
91 	TAILQ_FOREACH(eth_sec, list, entry) {
92 		if (eth_sec->spi == spi)
93 			return eth_sec;
94 	}
95 
96 	return NULL;
97 }
98 
99 struct cnxk_eth_sec_sess *
cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev * dev,struct rte_security_session * sess)100 cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
101 			      struct rte_security_session *sess)
102 {
103 	struct cnxk_eth_sec_sess *eth_sec = NULL;
104 
105 	/* Search in inbound list */
106 	TAILQ_FOREACH(eth_sec, &dev->inb.list, entry) {
107 		if (eth_sec->sess == sess)
108 			return eth_sec;
109 	}
110 
111 	/* Search in outbound list */
112 	TAILQ_FOREACH(eth_sec, &dev->outb.list, entry) {
113 		if (eth_sec->sess == sess)
114 			return eth_sec;
115 	}
116 
117 	return NULL;
118 }
119 
/* rte_security op: report the size of the driver-private security session
 * so callers can size their session mempools accordingly.
 */
static unsigned int
cnxk_eth_sec_session_get_size(void *device __rte_unused)
{
	return sizeof(struct cnxk_eth_sec_sess);
}
125 
/* Common security ops shared by the cnxk ethdevs.  Only session_get_size
 * is set here; NOTE(review): remaining ops are presumably filled in by the
 * SoC-specific drivers — confirm against the cn9k/cn10k code.
 */
struct rte_security_ops cnxk_eth_sec_ops = {
	.session_get_size = cnxk_eth_sec_session_get_size
};
129 
/* kvargs callback: parse an "ipsec_in_min_spi"/"ipsec_in_max_spi" value
 * into the uint32_t pointed to by @extra_args.  A value that fails to
 * parse (errno set by strtoul) falls back to 0.  Always returns 0 so that
 * kvargs processing continues.
 */
static int
parse_ipsec_in_spi_range(const char *key, const char *value, void *extra_args)
{
	uint32_t spi;

	RTE_SET_USED(key);

	errno = 0;
	spi = strtoul(value, NULL, 0);
	if (errno != 0)
		spi = 0;

	*(uint32_t *)extra_args = spi;

	return 0;
}
145 
/* kvargs callback: parse the "selftest" flag.  Only the exact value 1
 * enables the selftest; anything else (including parse failures, since
 * atoi reports errors as 0) disables it.  Always returns 0.
 */
static int
parse_selftest(const char *key, const char *value, void *extra_args)
{
	RTE_SET_USED(key);

	*(uint8_t *)extra_args = (atoi(value) == 1) ? 1 : 0;

	return 0;
}
157 
158 static int
parse_inl_cpt_channel(const char * key,const char * value,void * extra_args)159 parse_inl_cpt_channel(const char *key, const char *value, void *extra_args)
160 {
161 	RTE_SET_USED(key);
162 	uint16_t chan = 0, mask = 0;
163 	char *next = 0;
164 
165 	/* next will point to the separator '/' */
166 	chan = strtol(value, &next, 16);
167 	mask = strtol(++next, 0, 16);
168 
169 	if (chan > GENMASK(12, 0) || mask > GENMASK(12, 0))
170 		return -EINVAL;
171 
172 	((struct inl_cpt_channel *)extra_args)->channel = chan;
173 	((struct inl_cpt_channel *)extra_args)->mask = mask;
174 	((struct inl_cpt_channel *)extra_args)->is_multi_channel = true;
175 
176 	return 0;
177 }
178 
/* Parse the inline-device devargs string into @inl_dev.
 *
 * Recognized keys: "selftest", "ipsec_in_min_spi", "ipsec_in_max_spi" and
 * "inl_cpt_channel".  A NULL devargs is not an error: the defaults
 * (min_spi = 0, max_spi = 255, selftest off, no CPT channel) are applied.
 *
 * Returns 0 on success, -EINVAL when the devargs string cannot be parsed.
 */
static int
nix_inl_parse_devargs(struct rte_devargs *devargs,
		      struct roc_nix_inl_dev *inl_dev)
{
	uint32_t ipsec_in_max_spi = BIT(8) - 1;
	uint32_t ipsec_in_min_spi = 0;
	struct inl_cpt_channel cpt_channel;
	struct rte_kvargs *kvlist;
	uint8_t selftest = 0;

	memset(&cpt_channel, 0, sizeof(cpt_channel));

	if (devargs == NULL)
		goto null_devargs;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		goto exit;

	/* NOTE(review): rte_kvargs_process() return values are ignored here;
	 * malformed individual values fall back to the callbacks' defaults.
	 */
	rte_kvargs_process(kvlist, CNXK_NIX_INL_SELFTEST, &parse_selftest,
			   &selftest);
	rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MIN_SPI,
			   &parse_ipsec_in_spi_range, &ipsec_in_min_spi);
	rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MAX_SPI,
			   &parse_ipsec_in_spi_range, &ipsec_in_max_spi);
	rte_kvargs_process(kvlist, CNXK_INL_CPT_CHANNEL, &parse_inl_cpt_channel,
			   &cpt_channel);
	rte_kvargs_free(kvlist);

null_devargs:
	/* Apply the parsed (or default) values to the inline device config */
	inl_dev->ipsec_in_min_spi = ipsec_in_min_spi;
	inl_dev->ipsec_in_max_spi = ipsec_in_max_spi;
	inl_dev->selftest = selftest;
	inl_dev->channel = cpt_channel.channel;
	inl_dev->chan_mask = cpt_channel.mask;
	inl_dev->is_multi_channel = cpt_channel.is_multi_channel;
	return 0;
exit:
	return -EINVAL;
}
219 
/* Build the memzone name "cnxk_nix_inl_dev_<PCI BDF>" for @pci_dev into
 * @name (caller-provided buffer of at least CNXK_NIX_INL_DEV_NAME_LEN
 * bytes) and return @name so the call can be used inline as an argument.
 */
static inline char *
nix_inl_dev_to_name(struct rte_pci_device *pci_dev, char *name)
{
	snprintf(name, CNXK_NIX_INL_DEV_NAME_LEN,
		 CNXK_NIX_INL_DEV_NAME PCI_PRI_FMT, pci_dev->addr.domain,
		 pci_dev->addr.bus, pci_dev->addr.devid,
		 pci_dev->addr.function);

	return name;
}
230 
/* PCI remove handler: tear down the inline device created at probe time.
 *
 * The device state lives in a memzone named after the PCI address; a
 * missing memzone means probe never completed (or this is a secondary
 * process), which is treated as success.
 *
 * Returns 0 on success, negative errno when ROC cleanup fails (the
 * memzone is intentionally kept in that case so a retry is possible).
 */
static int
cnxk_nix_inl_dev_remove(struct rte_pci_device *pci_dev)
{
	char name[CNXK_NIX_INL_DEV_NAME_LEN];
	const struct rte_memzone *mz;
	struct roc_nix_inl_dev *dev;
	int rc;

	/* Only the primary process owns the inline device */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	mz = rte_memzone_lookup(nix_inl_dev_to_name(pci_dev, name));
	if (!mz)
		return 0;

	dev = mz->addr;

	/* Cleanup inline dev */
	rc = roc_nix_inl_dev_fini(dev);
	if (rc) {
		plt_err("Failed to cleanup inl dev, rc=%d(%s)", rc,
			roc_error_msg_get(rc));
		return rc;
	}

	rte_memzone_free(mz);
	return 0;
}
259 
260 static int
cnxk_nix_inl_dev_probe(struct rte_pci_driver * pci_drv,struct rte_pci_device * pci_dev)261 cnxk_nix_inl_dev_probe(struct rte_pci_driver *pci_drv,
262 		       struct rte_pci_device *pci_dev)
263 {
264 	char name[CNXK_NIX_INL_DEV_NAME_LEN];
265 	struct roc_nix_inl_dev *inl_dev;
266 	const struct rte_memzone *mz;
267 	int rc = -ENOMEM;
268 
269 	RTE_SET_USED(pci_drv);
270 
271 	rc = roc_plt_init();
272 	if (rc) {
273 		plt_err("Failed to initialize platform model, rc=%d", rc);
274 		return rc;
275 	}
276 
277 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
278 		return 0;
279 
280 	mz = rte_memzone_reserve_aligned(nix_inl_dev_to_name(pci_dev, name),
281 					 sizeof(*inl_dev), SOCKET_ID_ANY, 0,
282 					 RTE_CACHE_LINE_SIZE);
283 	if (mz == NULL)
284 		return rc;
285 
286 	inl_dev = mz->addr;
287 	inl_dev->pci_dev = pci_dev;
288 
289 	/* Parse devargs string */
290 	rc = nix_inl_parse_devargs(pci_dev->device.devargs, inl_dev);
291 	if (rc) {
292 		plt_err("Failed to parse devargs rc=%d", rc);
293 		goto free_mem;
294 	}
295 
296 	inl_dev->attach_cptlf = true;
297 	/* WQE skip is one for DPDK */
298 	inl_dev->wqe_skip = true;
299 	inl_dev->set_soft_exp_poll = true;
300 	rc = roc_nix_inl_dev_init(inl_dev);
301 	if (rc) {
302 		plt_err("Failed to init nix inl device, rc=%d(%s)", rc,
303 			roc_error_msg_get(rc));
304 		goto free_mem;
305 	}
306 
307 	return 0;
308 free_mem:
309 	rte_memzone_free(mz);
310 	return rc;
311 }
312 
/* PCI IDs of the RVU NIX inline devices (PF and VF) handled by this driver */
static const struct rte_pci_id cnxk_nix_inl_pci_map[] = {
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_RVU_NIX_INL_PF)},
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_RVU_NIX_INL_VF)},
	{
		.vendor_id = 0, /* sentinel terminating the table */
	},
};

static struct rte_pci_driver cnxk_nix_inl_pci = {
	.id_table = cnxk_nix_inl_pci_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = cnxk_nix_inl_dev_probe,
	.remove = cnxk_nix_inl_dev_remove,
};
327 
328 RTE_PMD_REGISTER_PCI(cnxk_nix_inl, cnxk_nix_inl_pci);
329 RTE_PMD_REGISTER_PCI_TABLE(cnxk_nix_inl, cnxk_nix_inl_pci_map);
330 RTE_PMD_REGISTER_KMOD_DEP(cnxk_nix_inl, "vfio-pci");
331 
332 RTE_PMD_REGISTER_PARAM_STRING(cnxk_nix_inl,
333 			      CNXK_NIX_INL_SELFTEST "=1"
334 			      CNXK_NIX_INL_IPSEC_IN_MAX_SPI "=<1-65535>"
335 			      CNXK_INL_CPT_CHANNEL "=<1-4095>/<1-4095>");
336