1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright 2017-2020 NXP
4  *
5  */
6 /* System headers */
7 #include <stdio.h>
8 #include <inttypes.h>
9 #include <unistd.h>
10 #include <limits.h>
11 #include <sched.h>
12 #include <signal.h>
13 #include <pthread.h>
14 #include <sys/types.h>
15 #include <sys/syscall.h>
16 #include <sys/eventfd.h>
17 
18 #include <rte_byteorder.h>
19 #include <rte_common.h>
20 #include <rte_interrupts.h>
21 #include <rte_log.h>
22 #include <rte_debug.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_memory.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_alarm.h>
29 #include <rte_ether.h>
30 #include <rte_ethdev_driver.h>
31 #include <rte_malloc.h>
32 #include <rte_ring.h>
33 #include <rte_bus.h>
34 #include <rte_mbuf_pool_ops.h>
35 #include <rte_mbuf_dyn.h>
36 
37 #include <dpaa_of.h>
38 #include <rte_dpaa_bus.h>
39 #include <rte_dpaa_logs.h>
40 #include <dpaax_iova_table.h>
41 
42 #include <fsl_usd.h>
43 #include <fsl_qman.h>
44 #include <fsl_bman.h>
45 #include <netcfg.h>
46 
47 static struct rte_dpaa_bus rte_dpaa_bus;
48 struct netcfg_info *dpaa_netcfg;
49 
50 /* Variable to hold the portal key, once created. */
51 static pthread_key_t dpaa_portal_key;
52 
53 unsigned int dpaa_svr_family;
54 
55 #define FSL_DPAA_BUS_NAME	dpaa_bus
56 
57 RTE_DEFINE_PER_LCORE(struct dpaa_portal *, dpaa_io);
58 
59 #define DPAA_SEQN_DYNFIELD_NAME "dpaa_seqn_dynfield"
60 int dpaa_seqn_dynfield_offset = -1;
61 
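/*
 * Illustrative sketch (editor's note, not part of the original source):
 * once rte_dpaa_portal_init() has registered the dynamic field, a consumer
 * can reach the per-mbuf sequence number through the offset, e.g.:
 *
 *   dpaa_seqn_t *seqn = RTE_MBUF_DYNFIELD(mbuf, dpaa_seqn_dynfield_offset,
 *                                          dpaa_seqn_t *);
 *
 * RTE_MBUF_DYNFIELD() is the generic accessor from rte_mbuf_dyn.h; the
 * exact helper wrapping it in the PMDs may differ.
 */
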
62 struct fm_eth_port_cfg *
63 dpaa_get_eth_port_cfg(int dev_id)
64 {
65 	return &dpaa_netcfg->port_cfg[dev_id];
66 }
67 
68 static int
69 compare_dpaa_devices(struct rte_dpaa_device *dev1,
70 		     struct rte_dpaa_device *dev2)
71 {
72 	int comp = 0;
73 
74 	/* Segregating ETH from SEC devices */
75 	if (dev1->device_type > dev2->device_type)
76 		comp = 1;
77 	else if (dev1->device_type < dev2->device_type)
78 		comp = -1;
79 	else
80 		comp = 0;
81 
82 	if ((comp != 0) || (dev1->device_type != FSL_DPAA_ETH))
83 		return comp;
84 
85 	if (dev1->id.fman_id > dev2->id.fman_id) {
86 		comp = 1;
87 	} else if (dev1->id.fman_id < dev2->id.fman_id) {
88 		comp = -1;
89 	} else {
90 		/* FMAN ids match, check for mac_id */
91 		if (dev1->id.mac_id > dev2->id.mac_id)
92 			comp = 1;
93 		else if (dev1->id.mac_id < dev2->id.mac_id)
94 			comp = -1;
95 		else
96 			comp = 0;
97 	}
98 
99 	return comp;
100 }
101 
102 static inline void
103 dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
104 {
105 	int comp, inserted = 0;
106 	struct rte_dpaa_device *dev = NULL;
107 	struct rte_dpaa_device *tdev = NULL;
108 
109 	TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
110 		comp = compare_dpaa_devices(newdev, dev);
111 		if (comp < 0) {
112 			TAILQ_INSERT_BEFORE(dev, newdev, next);
113 			inserted = 1;
114 			break;
115 		}
116 	}
117 
118 	if (!inserted)
119 		TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
120 }
121 
122 /*
123  * Checks the device tree (DTS) for a SEC device node.
124  * Returns -1 if no SEC device is available, 0 otherwise
125  */
126 static inline int
127 dpaa_sec_available(void)
128 {
129 	const struct device_node *caam_node;
130 
131 	for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
132 		return 0;
133 	}
134 
135 	return -1;
136 }
137 
138 static void dpaa_clean_device_list(void);
139 
140 static struct rte_devargs *
141 dpaa_devargs_lookup(struct rte_dpaa_device *dev)
142 {
143 	struct rte_devargs *devargs;
144 	char dev_name[32];
145 
146 	RTE_EAL_DEVARGS_FOREACH("dpaa_bus", devargs) {
147 		devargs->bus->parse(devargs->name, &dev_name);
148 		if (strcmp(dev_name, dev->device.name) == 0) {
149 			DPAA_BUS_INFO("**Devargs matched %s", dev_name);
150 			return devargs;
151 		}
152 	}
153 	return NULL;
154 }
155 
156 static int
157 dpaa_create_device_list(void)
158 {
159 	int i;
160 	int ret;
161 	struct rte_dpaa_device *dev;
162 	struct fm_eth_port_cfg *cfg;
163 	struct fman_if *fman_intf;
164 
165 	/* Creating Ethernet Devices */
166 	for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
167 		dev = calloc(1, sizeof(struct rte_dpaa_device));
168 		if (!dev) {
169 			DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
170 			ret = -ENOMEM;
171 			goto cleanup;
172 		}
173 
174 		dev->device.bus = &rte_dpaa_bus.bus;
175 
176 		cfg = &dpaa_netcfg->port_cfg[i];
177 		fman_intf = cfg->fman_if;
178 
179 		/* Device identifiers */
180 		dev->id.fman_id = fman_intf->fman_idx + 1;
181 		dev->id.mac_id = fman_intf->mac_idx;
182 		dev->device_type = FSL_DPAA_ETH;
183 		dev->id.dev_id = i;
184 
185 		/* Create device name */
186 		memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
187 		sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
188 			fman_intf->mac_idx);
189 		DPAA_BUS_LOG(INFO, "%s netdev added", dev->name);
190 		dev->device.name = dev->name;
191 		dev->device.devargs = dpaa_devargs_lookup(dev);
192 
193 		dpaa_add_to_device_list(dev);
194 	}
195 
196 	rte_dpaa_bus.device_count = i;
197 
198 	/* Unlike the ETH case, a fixed number (RTE_LIBRTE_DPAA_MAX_CRYPTODEV)
199 	 * of SEC devices is created, and only if the "sec" node is present in
200 	 * the device tree. Logically there is no limit on the number of
201 	 * devices (QI interfaces) that can be created.
202 	 */
203 
204 	if (dpaa_sec_available()) {
205 		DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
206 		return 0;
207 	}
208 
209 	/* Creating SEC Devices */
210 	for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
211 		dev = calloc(1, sizeof(struct rte_dpaa_device));
212 		if (!dev) {
213 			DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
214 			ret = -1;
215 			goto cleanup;
216 		}
217 
218 		dev->device_type = FSL_DPAA_CRYPTO;
219 		dev->id.dev_id = rte_dpaa_bus.device_count + i;
220 
221 		/* Even though RTE_CRYPTODEV_NAME_MAX_LEN is the valid length
222 		 * for a crypto PMD name, RTE_ETH_NAME_MAX_LEN is used here as
223 		 * that is the size allocated for dev->name.
224 		 */
225 		memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
226 		sprintf(dev->name, "dpaa_sec-%d", i+1);
227 		DPAA_BUS_LOG(INFO, "%s cryptodev added", dev->name);
228 		dev->device.name = dev->name;
229 		dev->device.devargs = dpaa_devargs_lookup(dev);
230 
231 		dpaa_add_to_device_list(dev);
232 	}
233 
234 	rte_dpaa_bus.device_count += i;
235 
236 	return 0;
237 
238 cleanup:
239 	dpaa_clean_device_list();
240 	return ret;
241 }
242 
243 static void
244 dpaa_clean_device_list(void)
245 {
246 	struct rte_dpaa_device *dev = NULL;
247 	struct rte_dpaa_device *tdev = NULL;
248 
249 	TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
250 		TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
251 		free(dev);
252 		dev = NULL;
253 	}
254 }
255 
256 int rte_dpaa_portal_init(void *arg)
257 {
258 	static const struct rte_mbuf_dynfield dpaa_seqn_dynfield_desc = {
259 		.name = DPAA_SEQN_DYNFIELD_NAME,
260 		.size = sizeof(dpaa_seqn_t),
261 		.align = __alignof__(dpaa_seqn_t),
262 	};
263 	unsigned int cpu, lcore = rte_lcore_id();
264 	int ret;
265 
266 	BUS_INIT_FUNC_TRACE();
267 
268 	if ((size_t)arg == 1 || lcore == LCORE_ID_ANY)
269 		lcore = rte_get_main_lcore();
270 	else
271 		if (lcore >= RTE_MAX_LCORE)
272 			return -1;
273 
274 	cpu = rte_lcore_to_cpu_id(lcore);
275 
276 	dpaa_seqn_dynfield_offset =
277 		rte_mbuf_dynfield_register(&dpaa_seqn_dynfield_desc);
278 	if (dpaa_seqn_dynfield_offset < 0) {
279 		DPAA_BUS_LOG(ERR, "Failed to register mbuf field for dpaa sequence number\n");
280 		return -rte_errno;
281 	}
282 
283 	/* Initialise bman thread portals */
284 	ret = bman_thread_init();
285 	if (ret) {
286 		DPAA_BUS_LOG(ERR, "bman_thread_init failed on core %u"
287 			     " (lcore=%u) with ret: %d", cpu, lcore, ret);
288 		return ret;
289 	}
290 
291 	DPAA_BUS_LOG(DEBUG, "BMAN thread initialized - CPU=%d lcore=%d",
292 		     cpu, lcore);
293 
294 	/* Initialise qman thread portals */
295 	ret = qman_thread_init();
296 	if (ret) {
297 		DPAA_BUS_LOG(ERR, "qman_thread_init failed on core %u"
298 			    " (lcore=%u) with ret: %d", cpu, lcore, ret);
299 		bman_thread_finish();
300 		return ret;
301 	}
302 
303 	DPAA_BUS_LOG(DEBUG, "QMAN thread initialized - CPU=%d lcore=%d",
304 		     cpu, lcore);
305 
306 	DPAA_PER_LCORE_PORTAL = rte_malloc(NULL, sizeof(struct dpaa_portal),
307 				    RTE_CACHE_LINE_SIZE);
308 	if (!DPAA_PER_LCORE_PORTAL) {
309 		DPAA_BUS_LOG(ERR, "Unable to allocate memory");
310 		bman_thread_finish();
311 		qman_thread_finish();
312 		return -ENOMEM;
313 	}
314 
315 	DPAA_PER_LCORE_PORTAL->qman_idx = qman_get_portal_index();
316 	DPAA_PER_LCORE_PORTAL->bman_idx = bman_get_portal_index();
317 	DPAA_PER_LCORE_PORTAL->tid = syscall(SYS_gettid);
318 
319 	ret = pthread_setspecific(dpaa_portal_key,
320 				  (void *)DPAA_PER_LCORE_PORTAL);
321 	if (ret) {
322 		DPAA_BUS_LOG(ERR, "pthread_setspecific failed on core %u"
323 			     " (lcore=%u) with ret: %d", cpu, lcore, ret);
324 		dpaa_portal_finish(NULL);
325 
326 		return ret;
327 	}
328 
329 	DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
330 
331 	return 0;
332 }
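
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * portal initialisation is per-lcore and is normally triggered lazily,
 * e.g. from rte_dpaa_portal_fq_init() below when DPAA_PER_LCORE_PORTAL is
 * still NULL, or explicitly by a PMD before doing I/O on a given lcore:
 *
 *   if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
 *           if (rte_dpaa_portal_init(NULL) < 0)
 *                   return -1;   // portal could not be affined to this lcore
 *   }
 */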
333 
334 int
335 rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
336 {
337 	/* Affine the portal created above with the channel */
338 	u32 sdqcr;
339 	int ret;
340 
341 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
342 		ret = rte_dpaa_portal_init(arg);
343 		if (ret < 0) {
344 			DPAA_BUS_LOG(ERR, "portal initialization failure");
345 			return ret;
346 		}
347 	}
348 
349 	/* Initialise qman specific portals */
350 	ret = fsl_qman_fq_portal_init(fq->qp);
351 	if (ret) {
352 		DPAA_BUS_LOG(ERR, "Unable to init fq portal");
353 		return -1;
354 	}
355 
356 	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
357 	qman_static_dequeue_add(sdqcr, fq->qp);
358 
359 	return 0;
360 }
361 
362 int rte_dpaa_portal_fq_close(struct qman_fq *fq)
363 {
364 	return fsl_qman_fq_portal_destroy(fq->qp);
365 }
366 
367 void
368 dpaa_portal_finish(void *arg)
369 {
370 	struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;
371 
372 	if (!dpaa_io_portal) {
373 		DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
374 		return;
375 	}
376 
377 	bman_thread_finish();
378 	qman_thread_finish();
379 
380 	pthread_setspecific(dpaa_portal_key, NULL);
381 
382 	rte_free(dpaa_io_portal);
383 	dpaa_io_portal = NULL;
384 	DPAA_PER_LCORE_PORTAL = NULL;
385 }
386 
387 static int
388 rte_dpaa_bus_parse(const char *name, void *out_name)
389 {
390 	int i, j;
391 	int max_fman = 2, max_macs = 16;
392 	char *dup_name;
393 	char *sep = NULL;
394 
395 	/* There are two ways of passing device name, with and without
396 	 * separator. "dpaa_bus:fm1-mac3" with separator, and "fm1-mac3"
397 	 * without separator. Both need to be handled.
398 	 * It is also possible that "name=fm1-mac3" is passed along.
399 	 */
400 	DPAA_BUS_DEBUG("Parse device name (%s)", name);
401 
402 	/* Check for dpaa_bus:fm1-mac3 style */
403 	dup_name = strdup(name);
404 	sep = strchr(dup_name, ':');
405 	if (!sep)
406 		/* If not, check for name=fm1-mac3 style */
407 		sep = strchr(dup_name, '=');
408 
409 	if (sep)
410 		/* jump over the separator */
411 		sep = (char *) (sep + 1);
412 	else
413 		sep = dup_name;
414 
415 	for (i = 0; i < max_fman; i++) {
416 		for (j = 0; j < max_macs; j++) {
417 			char fm_name[16];
418 			snprintf(fm_name, 16, "fm%d-mac%d", i, j);
419 			if (strcmp(fm_name, sep) == 0) {
420 				if (out_name)
421 					strcpy(out_name, sep);
422 				free(dup_name);
423 				return 0;
424 			}
425 		}
426 	}
427 
428 	for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
429 		char sec_name[16];
430 
431 		snprintf(sec_name, 16, "dpaa_sec-%d", i+1);
432 		if (strcmp(sec_name, sep) == 0) {
433 			if (out_name)
434 				strcpy(out_name, sep);
435 			free(dup_name);
436 			return 0;
437 		}
438 	}
439 
440 	free(dup_name);
441 	return -EINVAL;
442 }
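
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * the strings accepted by rte_dpaa_bus_parse() above are the forms a user
 * would pass on the EAL command line to allow or block a DPAA device,
 * assuming such ports exist on the board, e.g.:
 *
 *   ./app -a dpaa_bus:fm1-mac3 ...    // allow-list with the bus prefix
 *   ./app -a fm1-mac3 ...             // allow-list without the prefix
 *   ./app -b dpaa_bus:dpaa_sec-1 ...  // block-list a SEC device
 */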
443 
444 #define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
445 #define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"
446 
447 static int
448 rte_dpaa_bus_scan(void)
449 {
450 	int ret;
451 
452 	BUS_INIT_FUNC_TRACE();
453 
454 	if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
455 	    (access(DPAA_DEV_PATH2, F_OK) != 0)) {
456 		RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
457 		return 0;
458 	}
459 
460 	if (rte_dpaa_bus.detected)
461 		return 0;
462 
463 	rte_dpaa_bus.detected = 1;
464 
465 	/* Create the key, supplying a function that will be invoked
466 	 * when a portal-affined thread is deleted.
467 	 */
468 	ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
469 	if (ret) {
470 		DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
471 		dpaa_clean_device_list();
472 		return ret;
473 	}
474 
475 	return 0;
476 }
477 
478 /* register a dpaa bus based dpaa driver */
479 void
480 rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
481 {
482 	RTE_VERIFY(driver);
483 
484 	BUS_INIT_FUNC_TRACE();
485 
486 	TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
487 	/* Update Bus references */
488 	driver->dpaa_bus = &rte_dpaa_bus;
489 }
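
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * a DPAA PMD typically registers itself at load time from a constructor.
 * The my_dpaa_* names below are placeholders; real PMDs use the
 * RTE_PMD_REGISTER_DPAA() helper from rte_dpaa_bus.h, which expands to
 * an equivalent constructor.
 *
 *   static struct rte_dpaa_driver my_dpaa_pmd = {
 *           .drv_type = FSL_DPAA_ETH,
 *           .probe    = my_dpaa_probe,
 *           .remove   = my_dpaa_remove,
 *   };
 *
 *   RTE_INIT(my_dpaa_pmd_init)
 *   {
 *           rte_dpaa_driver_register(&my_dpaa_pmd);
 *   }
 */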
490 
491 /* un-register a dpaa bus based dpaa driver */
492 void
493 rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
494 {
495 	struct rte_dpaa_bus *dpaa_bus;
496 
497 	BUS_INIT_FUNC_TRACE();
498 
499 	dpaa_bus = driver->dpaa_bus;
500 
501 	TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
502 	/* Update Bus references */
503 	driver->dpaa_bus = NULL;
504 }
505 
506 static int
507 rte_dpaa_device_match(struct rte_dpaa_driver *drv,
508 		      struct rte_dpaa_device *dev)
509 {
510 	if (!drv || !dev) {
511 		DPAA_BUS_DEBUG("Invalid drv or dev received.");
512 		return -1;
513 	}
514 
515 	if (drv->drv_type == dev->device_type)
516 		return 0;
517 
518 	return -1;
519 }
520 
521 static int
522 rte_dpaa_bus_dev_build(void)
523 {
524 	int ret;
525 
526 	/* Load the device-tree driver */
527 	ret = of_init();
528 	if (ret) {
529 		DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
530 		return -1;
531 	}
532 
533 	/* Get the interface configurations from device-tree */
534 	dpaa_netcfg = netcfg_acquire();
535 	if (!dpaa_netcfg) {
536 		DPAA_BUS_LOG(ERR, "netcfg_acquire failed");
537 		return -EINVAL;
538 	}
539 
540 	RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");
541 
542 	if (!dpaa_netcfg->num_ethports) {
543 		DPAA_BUS_LOG(INFO, "no network interfaces available");
544 		/* This is not an error */
545 		return 0;
546 	}
547 
548 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
549 	dump_netcfg(dpaa_netcfg);
550 #endif
551 
552 	DPAA_BUS_LOG(DEBUG, "Number of ethernet devices = %d",
553 		     dpaa_netcfg->num_ethports);
554 	ret = dpaa_create_device_list();
555 	if (ret) {
556 		DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
557 		return ret;
558 	}
559 	return 0;
560 }
561 
562 static int rte_dpaa_setup_intr(struct rte_intr_handle *intr_handle)
563 {
564 	int fd;
565 
566 	fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
567 	if (fd < 0) {
568 		DPAA_BUS_ERR("Cannot set up eventfd, error %i (%s)",
569 			     errno, strerror(errno));
570 		return errno;
571 	}
572 
573 	intr_handle->fd = fd;
574 	intr_handle->type = RTE_INTR_HANDLE_EXT;
575 
576 	return 0;
577 }
578 
579 static int
580 rte_dpaa_bus_probe(void)
581 {
582 	int ret = -1;
583 	struct rte_dpaa_device *dev;
584 	struct rte_dpaa_driver *drv;
585 	FILE *svr_file = NULL;
586 	unsigned int svr_ver;
587 	int probe_all = rte_dpaa_bus.bus.conf.scan_mode != RTE_BUS_SCAN_ALLOWLIST;
588 	static int process_once;
589 
590 	/* If DPAA bus is not present nothing needs to be done */
591 	if (!rte_dpaa_bus.detected)
592 		return 0;
593 
594 	/* Device list creation is only done once */
595 	if (!process_once) {
596 		rte_dpaa_bus_dev_build();
597 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
598 			/* One time load of Qman/Bman drivers */
599 			ret = qman_global_init();
600 			if (ret) {
601 				DPAA_BUS_ERR("QMAN initialization failed: %d",
602 					     ret);
603 				return ret;
604 			}
605 			ret = bman_global_init();
606 			if (ret) {
607 				DPAA_BUS_ERR("BMAN initialization failed: %d",
608 					     ret);
609 				return ret;
610 			}
611 		}
612 	}
613 	process_once = 1;
614 
615 	/* If no device present on DPAA bus nothing needs to be done */
616 	if (TAILQ_EMPTY(&rte_dpaa_bus.device_list))
617 		return 0;
618 
619 	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
620 	if (svr_file) {
621 		if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
622 			dpaa_svr_family = svr_ver & SVR_MASK;
623 		fclose(svr_file);
624 	}
625 
626 	TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
627 		if (dev->device_type == FSL_DPAA_ETH) {
628 			ret = rte_dpaa_setup_intr(&dev->intr_handle);
629 			if (ret)
630 				DPAA_BUS_ERR("Error setting up interrupt.\n");
631 		}
632 	}
633 
634 	/* And initialize the PA->VA translation table */
635 	dpaax_iova_table_populate();
636 
637 	/* For each registered driver, and device, call the driver->probe */
638 	TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
639 		TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
640 			ret = rte_dpaa_device_match(drv, dev);
641 			if (ret)
642 				continue;
643 
644 			if (rte_dev_is_probed(&dev->device))
645 				continue;
646 
647 			if (!drv->probe ||
648 			    (dev->device.devargs &&
649 			     dev->device.devargs->policy == RTE_DEV_BLOCKED))
650 				continue;
651 
652 			if (probe_all ||
653 			    (dev->device.devargs &&
654 			     dev->device.devargs->policy == RTE_DEV_ALLOWED)) {
655 				ret = drv->probe(drv, dev);
656 				if (ret) {
657 					DPAA_BUS_ERR("unable to probe:%s",
658 						     dev->name);
659 				} else {
660 					dev->driver = drv;
661 					dev->device.driver = &drv->driver;
662 				}
663 			}
664 			break;
665 		}
666 	}
667 
668 	/* Register DPAA mempool ops only if any DPAA device has
669 	 * been detected.
670 	 */
671 	rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);
672 
673 	return 0;
674 }
675 
676 static struct rte_device *
677 rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
678 		     const void *data)
679 {
680 	struct rte_dpaa_device *dev;
681 	const struct rte_dpaa_device *dstart;
682 
683 	/* find_device is called with 'data' as an opaque object - just call
684 	 * cmp with this and each device object on bus.
685 	 */
686 
687 	if (start != NULL) {
688 		dstart = RTE_DEV_TO_DPAA_CONST(start);
689 		dev = TAILQ_NEXT(dstart, next);
690 	} else {
691 		dev = TAILQ_FIRST(&rte_dpaa_bus.device_list);
692 	}
693 
694 	while (dev != NULL) {
695 		if (cmp(&dev->device, data) == 0) {
696 			DPAA_BUS_DEBUG("Found dev=(%s)\n", dev->device.name);
697 			return &dev->device;
698 		}
699 		dev = TAILQ_NEXT(dev, next);
700 	}
701 
702 	DPAA_BUS_DEBUG("Unable to find any device\n");
703 	return NULL;
704 }
705 
706 /*
707  * Get the IOMMU class of DPAA devices on the bus.
708  */
709 static enum rte_iova_mode
710 rte_dpaa_get_iommu_class(void)
711 {
712 	if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
713 	    (access(DPAA_DEV_PATH2, F_OK) != 0)) {
714 		return RTE_IOVA_DC;
715 	}
716 	return RTE_IOVA_PA;
717 }
718 
719 static int
720 dpaa_bus_plug(struct rte_device *dev __rte_unused)
721 {
722 	/* No operation is performed while plugging the device */
723 	return 0;
724 }
725 
726 static int
727 dpaa_bus_unplug(struct rte_device *dev __rte_unused)
728 {
729 	/* No operation is performed while unplugging the device */
730 	return 0;
731 }
732 
733 static void *
734 dpaa_bus_dev_iterate(const void *start, const char *str,
735 		     const struct rte_dev_iterator *it __rte_unused)
736 {
737 	const struct rte_dpaa_device *dstart;
738 	struct rte_dpaa_device *dev;
739 	char *dup, *dev_name = NULL;
740 
741 	if (str == NULL) {
742 		DPAA_BUS_DEBUG("No device string");
743 		return NULL;
744 	}
745 
746 	/* Expectation is that the device string is of the form name=device_name */
747 	if (strncmp(str, "name=", 5) != 0) {
748 		DPAA_BUS_DEBUG("Invalid device string (%s)\n", str);
749 		return NULL;
750 	}
751 
752 	/* Now that name=device_name format is available, split */
753 	dup = strdup(str);
754 	dev_name = dup + strlen("name=");
755 
756 	if (start != NULL) {
757 		dstart = RTE_DEV_TO_DPAA_CONST(start);
758 		dev = TAILQ_NEXT(dstart, next);
759 	} else {
760 		dev = TAILQ_FIRST(&rte_dpaa_bus.device_list);
761 	}
762 
763 	while (dev != NULL) {
764 		if (strcmp(dev->device.name, dev_name) == 0) {
765 			free(dup);
766 			return &dev->device;
767 		}
768 		dev = TAILQ_NEXT(dev, next);
769 	}
770 
771 	free(dup);
772 	return NULL;
773 }
774 
775 static struct rte_dpaa_bus rte_dpaa_bus = {
776 	.bus = {
777 		.scan = rte_dpaa_bus_scan,
778 		.probe = rte_dpaa_bus_probe,
779 		.parse = rte_dpaa_bus_parse,
780 		.find_device = rte_dpaa_find_device,
781 		.get_iommu_class = rte_dpaa_get_iommu_class,
782 		.plug = dpaa_bus_plug,
783 		.unplug = dpaa_bus_unplug,
784 		.dev_iterate = dpaa_bus_dev_iterate,
785 	},
786 	.device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
787 	.driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
788 	.device_count = 0,
789 };
790 
791 RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);
792 RTE_LOG_REGISTER(dpaa_logtype_bus, bus.dpaa, NOTICE);
793