/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2017-2020 NXP
 *
 */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/eventfd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_bus.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mbuf_dyn.h>

#include <dpaa_of.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <dpaax_iova_table.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <netcfg.h>

static struct rte_dpaa_bus rte_dpaa_bus;
struct netcfg_info *dpaa_netcfg;

/* define a variable to hold the portal_key, once created. */
static pthread_key_t dpaa_portal_key;

unsigned int dpaa_svr_family;

#define FSL_DPAA_BUS_NAME	dpaa_bus

RTE_DEFINE_PER_LCORE(struct dpaa_portal *, dpaa_io);

#define DPAA_SEQN_DYNFIELD_NAME "dpaa_seqn_dynfield"
int dpaa_seqn_dynfield_offset = -1;

struct fm_eth_port_cfg *
dpaa_get_eth_port_cfg(int dev_id)
{
	return &dpaa_netcfg->port_cfg[dev_id];
}

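/* Ordering helper for the sorted bus device list: devices are grouped by
 * type, and ETH devices are further ordered by FMAN id and then MAC id.
 */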
static int
compare_dpaa_devices(struct rte_dpaa_device *dev1,
		     struct rte_dpaa_device *dev2)
{
	int comp = 0;

	/* Segregating ETH from SEC devices */
	if (dev1->device_type > dev2->device_type)
		comp = 1;
	else if (dev1->device_type < dev2->device_type)
		comp = -1;
	else
		comp = 0;

	if ((comp != 0) || (dev1->device_type != FSL_DPAA_ETH))
		return comp;

	if (dev1->id.fman_id > dev2->id.fman_id) {
		comp = 1;
	} else if (dev1->id.fman_id < dev2->id.fman_id) {
		comp = -1;
	} else {
		/* FMAN ids match, check for mac_id */
		if (dev1->id.mac_id > dev2->id.mac_id)
			comp = 1;
		else if (dev1->id.mac_id < dev2->id.mac_id)
			comp = -1;
		else
			comp = 0;
	}

	return comp;
}

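/* Insert newdev into the bus device list, keeping the list sorted as per
 * compare_dpaa_devices().
 */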
static inline void
dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
{
	int comp, inserted = 0;
	struct rte_dpaa_device *dev = NULL;
	struct rte_dpaa_device *tdev = NULL;

	RTE_TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
		comp = compare_dpaa_devices(newdev, dev);
		if (comp < 0) {
			TAILQ_INSERT_BEFORE(dev, newdev, next);
			inserted = 1;
			break;
		}
	}

	if (!inserted)
		TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
}

/*
 * Reads the SEC device from DTS
 * Returns -1 if SEC devices not available, 0 otherwise
 */
static inline int
dpaa_sec_available(void)
{
	const struct device_node *caam_node;

	for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
		return 0;
	}

	return -1;
}

static void dpaa_clean_device_list(void);

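/* Return the devargs entry whose parsed name matches this device, if any. */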
static struct rte_devargs *
dpaa_devargs_lookup(struct rte_dpaa_device *dev)
{
	struct rte_devargs *devargs;
	char dev_name[32];

	RTE_EAL_DEVARGS_FOREACH("dpaa_bus", devargs) {
		devargs->bus->parse(devargs->name, &dev_name);
		if (strcmp(dev_name, dev->device.name) == 0) {
			DPAA_BUS_INFO("**Devargs matched %s", dev_name);
			return devargs;
		}
	}
	return NULL;
}

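/* Walk the acquired network configuration and populate the bus device list
 * with ETH, SEC (crypto) and QDMA devices.
 */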
static int
dpaa_create_device_list(void)
{
	int i;
	int ret;
	struct rte_dpaa_device *dev;
	struct fm_eth_port_cfg *cfg;
	struct fman_if *fman_intf;

	/* Creating Ethernet Devices */
	for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
		dev = calloc(1, sizeof(struct rte_dpaa_device));
		if (!dev) {
			DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
			ret = -ENOMEM;
			goto cleanup;
		}

		dev->device.bus = &rte_dpaa_bus.bus;

		/* Allocate interrupt handle instance */
		dev->intr_handle =
			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
		if (dev->intr_handle == NULL) {
			DPAA_BUS_LOG(ERR, "Failed to allocate intr handle");
			ret = -ENOMEM;
			goto cleanup;
		}

		cfg = &dpaa_netcfg->port_cfg[i];
		fman_intf = cfg->fman_if;

		/* Device identifiers */
		dev->id.fman_id = fman_intf->fman_idx + 1;
		dev->id.mac_id = fman_intf->mac_idx;
		dev->device_type = FSL_DPAA_ETH;
		dev->id.dev_id = i;

		/* Create device name */
		memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
		sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
			fman_intf->mac_idx);
		DPAA_BUS_LOG(INFO, "%s netdev added", dev->name);
		dev->device.name = dev->name;
		dev->device.devargs = dpaa_devargs_lookup(dev);

		dpaa_add_to_device_list(dev);
	}

	rte_dpaa_bus.device_count = i;

	/* Unlike the ETH case, a fixed number (RTE_LIBRTE_DPAA_MAX_CRYPTODEV)
	 * of SEC devices is created, and only if a SEC node is found in the
	 * device tree. Logically there is no limit on the number of devices
	 * (QI interfaces) that can be created.
	 */

	if (dpaa_sec_available()) {
		DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
		return 0;
	}

	/* Creating SEC Devices */
	for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
		dev = calloc(1, sizeof(struct rte_dpaa_device));
		if (!dev) {
			DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
			ret = -1;
			goto cleanup;
		}

		/* Allocate interrupt handle instance */
		dev->intr_handle =
			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
		if (dev->intr_handle == NULL) {
			DPAA_BUS_LOG(ERR, "Failed to allocate intr handle");
			ret = -ENOMEM;
			goto cleanup;
		}

		dev->device_type = FSL_DPAA_CRYPTO;
		dev->id.dev_id = rte_dpaa_bus.device_count + i;

		/* Even though RTE_CRYPTODEV_NAME_MAX_LEN is the valid length
		 * of a crypto PMD name, RTE_ETH_NAME_MAX_LEN is used here as
		 * that is the size allocated for dev->name.
		 */
		memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
		sprintf(dev->name, "dpaa_sec-%d", i+1);
		DPAA_BUS_LOG(INFO, "%s cryptodev added", dev->name);
		dev->device.name = dev->name;
		dev->device.devargs = dpaa_devargs_lookup(dev);

		dpaa_add_to_device_list(dev);
	}

	rte_dpaa_bus.device_count += i;

	/* Creating QDMA Device */
	for (i = 0; i < RTE_DPAA_QDMA_DEVICES; i++) {
		dev = calloc(1, sizeof(struct rte_dpaa_device));
		if (!dev) {
			DPAA_BUS_LOG(ERR, "Failed to allocate QDMA device");
			ret = -1;
			goto cleanup;
		}

		dev->device_type = FSL_DPAA_QDMA;
		dev->id.dev_id = rte_dpaa_bus.device_count + i;

		memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
		sprintf(dev->name, "dpaa_qdma-%d", i+1);
		DPAA_BUS_LOG(INFO, "%s qdma device added", dev->name);
		dev->device.name = dev->name;
		dev->device.devargs = dpaa_devargs_lookup(dev);

		dpaa_add_to_device_list(dev);
	}
	rte_dpaa_bus.device_count += i;

	return 0;

cleanup:
	dpaa_clean_device_list();
	return ret;
}

static void
dpaa_clean_device_list(void)
{
	struct rte_dpaa_device *dev = NULL;
	struct rte_dpaa_device *tdev = NULL;

	RTE_TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
		TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
		rte_intr_instance_free(dev->intr_handle);
		free(dev);
		dev = NULL;
	}
}

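/* Per-thread portal initialization: register the sequence number mbuf
 * dynfield, set up BMAN and QMAN portals for the calling thread, and store
 * the resulting portal both per-lcore and under dpaa_portal_key so that
 * dpaa_portal_finish() runs when the thread exits.
 */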
int rte_dpaa_portal_init(void *arg)
{
	static const struct rte_mbuf_dynfield dpaa_seqn_dynfield_desc = {
		.name = DPAA_SEQN_DYNFIELD_NAME,
		.size = sizeof(dpaa_seqn_t),
		.align = __alignof__(dpaa_seqn_t),
	};
	unsigned int cpu, lcore = rte_lcore_id();
	int ret;

	BUS_INIT_FUNC_TRACE();

	if ((size_t)arg == 1 || lcore == LCORE_ID_ANY)
		lcore = rte_get_main_lcore();
	else
		if (lcore >= RTE_MAX_LCORE)
			return -1;

	cpu = rte_lcore_to_cpu_id(lcore);

	dpaa_seqn_dynfield_offset =
		rte_mbuf_dynfield_register(&dpaa_seqn_dynfield_desc);
	if (dpaa_seqn_dynfield_offset < 0) {
		DPAA_BUS_LOG(ERR, "Failed to register mbuf field for dpaa sequence number\n");
		return -rte_errno;
	}

	/* Initialise bman thread portals */
	ret = bman_thread_init();
	if (ret) {
		DPAA_BUS_LOG(ERR, "bman_thread_init failed on core %u"
			     " (lcore=%u) with ret: %d", cpu, lcore, ret);
		return ret;
	}

	DPAA_BUS_LOG(DEBUG, "BMAN thread initialized - CPU=%d lcore=%d",
		     cpu, lcore);

	/* Initialise qman thread portals */
	ret = qman_thread_init();
	if (ret) {
		DPAA_BUS_LOG(ERR, "qman_thread_init failed on core %u"
			    " (lcore=%u) with ret: %d", cpu, lcore, ret);
		bman_thread_finish();
		return ret;
	}

	DPAA_BUS_LOG(DEBUG, "QMAN thread initialized - CPU=%d lcore=%d",
		     cpu, lcore);

	DPAA_PER_LCORE_PORTAL = rte_malloc(NULL, sizeof(struct dpaa_portal),
				    RTE_CACHE_LINE_SIZE);
	if (!DPAA_PER_LCORE_PORTAL) {
		DPAA_BUS_LOG(ERR, "Unable to allocate memory");
		bman_thread_finish();
		qman_thread_finish();
		return -ENOMEM;
	}

	DPAA_PER_LCORE_PORTAL->qman_idx = qman_get_portal_index();
	DPAA_PER_LCORE_PORTAL->bman_idx = bman_get_portal_index();
	DPAA_PER_LCORE_PORTAL->tid = rte_gettid();

	ret = pthread_setspecific(dpaa_portal_key,
				  (void *)DPAA_PER_LCORE_PORTAL);
	if (ret) {
		DPAA_BUS_LOG(ERR, "pthread_setspecific failed on core %u"
			     " (lcore=%u) with ret: %d", cpu, lcore, ret);
		dpaa_portal_finish(NULL);

		return ret;
	}

	DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");

	return 0;
}

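/* Set up a dedicated QMAN portal for the given frame queue, creating the
 * per-thread portal first if it does not exist yet.
 */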
int
rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
{
	/* Affine above created portal with channel */
	u32 sdqcr;
	int ret;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init(arg);
		if (ret < 0) {
			DPAA_BUS_LOG(ERR, "portal initialization failure");
			return ret;
		}
	}

	/* Initialise qman specific portals */
	ret = fsl_qman_fq_portal_init(fq->qp);
	if (ret) {
		DPAA_BUS_LOG(ERR, "Unable to init fq portal");
		return -1;
	}

	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
	qman_static_dequeue_add(sdqcr, fq->qp);

	return 0;
}

int rte_dpaa_portal_fq_close(struct qman_fq *fq)
{
	return fsl_qman_fq_portal_destroy(fq->qp);
}

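/* Tear down the calling thread's portals. Also installed as the destructor
 * of dpaa_portal_key, so it runs automatically when a portal-affined thread
 * exits.
 */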
void
dpaa_portal_finish(void *arg)
{
	struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;

	if (!dpaa_io_portal) {
		DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
		return;
	}

	bman_thread_finish();
	qman_thread_finish();

	pthread_setspecific(dpaa_portal_key, NULL);

	rte_free(dpaa_io_portal);
	dpaa_io_portal = NULL;
	DPAA_PER_LCORE_PORTAL = NULL;
}

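/* Validate a DPAA device name ("fmX-macY" or "dpaa_sec-N", optionally
 * prefixed with "dpaa_bus:" or "name="); if 'out' is non-NULL, copy the bare
 * device name into it.
 */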
static int
rte_dpaa_bus_parse(const char *name, void *out)
{
	unsigned int i, j;
	size_t delta;
	size_t max_name_len;

	/* There are two ways of passing device name, with and without
	 * separator. "dpaa_bus:fm1-mac3" with separator, and "fm1-mac3"
	 * without separator. Both need to be handled.
	 * It is also possible that "name=fm1-mac3" is passed along.
	 */
	DPAA_BUS_DEBUG("Parse device name (%s)", name);

	delta = 0;
	if (strncmp(name, "dpaa_bus:", 9) == 0) {
		delta = 9;
	} else if (strncmp(name, "name=", 5) == 0) {
		delta = 5;
	}

	if (strncmp("dpaa_sec", &name[delta], 8) == 0) {
		if (sscanf(&name[delta], "dpaa_sec-%u", &i) != 1 ||
				i < 1 || i > 4)
			return -EINVAL;
		max_name_len = sizeof("dpaa_sec-.") - 1;
	} else {
		if (sscanf(&name[delta], "fm%u-mac%u", &i, &j) != 2 ||
				i >= 2 || j >= 16)
			return -EINVAL;

		max_name_len = sizeof("fm.-mac..") - 1;
	}

	if (out != NULL) {
		char *out_name = out;

		/* Do not check for truncation, either name ends with
		 * '\0' or the device name is followed by parameters and there
		 * will be a ',' instead. Not copying past this comma is not an
		 * error.
		 */
		strlcpy(out_name, &name[delta], max_name_len + 1);

		/* Second digit of mac%u could instead be ','. */
		if ((strlen(out_name) == max_name_len) &&
		    out_name[max_name_len - 1] == ',')
			out_name[max_name_len - 1] = '\0';
	}

	return 0;
}

#define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
#define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"

static int
rte_dpaa_bus_scan(void)
{
	int ret;

	BUS_INIT_FUNC_TRACE();

	if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
	    (access(DPAA_DEV_PATH2, F_OK) != 0)) {
		RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
		return 0;
	}

	if (rte_dpaa_bus.detected)
		return 0;

	rte_dpaa_bus.detected = 1;

	/* Create the key, supplying a destructor that is invoked
	 * when a portal-affined thread exits.
	 */
	ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
	if (ret) {
		DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
		dpaa_clean_device_list();
		return ret;
	}

	return 0;
}

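/* Drivers normally register themselves through the RTE_PMD_REGISTER_DPAA()
 * helper in rte_dpaa_bus.h, which calls rte_dpaa_driver_register() from a
 * constructor at load time.
 */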
/* register a dpaa bus based dpaa driver */
void
rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
{
	RTE_VERIFY(driver);

	BUS_INIT_FUNC_TRACE();

	TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
	/* Update Bus references */
	driver->dpaa_bus = &rte_dpaa_bus;
}

/* un-register a dpaa bus based dpaa driver */
void
rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
{
	struct rte_dpaa_bus *dpaa_bus;

	BUS_INIT_FUNC_TRACE();

	dpaa_bus = driver->dpaa_bus;

	TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
	/* Update Bus references */
	driver->dpaa_bus = NULL;
}

static int
rte_dpaa_device_match(struct rte_dpaa_driver *drv,
		      struct rte_dpaa_device *dev)
{
	if (!drv || !dev) {
		DPAA_BUS_DEBUG("Invalid drv or dev received.");
		return -1;
	}

	if (drv->drv_type == dev->device_type)
		return 0;

	return -1;
}

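/* One-time bus build: load the device-tree driver, acquire the USDPAA
 * network configuration and create the device list.
 */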
static int
rte_dpaa_bus_dev_build(void)
{
	int ret;

	/* Load the device-tree driver */
	ret = of_init();
	if (ret) {
		DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
		return -1;
	}

	/* Get the interface configurations from device-tree */
	dpaa_netcfg = netcfg_acquire();
	if (!dpaa_netcfg) {
		DPAA_BUS_LOG(ERR,
			"netcfg failed: /dev/fsl_usdpaa device not available");
		DPAA_BUS_WARN(
			"Check if you are using USDPAA based device tree");
		return -EINVAL;
	}

	RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");

	if (!dpaa_netcfg->num_ethports) {
		DPAA_BUS_LOG(INFO, "NO DPDK mapped net interfaces available");
		/* This is not an error */
	}

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	dump_netcfg(dpaa_netcfg);
#endif

	DPAA_BUS_LOG(DEBUG, "Number of ethernet devices = %d",
		     dpaa_netcfg->num_ethports);
	ret = dpaa_create_device_list();
	if (ret) {
		DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
		return ret;
	}
	return 0;
}

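/* Create an eventfd-backed external interrupt handle for an ETH device. */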
static int rte_dpaa_setup_intr(struct rte_intr_handle *intr_handle)
{
	int fd;

	fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	if (fd < 0) {
		DPAA_BUS_ERR("Cannot set up eventfd, error %i (%s)",
			     errno, strerror(errno));
		return errno;
	}

	if (rte_intr_fd_set(intr_handle, fd))
		return rte_errno;

	if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_EXT))
		return rte_errno;

	return 0;
}

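/* Bus probe: one-time QMAN/BMAN global initialization and device list build,
 * SoC version detection, per-device interrupt setup, then driver/device
 * matching via each driver's probe callback.
 */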
static int
rte_dpaa_bus_probe(void)
{
	int ret = -1;
	struct rte_dpaa_device *dev;
	struct rte_dpaa_driver *drv;
	FILE *svr_file = NULL;
	unsigned int svr_ver;
	int probe_all = rte_dpaa_bus.bus.conf.scan_mode != RTE_BUS_SCAN_ALLOWLIST;
	static int process_once;

	/* If DPAA bus is not present nothing needs to be done */
	if (!rte_dpaa_bus.detected)
		return 0;

	/* Device list creation is only done once */
	if (!process_once) {
		rte_dpaa_bus_dev_build();
		/* One time load of Qman/Bman drivers */
		ret = qman_global_init();
		if (ret) {
			DPAA_BUS_ERR("QMAN initialization failed: %d",
				     ret);
			return ret;
		}
		ret = bman_global_init();
		if (ret) {
			DPAA_BUS_ERR("BMAN initialization failed: %d",
				     ret);
			return ret;
		}
	}
	process_once = 1;

	/* If no device present on DPAA bus nothing needs to be done */
	if (TAILQ_EMPTY(&rte_dpaa_bus.device_list))
		return 0;

	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
	if (svr_file) {
		if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
			dpaa_svr_family = svr_ver & SVR_MASK;
		fclose(svr_file);
	}

	TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
		if (dev->device_type == FSL_DPAA_ETH) {
			ret = rte_dpaa_setup_intr(dev->intr_handle);
			if (ret)
				DPAA_BUS_ERR("Error setting up interrupt.\n");
		}
	}

	/* And initialize the PA->VA translation table */
	dpaax_iova_table_populate();

	/* For each registered driver, and device, call the driver->probe */
	TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
		TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
			ret = rte_dpaa_device_match(drv, dev);
			if (ret)
				continue;

			if (rte_dev_is_probed(&dev->device))
				continue;

			if (!drv->probe ||
			    (dev->device.devargs &&
			     dev->device.devargs->policy == RTE_DEV_BLOCKED))
				continue;

			if (probe_all ||
			    (dev->device.devargs &&
			     dev->device.devargs->policy == RTE_DEV_ALLOWED)) {
				ret = drv->probe(drv, dev);
				if (ret) {
					DPAA_BUS_ERR("unable to probe:%s",
						     dev->name);
				} else {
					dev->driver = drv;
					dev->device.driver = &drv->driver;
				}
			}
			break;
		}
	}

	/* Register DPAA mempool ops only if any DPAA device has
	 * been detected.
	 */
	rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);

	return 0;
}

static struct rte_device *
rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
		     const void *data)
{
	struct rte_dpaa_device *dev;
	const struct rte_dpaa_device *dstart;

	/* find_device is called with 'data' as an opaque object - just call
	 * cmp with this and each device object on bus.
	 */

	if (start != NULL) {
		dstart = RTE_DEV_TO_DPAA_CONST(start);
		dev = TAILQ_NEXT(dstart, next);
	} else {
		dev = TAILQ_FIRST(&rte_dpaa_bus.device_list);
	}

	while (dev != NULL) {
		if (cmp(&dev->device, data) == 0) {
			DPAA_BUS_DEBUG("Found dev=(%s)\n", dev->device.name);
			return &dev->device;
		}
		dev = TAILQ_NEXT(dev, next);
	}

	DPAA_BUS_DEBUG("Unable to find any device\n");
	return NULL;
}

/*
 * Get the IOMMU class of DPAA devices on the bus.
 */
static enum rte_iova_mode
rte_dpaa_get_iommu_class(void)
{
	if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
	    (access(DPAA_DEV_PATH2, F_OK) != 0)) {
		return RTE_IOVA_DC;
	}
	return RTE_IOVA_PA;
}

static int
dpaa_bus_plug(struct rte_device *dev __rte_unused)
{
	/* No operation is performed while plugging the device */
	return 0;
}

static int
dpaa_bus_unplug(struct rte_device *dev __rte_unused)
{
	/* No operation is performed while unplugging the device */
	return 0;
}

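/* dev_iterate bus callback: look up a device by its "name=<device_name>"
 * string in the bus device list.
 */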
static void *
dpaa_bus_dev_iterate(const void *start, const char *str,
		     const struct rte_dev_iterator *it __rte_unused)
{
	const struct rte_dpaa_device *dstart;
	struct rte_dpaa_device *dev;
	char *dup, *dev_name = NULL;

	if (str == NULL) {
		DPAA_BUS_DEBUG("No device string");
		return NULL;
	}

	/* Expectation is that device would be name=device_name */
	if (strncmp(str, "name=", 5) != 0) {
		DPAA_BUS_DEBUG("Invalid device string (%s)\n", str);
		return NULL;
	}

	/* Now that name=device_name format is available, split */
	dup = strdup(str);
	if (dup == NULL) {
		DPAA_BUS_DEBUG("Unable to duplicate device string (%s)", str);
		return NULL;
	}
	dev_name = dup + strlen("name=");

	if (start != NULL) {
		dstart = RTE_DEV_TO_DPAA_CONST(start);
		dev = TAILQ_NEXT(dstart, next);
	} else {
		dev = TAILQ_FIRST(&rte_dpaa_bus.device_list);
	}

	while (dev != NULL) {
		if (strcmp(dev->device.name, dev_name) == 0) {
			free(dup);
			return &dev->device;
		}
		dev = TAILQ_NEXT(dev, next);
	}

	free(dup);
	return NULL;
}

static struct rte_dpaa_bus rte_dpaa_bus = {
	.bus = {
		.scan = rte_dpaa_bus_scan,
		.probe = rte_dpaa_bus_probe,
		.parse = rte_dpaa_bus_parse,
		.find_device = rte_dpaa_find_device,
		.get_iommu_class = rte_dpaa_get_iommu_class,
		.plug = dpaa_bus_plug,
		.unplug = dpaa_bus_unplug,
		.dev_iterate = dpaa_bus_dev_iterate,
	},
	.device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
	.driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
	.device_count = 0,
};

RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);
RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_bus, NOTICE);