/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_order_common.h"

int
order_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	return t->result;
}

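/*
 * Synthetic producer: allocates mbufs from the test mempool, maps each mbuf
 * to a flow, stamps a per-flow sequence number and enqueues it to queue 0 as
 * a NEW event with RTE_SCHED_TYPE_ORDERED scheduling.
 */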
static inline int
order_producer(void *arg)
{
	struct prod_data *p = arg;
	struct test_order *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	uint32_t *producer_flow_seq = t->producer_flow_seq;
	const uint32_t nb_flows = t->nb_flows;
	uint64_t count = 0;
	struct rte_mbuf *m;
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
			__func__, rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ORDERED;
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

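	/*
	 * Allocation failures are simply retried; enqueue backpressure is
	 * absorbed by spinning with rte_pause() until the event is accepted
	 * or the test flags an error.
	 */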
	while (count < nb_pkts && t->err == false) {
		m = rte_pktmbuf_alloc(pool);
		if (m == NULL)
			continue;

		const flow_id_t flow = (uintptr_t)m % nb_flows;
		/* Maintain seq number per flow */
		*order_mbuf_seqn(t, m) = producer_flow_seq[flow]++;
		order_flow_id_save(t, flow, m, &ev);

		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
			if (t->err)
				break;
			rte_pause();
		}

		count++;
	}
	return 0;
}

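/*
 * Illustrative invocation only (a sketch; exact option names follow the
 * evt_options parser and may differ across DPDK versions):
 *
 *   dpdk-test-eventdev -l 0-3 -- --test=order_queue \
 *           --plcores 1 --wlcores 2,3 --nb_flows 64 --nb_pkts 1000000
 */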
int
order_opt_check(struct evt_options *opt)
{
	if (opt->prod_type != EVT_PROD_TYPE_SYNT) {
		evt_err("invalid producer type '%s', valid producer is '%s'",
			evt_prod_id_to_name(opt->prod_type),
			evt_prod_id_to_name(EVT_PROD_TYPE_SYNT));
		return -1;
	}

	/* 1 producer + N workers + main */
	if (rte_lcore_count() < 3) {
		evt_err("test needs a minimum of 3 lcores");
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlap with the main lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) == 0) {
		evt_err("missing the producer lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) != 1) {
		evt_err("exactly one producer lcore must be selected");
		return -1;
	}

	int plcore = evt_get_first_active_lcore(opt->plcores);

	if (plcore < 0) {
		evt_err("failed to find active producer");
		return plcore;
	}

	if (evt_lcores_has_overlap(opt->wlcores, plcore)) {
		evt_err("worker lcores overlap with the producer lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("at least one worker lcore is required");
		return -1;
	}

	/* Validate producer lcore */
	if (plcore == (int)rte_get_main_lcore()) {
		evt_err("producer lcore and main lcore should be different");
		return -1;
	}
	if (!rte_lcore_is_enabled(plcore)) {
		evt_err("producer lcore is not enabled");
		return -1;
	}

	/* Fixups */
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX;

	return 0;
}

int
order_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_order;
	struct test_order *t;
	static const struct rte_mbuf_dynfield flow_id_dynfield_desc = {
		.name = "test_event_dynfield_flow_id",
		.size = sizeof(flow_id_t),
		.align = __alignof__(flow_id_t),
	};
	static const struct rte_mbuf_dynfield seqn_dynfield_desc = {
		.name = "test_event_dynfield_seqn",
		.size = sizeof(seqn_t),
		.align = __alignof__(seqn_t),
	};

	test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_order == NULL) {
		evt_err("failed to allocate test_order memory");
		goto nomem;
	}
	test->test_priv = test_order;
	t = evt_test_priv(test);

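	/*
	 * Register the dynamic mbuf fields that carry the per-packet flow id
	 * and sequence number; the order_* helpers declared in
	 * test_order_common.h access them through these offsets.
	 */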
	t->flow_id_dynfield_offset =
		rte_mbuf_dynfield_register(&flow_id_dynfield_desc);
	if (t->flow_id_dynfield_offset < 0) {
		evt_err("failed to register mbuf field");
		return -rte_errno;
	}

	t->seqn_dynfield_offset =
		rte_mbuf_dynfield_register(&seqn_dynfield_desc);
	if (t->seqn_dynfield_offset < 0) {
		evt_err("failed to register mbuf field");
		return -rte_errno;
	}

	t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
				sizeof(*t->producer_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);

	if (t->producer_flow_seq == NULL) {
		evt_err("failed to allocate t->producer_flow_seq memory");
		goto prod_nomem;
	}

	t->expected_flow_seq = rte_zmalloc_socket("test_expected_flow_seq",
				sizeof(*t->expected_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);

	if (t->expected_flow_seq == NULL) {
		evt_err("failed to allocate t->expected_flow_seq memory");
		goto exp_nomem;
	}
	rte_atomic64_set(&t->outstand_pkts, opt->nb_pkts);
	t->err = false;
	t->nb_pkts = opt->nb_pkts;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	return 0;

exp_nomem:
	rte_free(t->producer_flow_seq);
prod_nomem:
	rte_free(test->test_priv);
nomem:
	return -ENOMEM;
}

void
order_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_free(t->expected_flow_seq);
	rte_free(t->producer_flow_seq);
	rte_free(test->test_priv);
}

int
order_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_order *t = evt_test_priv(test);

	t->pool = rte_pktmbuf_pool_create(test->name, opt->pool_sz,
					256 /* Cache */, 0,
					512, /* Use very small mbufs */
					opt->socket_id);
	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
order_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

void
order_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

void
order_opt_dump(struct evt_options *opt)
{
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
}

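/*
 * The worker callback is supplied by the specific order_* test variant.
 * Roughly, and as a sketch only, each worker dequeues events, compares the
 * mbuf's sequence number against expected_flow_seq[flow] at the final stage,
 * advances that counter, frees the mbuf and decrements outstand_pkts; the
 * shared helpers live in test_order_common.h.
 */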
int
order_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_order *t = evt_test_priv(test);

	int wkr_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
					lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		wkr_idx++;
	}

	/* launch producer */
	int plcore = evt_get_first_active_lcore(opt->plcores);

	ret = rte_eal_remote_launch(order_producer, &t->prod, plcore);
	if (ret) {
		evt_err("failed to launch order_producer %d", plcore);
		return ret;
	}

	uint64_t cycles = rte_get_timer_cycles();
	int64_t old_remaining = -1;

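	/*
	 * The main lcore polls the outstanding packet count: the test passes
	 * once it drops to zero, and if the count has not moved for over a
	 * second the eventdev state is dumped and the run is flagged as a
	 * deadlock.
	 */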
	while (t->err == false) {
		uint64_t new_cycles = rte_get_timer_cycles();
		int64_t remaining = rte_atomic64_read(&t->outstand_pkts);

		if (remaining <= 0) {
			t->result = EVT_TEST_SUCCESS;
			break;
		}

		if (new_cycles - cycles > rte_get_timer_hz() * 1) {
			printf(CLGRN"\r%"PRId64""CLNRM, remaining);
			fflush(stdout);
			if (old_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("no events scheduled for over a second, deadlock");
				t->err = true;
				rte_smp_wmb();
				break;
			}
			old_remaining = remaining;
			cycles = new_cycles;
		}
	}
	printf("\r");

	return 0;
}

int
order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t nb_workers, uint8_t nb_queues)
{
	int ret;
	uint8_t port;
	struct test_order *t = evt_test_priv(test);
	struct rte_event_dev_info dev_info;

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

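	/* Clamp the requested worker dequeue depth to what the device supports. */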
	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < nb_workers; port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;

		ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}
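	/*
	 * The producer uses the last event port (index nb_workers) and
	 * injects all events into queue 0.
	 */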
	struct prod_data *p = &t->prod;

	p->dev_id = opt->dev_id;
	p->port_id = port; /* last port */
	p->queue_id = 0;
	p->t = t;

	ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
	if (ret) {
		evt_err("failed to setup producer port %d", port);
		return ret;
	}

	return ret;
}