1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4 
5 #include <unistd.h>
6 
7 #include <rte_common.h>
8 #include <rte_log.h>
9 #include <rte_dev.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
12 #include <rte_byteorder.h>
13 #include <rte_errno.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_hexdump.h>
16 #include <rte_pci.h>
17 #include <rte_bus_pci.h>
18 #ifdef RTE_BBDEV_OFFLOAD_COST
19 #include <rte_cycles.h>
20 #endif
21 
22 #include <rte_bbdev.h>
23 #include <rte_bbdev_pmd.h>
24 #include "rte_acc100_pmd.h"
25 
26 #ifdef RTE_LIBRTE_BBDEV_DEBUG
27 RTE_LOG_REGISTER(acc100_logtype, pmd.bb.acc100, DEBUG);
28 #else
29 RTE_LOG_REGISTER(acc100_logtype, pmd.bb.acc100, NOTICE);
30 #endif
31 
32 /* Write to MMIO register address */
33 static inline void
34 mmio_write(void *addr, uint32_t value)
35 {
36 	*((volatile uint32_t *)(addr)) = rte_cpu_to_le_32(value);
37 }
38 
39 /* Write a register of an ACC100 device */
40 static inline void
41 acc100_reg_write(struct acc100_device *d, uint32_t offset, uint32_t value)
42 {
43 	void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);
44 	mmio_write(reg_addr, value);
45 	usleep(ACC100_LONG_WAIT);
46 }
47 
48 /* Read a register of an ACC100 device */
49 static inline uint32_t
50 acc100_reg_read(struct acc100_device *d, uint32_t offset)
51 {
52 
53 	void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);
54 	uint32_t ret = *((volatile uint32_t *)(reg_addr));
55 	return rte_le_to_cpu_32(ret);
56 }
57 
58 /* Basic Implementation of Log2 for exact 2^N */
59 static inline uint32_t
60 log2_basic(uint32_t value)
61 {
62 	return (value == 0) ? 0 : rte_bsf32(value);
63 }
64 
65 /* Calculate memory alignment offset assuming alignment is 2^N */
66 static inline uint32_t
67 calc_mem_alignment_offset(void *unaligned_virt_mem, uint32_t alignment)
68 {
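	/* Note: when the IOVA is already aligned this returns 'alignment'
	 * rather than 0, i.e. the distance to the next boundary strictly
	 * ahead of the address.
	 */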
69 	rte_iova_t unaligned_phy_mem = rte_malloc_virt2iova(unaligned_virt_mem);
70 	return (uint32_t)(alignment -
71 			(unaligned_phy_mem & (alignment-1)));
72 }
73 
74 /* Calculate the offset of the enqueue register */
75 static inline uint32_t
76 queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id)
77 {
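	/* Register strides implied by the shifts below: 8 bytes per AQ,
	 * 128 bytes (16 AQs) per queue group, 4 kB per VF. The PF addresses
	 * the queues of any VF, while a VF only sees its own window.
	 */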
78 	if (pf_device)
79 		return ((vf_id << 12) + (qgrp_id << 7) + (aq_id << 3) +
80 				HWPfQmgrIngressAq);
81 	else
82 		return ((qgrp_id << 7) + (aq_id << 3) +
83 				HWVfQmgrIngressAq);
84 }
85 
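/* Accelerator processing engines: uplink (decode) and downlink (encode),
 * for 4G (Turbo) and 5G (LDPC).
 */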
86 enum {UL_4G = 0, UL_5G, DL_4G, DL_5G, NUM_ACC};
87 
88 /* Return the accelerator enum for a Queue Group Index */
89 static inline int
90 accFromQgid(int qg_idx, const struct rte_acc100_conf *acc100_conf)
91 {
92 	int accQg[ACC100_NUM_QGRPS];
93 	int NumQGroupsPerFn[NUM_ACC];
94 	int acc, qgIdx, qgIndex = 0;
95 	for (qgIdx = 0; qgIdx < ACC100_NUM_QGRPS; qgIdx++)
96 		accQg[qgIdx] = 0;
97 	NumQGroupsPerFn[UL_4G] = acc100_conf->q_ul_4g.num_qgroups;
98 	NumQGroupsPerFn[UL_5G] = acc100_conf->q_ul_5g.num_qgroups;
99 	NumQGroupsPerFn[DL_4G] = acc100_conf->q_dl_4g.num_qgroups;
100 	NumQGroupsPerFn[DL_5G] = acc100_conf->q_dl_5g.num_qgroups;
101 	for (acc = UL_4G;  acc < NUM_ACC; acc++)
102 		for (qgIdx = 0; qgIdx < NumQGroupsPerFn[acc]; qgIdx++)
103 			accQg[qgIndex++] = acc;
104 	acc = accQg[qg_idx];
105 	return acc;
106 }
107 
108 /* Return the queue topology for a Queue Group Index */
109 static inline void
110 qtopFromAcc(struct rte_acc100_queue_topology **qtop, int acc_enum,
111 		struct rte_acc100_conf *acc100_conf)
112 {
113 	struct rte_acc100_queue_topology *p_qtop;
114 	p_qtop = NULL;
115 	switch (acc_enum) {
116 	case UL_4G:
117 		p_qtop = &(acc100_conf->q_ul_4g);
118 		break;
119 	case UL_5G:
120 		p_qtop = &(acc100_conf->q_ul_5g);
121 		break;
122 	case DL_4G:
123 		p_qtop = &(acc100_conf->q_dl_4g);
124 		break;
125 	case DL_5G:
126 		p_qtop = &(acc100_conf->q_dl_5g);
127 		break;
128 	default:
129 		/* NOTREACHED */
130 		rte_bbdev_log(ERR, "Unexpected error evaluating qtopFromAcc");
131 		break;
132 	}
133 	*qtop = p_qtop;
134 }
135 
136 /* Return the AQ depth for a Queue Group Index */
137 static inline int
138 aqDepth(int qg_idx, struct rte_acc100_conf *acc100_conf)
139 {
140 	struct rte_acc100_queue_topology *q_top = NULL;
141 	int acc_enum = accFromQgid(qg_idx, acc100_conf);
142 	qtopFromAcc(&q_top, acc_enum, acc100_conf);
143 	if (unlikely(q_top == NULL))
144 		return 0;
145 	return q_top->aq_depth_log2;
146 }
147 
148 /* Return the number of atomic queues (AQs) for a Queue Group Index */
149 static inline int
150 aqNum(int qg_idx, struct rte_acc100_conf *acc100_conf)
151 {
152 	struct rte_acc100_queue_topology *q_top = NULL;
153 	int acc_enum = accFromQgid(qg_idx, acc100_conf);
154 	qtopFromAcc(&q_top, acc_enum, acc100_conf);
155 	if (unlikely(q_top == NULL))
156 		return 0;
157 	return q_top->num_aqs_per_groups;
158 }
159 
160 static void
161 initQTop(struct rte_acc100_conf *acc100_conf)
162 {
163 	acc100_conf->q_ul_4g.num_aqs_per_groups = 0;
164 	acc100_conf->q_ul_4g.num_qgroups = 0;
165 	acc100_conf->q_ul_4g.first_qgroup_index = -1;
166 	acc100_conf->q_ul_5g.num_aqs_per_groups = 0;
167 	acc100_conf->q_ul_5g.num_qgroups = 0;
168 	acc100_conf->q_ul_5g.first_qgroup_index = -1;
169 	acc100_conf->q_dl_4g.num_aqs_per_groups = 0;
170 	acc100_conf->q_dl_4g.num_qgroups = 0;
171 	acc100_conf->q_dl_4g.first_qgroup_index = -1;
172 	acc100_conf->q_dl_5g.num_aqs_per_groups = 0;
173 	acc100_conf->q_dl_5g.num_qgroups = 0;
174 	acc100_conf->q_dl_5g.first_qgroup_index = -1;
175 }
176 
177 static inline void
178 updateQtop(uint8_t acc, uint8_t qg, struct rte_acc100_conf *acc100_conf,
179 		struct acc100_device *d) {
180 	uint32_t reg;
181 	struct rte_acc100_queue_topology *q_top = NULL;
182 	qtopFromAcc(&q_top, acc, acc100_conf);
183 	if (unlikely(q_top == NULL))
184 		return;
185 	uint16_t aq;
186 	q_top->num_qgroups++;
187 	if (q_top->first_qgroup_index == -1) {
188 		q_top->first_qgroup_index = qg;
189 		/* Can be optimized to assume all are enabled by default */
190 		reg = acc100_reg_read(d, queue_offset(d->pf_device,
191 				0, qg, ACC100_NUM_AQS - 1));
192 		if (reg & ACC100_QUEUE_ENABLE) {
193 			q_top->num_aqs_per_groups = ACC100_NUM_AQS;
194 			return;
195 		}
196 		q_top->num_aqs_per_groups = 0;
197 		for (aq = 0; aq < ACC100_NUM_AQS; aq++) {
198 			reg = acc100_reg_read(d, queue_offset(d->pf_device,
199 					0, qg, aq));
200 			if (reg & ACC100_QUEUE_ENABLE)
201 				q_top->num_aqs_per_groups++;
202 		}
203 	}
204 }
205 
206 /* Fetch configuration enabled for the PF/VF using MMIO Read (slow) */
207 static inline void
208 fetch_acc100_config(struct rte_bbdev *dev)
209 {
210 	struct acc100_device *d = dev->data->dev_private;
211 	struct rte_acc100_conf *acc100_conf = &d->acc100_conf;
212 	const struct acc100_registry_addr *reg_addr;
213 	uint8_t acc, qg;
214 	uint32_t reg, reg_aq, reg_len0, reg_len1;
215 	uint32_t reg_mode;
216 
217 	/* No need to retrieve the configuration if it is already done */
218 	if (d->configured)
219 		return;
220 
221 	/* Choose correct registry addresses for the device type */
222 	if (d->pf_device)
223 		reg_addr = &pf_reg_addr;
224 	else
225 		reg_addr = &vf_reg_addr;
226 
227 	d->ddr_size = (1 + acc100_reg_read(d, reg_addr->ddr_range)) << 10;
228 
229 	/* Single VF Bundle by VF */
230 	acc100_conf->num_vf_bundles = 1;
231 	initQTop(acc100_conf);
232 
233 	struct rte_acc100_queue_topology *q_top = NULL;
234 	int qman_func_id[ACC100_NUM_ACCS] = {ACC100_ACCMAP_0, ACC100_ACCMAP_1,
235 			ACC100_ACCMAP_2, ACC100_ACCMAP_3, ACC100_ACCMAP_4};
236 	reg = acc100_reg_read(d, reg_addr->qman_group_func);
237 	for (qg = 0; qg < ACC100_NUM_QGRPS_PER_WORD; qg++) {
238 		reg_aq = acc100_reg_read(d,
239 				queue_offset(d->pf_device, 0, qg, 0));
240 		if (reg_aq & ACC100_QUEUE_ENABLE) {
241 			uint32_t idx = (reg >> (qg * 4)) & 0x7;
242 			if (idx < ACC100_NUM_ACCS) {
243 				acc = qman_func_id[idx];
244 				updateQtop(acc, qg, acc100_conf, d);
245 			}
246 		}
247 	}
248 
249 	/* Check the depth of the AQs */
250 	reg_len0 = acc100_reg_read(d, reg_addr->depth_log0_offset);
251 	reg_len1 = acc100_reg_read(d, reg_addr->depth_log1_offset);
252 	for (acc = 0; acc < NUM_ACC; acc++) {
253 		qtopFromAcc(&q_top, acc, acc100_conf);
254 		if (q_top->first_qgroup_index < ACC100_NUM_QGRPS_PER_WORD)
255 			q_top->aq_depth_log2 = (reg_len0 >>
256 					(q_top->first_qgroup_index * 4))
257 					& 0xF;
258 		else
259 			q_top->aq_depth_log2 = (reg_len1 >>
260 					((q_top->first_qgroup_index -
261 					ACC100_NUM_QGRPS_PER_WORD) * 4))
262 					& 0xF;
263 	}
264 
265 	/* Read PF mode */
266 	if (d->pf_device) {
267 		reg_mode = acc100_reg_read(d, HWPfHiPfMode);
268 		acc100_conf->pf_mode_en = (reg_mode == ACC100_PF_VAL) ? 1 : 0;
269 	}
270 
271 	rte_bbdev_log_debug(
272 			"%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u AQ %u %u %u %u Len %u %u %u %u\n",
273 			(d->pf_device) ? "PF" : "VF",
274 			(acc100_conf->input_pos_llr_1_bit) ? "POS" : "NEG",
275 			(acc100_conf->output_pos_llr_1_bit) ? "POS" : "NEG",
276 			acc100_conf->q_ul_4g.num_qgroups,
277 			acc100_conf->q_dl_4g.num_qgroups,
278 			acc100_conf->q_ul_5g.num_qgroups,
279 			acc100_conf->q_dl_5g.num_qgroups,
280 			acc100_conf->q_ul_4g.num_aqs_per_groups,
281 			acc100_conf->q_dl_4g.num_aqs_per_groups,
282 			acc100_conf->q_ul_5g.num_aqs_per_groups,
283 			acc100_conf->q_dl_5g.num_aqs_per_groups,
284 			acc100_conf->q_ul_4g.aq_depth_log2,
285 			acc100_conf->q_dl_4g.aq_depth_log2,
286 			acc100_conf->q_ul_5g.aq_depth_log2,
287 			acc100_conf->q_dl_5g.aq_depth_log2);
288 }
289 
290 static void
291 free_base_addresses(void **base_addrs, int size)
292 {
293 	int i;
294 	for (i = 0; i < size; i++)
295 		rte_free(base_addrs[i]);
296 }
297 
298 static inline uint32_t
299 get_desc_len(void)
300 {
301 	return sizeof(union acc100_dma_desc);
302 }
303 
304 /* Allocate the 2 * 64MB block for the sw rings */
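/* Allocating twice the ring size guarantees that a 64MB-aligned window of
 * 64MB is always contained within the returned buffer.
 */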
305 static int
306 alloc_2x64mb_sw_rings_mem(struct rte_bbdev *dev, struct acc100_device *d,
307 		int socket)
308 {
309 	uint32_t sw_ring_size = ACC100_SIZE_64MBYTE;
310 	d->sw_rings_base = rte_zmalloc_socket(dev->device->driver->name,
311 			2 * sw_ring_size, RTE_CACHE_LINE_SIZE, socket);
312 	if (d->sw_rings_base == NULL) {
313 		rte_bbdev_log(ERR, "Failed to allocate memory for %s:%u",
314 				dev->device->driver->name,
315 				dev->data->dev_id);
316 		return -ENOMEM;
317 	}
318 	uint32_t next_64mb_align_offset = calc_mem_alignment_offset(
319 			d->sw_rings_base, ACC100_SIZE_64MBYTE);
320 	d->sw_rings = RTE_PTR_ADD(d->sw_rings_base, next_64mb_align_offset);
321 	d->sw_rings_iova = rte_malloc_virt2iova(d->sw_rings_base) +
322 			next_64mb_align_offset;
323 	d->sw_ring_size = ACC100_MAX_QUEUE_DEPTH * get_desc_len();
324 	d->sw_ring_max_depth = ACC100_MAX_QUEUE_DEPTH;
325 
326 	return 0;
327 }
328 
329 /* Attempt to allocate minimised memory space for sw rings */
330 static void
331 alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc100_device *d,
332 		uint16_t num_queues, int socket)
333 {
334 	rte_iova_t sw_rings_base_iova, next_64mb_align_addr_iova;
335 	uint32_t next_64mb_align_offset;
336 	rte_iova_t sw_ring_iova_end_addr;
337 	void *base_addrs[ACC100_SW_RING_MEM_ALLOC_ATTEMPTS];
338 	void *sw_rings_base;
339 	int i = 0;
340 	uint32_t q_sw_ring_size = ACC100_MAX_QUEUE_DEPTH * get_desc_len();
341 	uint32_t dev_sw_ring_size = q_sw_ring_size * num_queues;
342 
343 	/* Find an aligned block of memory to store sw rings */
344 	while (i < ACC100_SW_RING_MEM_ALLOC_ATTEMPTS) {
345 		/*
346 		 * sw_ring allocated memory is guaranteed to be aligned to
347 		 * q_sw_ring_size at the condition that the requested size is
348 		 * less than the page size
349 		 */
350 		sw_rings_base = rte_zmalloc_socket(
351 				dev->device->driver->name,
352 				dev_sw_ring_size, q_sw_ring_size, socket);
353 
354 		if (sw_rings_base == NULL) {
355 			rte_bbdev_log(ERR,
356 					"Failed to allocate memory for %s:%u",
357 					dev->device->driver->name,
358 					dev->data->dev_id);
359 			break;
360 		}
361 
362 		sw_rings_base_iova = rte_malloc_virt2iova(sw_rings_base);
363 		next_64mb_align_offset = calc_mem_alignment_offset(
364 				sw_rings_base, ACC100_SIZE_64MBYTE);
365 		next_64mb_align_addr_iova = sw_rings_base_iova +
366 				next_64mb_align_offset;
367 		sw_ring_iova_end_addr = sw_rings_base_iova + dev_sw_ring_size;
368 
369 		/* Check if the end of the sw ring memory block is before the
370 		 * start of next 64MB aligned mem address
371 		 */
372 		if (sw_ring_iova_end_addr < next_64mb_align_addr_iova) {
373 			d->sw_rings_iova = sw_rings_base_iova;
374 			d->sw_rings = sw_rings_base;
375 			d->sw_rings_base = sw_rings_base;
376 			d->sw_ring_size = q_sw_ring_size;
377 			d->sw_ring_max_depth = ACC100_MAX_QUEUE_DEPTH;
378 			break;
379 		}
380 		/* Store the address of the unaligned mem block */
381 		base_addrs[i] = sw_rings_base;
382 		i++;
383 	}
384 
385 	/* Free all unaligned blocks of mem allocated in the loop */
386 	free_base_addresses(base_addrs, i);
387 }
388 
389 /*
390  * Find queue_id of a device queue based on details from the Info Ring.
391  * If a queue isn't found UINT16_MAX is returned.
392  */
393 static inline uint16_t
394 get_queue_id_from_ring_info(struct rte_bbdev_data *data,
395 		const union acc100_info_ring_data ring_data)
396 {
397 	uint16_t queue_id;
398 
399 	for (queue_id = 0; queue_id < data->num_queues; ++queue_id) {
400 		struct acc100_queue *acc100_q =
401 				data->queues[queue_id].queue_private;
402 		if (acc100_q != NULL && acc100_q->aq_id == ring_data.aq_id &&
403 				acc100_q->qgrp_id == ring_data.qg_id &&
404 				acc100_q->vf_id == ring_data.vf_id)
405 			return queue_id;
406 	}
407 
408 	return UINT16_MAX;
409 }
410 
411 /* Check the Info Ring for pending entries and log a warning for unexpected ones */
412 static inline void
413 acc100_check_ir(struct acc100_device *acc100_dev)
414 {
415 	volatile union acc100_info_ring_data *ring_data;
416 	uint16_t info_ring_head = acc100_dev->info_ring_head;
417 	if (acc100_dev->info_ring == NULL)
418 		return;
419 
420 	ring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head &
421 			ACC100_INFO_RING_MASK);
422 
423 	while (ring_data->valid) {
424 		if ((ring_data->int_nb < ACC100_PF_INT_DMA_DL_DESC_IRQ) || (
425 				ring_data->int_nb >
426 				ACC100_PF_INT_DMA_DL5G_DESC_IRQ))
427 			rte_bbdev_log(WARNING, "InfoRing: ITR:%d Info:0x%x",
428 				ring_data->int_nb, ring_data->detailed_info);
429 		/* Initialize Info Ring entry and move forward */
430 		ring_data->val = 0;
431 		info_ring_head++;
432 		ring_data = acc100_dev->info_ring +
433 				(info_ring_head & ACC100_INFO_RING_MASK);
434 	}
435 }
436 
437 /* Checks PF Info Ring to find the interrupt cause and handles it accordingly */
438 static inline void
439 acc100_pf_interrupt_handler(struct rte_bbdev *dev)
440 {
441 	struct acc100_device *acc100_dev = dev->data->dev_private;
442 	volatile union acc100_info_ring_data *ring_data;
443 	struct acc100_deq_intr_details deq_intr_det;
444 
445 	ring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head &
446 			ACC100_INFO_RING_MASK);
447 
448 	while (ring_data->valid) {
449 
450 		rte_bbdev_log_debug(
451 				"ACC100 PF Interrupt received, Info Ring data: 0x%x",
452 				ring_data->val);
453 
454 		switch (ring_data->int_nb) {
455 		case ACC100_PF_INT_DMA_DL_DESC_IRQ:
456 		case ACC100_PF_INT_DMA_UL_DESC_IRQ:
457 		case ACC100_PF_INT_DMA_UL5G_DESC_IRQ:
458 		case ACC100_PF_INT_DMA_DL5G_DESC_IRQ:
459 			deq_intr_det.queue_id = get_queue_id_from_ring_info(
460 					dev->data, *ring_data);
461 			if (deq_intr_det.queue_id == UINT16_MAX) {
462 				rte_bbdev_log(ERR,
463 						"Couldn't find queue: aq_id: %u, qg_id: %u, vf_id: %u",
464 						ring_data->aq_id,
465 						ring_data->qg_id,
466 						ring_data->vf_id);
467 				return;
468 			}
469 			rte_bbdev_pmd_callback_process(dev,
470 					RTE_BBDEV_EVENT_DEQUEUE, &deq_intr_det);
471 			break;
472 		default:
473 			rte_bbdev_pmd_callback_process(dev,
474 					RTE_BBDEV_EVENT_ERROR, NULL);
475 			break;
476 		}
477 
478 		/* Initialize Info Ring entry and move forward */
479 		ring_data->val = 0;
480 		++acc100_dev->info_ring_head;
481 		ring_data = acc100_dev->info_ring +
482 				(acc100_dev->info_ring_head &
483 				ACC100_INFO_RING_MASK);
484 	}
485 }
486 
487 /* Checks VF Info Ring to find the interrupt cause and handles it accordingly */
488 static inline void
489 acc100_vf_interrupt_handler(struct rte_bbdev *dev)
490 {
491 	struct acc100_device *acc100_dev = dev->data->dev_private;
492 	volatile union acc100_info_ring_data *ring_data;
493 	struct acc100_deq_intr_details deq_intr_det;
494 
495 	ring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head &
496 			ACC100_INFO_RING_MASK);
497 
498 	while (ring_data->valid) {
499 
500 		rte_bbdev_log_debug(
501 				"ACC100 VF Interrupt received, Info Ring data: 0x%x",
502 				ring_data->val);
503 
504 		switch (ring_data->int_nb) {
505 		case ACC100_VF_INT_DMA_DL_DESC_IRQ:
506 		case ACC100_VF_INT_DMA_UL_DESC_IRQ:
507 		case ACC100_VF_INT_DMA_UL5G_DESC_IRQ:
508 		case ACC100_VF_INT_DMA_DL5G_DESC_IRQ:
509 			/* VFs are not aware of their vf_id - it's set to 0 in
510 			 * queue structures.
511 			 */
512 			ring_data->vf_id = 0;
513 			deq_intr_det.queue_id = get_queue_id_from_ring_info(
514 					dev->data, *ring_data);
515 			if (deq_intr_det.queue_id == UINT16_MAX) {
516 				rte_bbdev_log(ERR,
517 						"Couldn't find queue: aq_id: %u, qg_id: %u",
518 						ring_data->aq_id,
519 						ring_data->qg_id);
520 				return;
521 			}
522 			rte_bbdev_pmd_callback_process(dev,
523 					RTE_BBDEV_EVENT_DEQUEUE, &deq_intr_det);
524 			break;
525 		default:
526 			rte_bbdev_pmd_callback_process(dev,
527 					RTE_BBDEV_EVENT_ERROR, NULL);
528 			break;
529 		}
530 
531 		/* Initialize Info Ring entry and move forward */
532 		ring_data->valid = 0;
533 		++acc100_dev->info_ring_head;
534 		ring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head
535 				& ACC100_INFO_RING_MASK);
536 	}
537 }
538 
539 /* Interrupt handler triggered by ACC100 dev for handling specific interrupt */
540 static void
541 acc100_dev_interrupt_handler(void *cb_arg)
542 {
543 	struct rte_bbdev *dev = cb_arg;
544 	struct acc100_device *acc100_dev = dev->data->dev_private;
545 
546 	/* Read info ring */
547 	if (acc100_dev->pf_device)
548 		acc100_pf_interrupt_handler(dev);
549 	else
550 		acc100_vf_interrupt_handler(dev);
551 }
552 
553 /* Allocate and set up the Info Ring */
554 static int
555 allocate_info_ring(struct rte_bbdev *dev)
556 {
557 	struct acc100_device *d = dev->data->dev_private;
558 	const struct acc100_registry_addr *reg_addr;
559 	rte_iova_t info_ring_iova;
560 	uint32_t phys_low, phys_high;
561 
562 	if (d->info_ring != NULL)
563 		return 0; /* Already configured */
564 
565 	/* Choose correct registry addresses for the device type */
566 	if (d->pf_device)
567 		reg_addr = &pf_reg_addr;
568 	else
569 		reg_addr = &vf_reg_addr;
570 	/* Allocate InfoRing */
571 	d->info_ring = rte_zmalloc_socket("Info Ring",
572 			ACC100_INFO_RING_NUM_ENTRIES *
573 			sizeof(*d->info_ring), RTE_CACHE_LINE_SIZE,
574 			dev->data->socket_id);
575 	if (d->info_ring == NULL) {
576 		rte_bbdev_log(ERR,
577 				"Failed to allocate Info Ring for %s:%u",
578 				dev->device->driver->name,
579 				dev->data->dev_id);
580 		return -ENOMEM;
581 	}
582 	info_ring_iova = rte_malloc_virt2iova(d->info_ring);
583 
584 	/* Setup Info Ring */
585 	phys_high = (uint32_t)(info_ring_iova >> 32);
586 	phys_low  = (uint32_t)(info_ring_iova);
587 	acc100_reg_write(d, reg_addr->info_ring_hi, phys_high);
588 	acc100_reg_write(d, reg_addr->info_ring_lo, phys_low);
589 	acc100_reg_write(d, reg_addr->info_ring_en, ACC100_REG_IRQ_EN_ALL);
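	/* Derive the current head index from the HW ring pointer register:
	 * the low 12 bits hold a byte offset into the ring, converted here
	 * into an entry index.
	 */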
590 	d->info_ring_head = (acc100_reg_read(d, reg_addr->info_ring_ptr) &
591 			0xFFF) / sizeof(union acc100_info_ring_data);
592 	return 0;
593 }
594 
595 
596 /* Allocate ring memory (sw rings, tail pointers, Info Ring, HARQ layout) and configure the device */
597 static int
598 acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
599 {
600 	uint32_t phys_low, phys_high, value;
601 	struct acc100_device *d = dev->data->dev_private;
602 	const struct acc100_registry_addr *reg_addr;
603 	int ret;
604 
605 	if (d->pf_device && !d->acc100_conf.pf_mode_en) {
606 		rte_bbdev_log(NOTICE,
607 				"%s has PF mode disabled. This PF can't be used.",
608 				dev->data->name);
609 		return -ENODEV;
610 	}
611 
612 	alloc_sw_rings_min_mem(dev, d, num_queues, socket_id);
613 
614 	/* If minimal memory space approach failed, then allocate
615 	 * the 2 * 64MB block for the sw rings
616 	 */
617 	if (d->sw_rings == NULL)
618 		alloc_2x64mb_sw_rings_mem(dev, d, socket_id);
619 
620 	if (d->sw_rings == NULL) {
621 		rte_bbdev_log(NOTICE,
622 				"Failure allocating sw_rings memory");
623 		return -ENODEV;
624 	}
625 
626 	/* Configure ACC100 with the base address for DMA descriptor rings
627 	 * Same descriptor rings used for UL and DL DMA Engines
628 	 * Note : Assuming only VF0 bundle is used for PF mode
629 	 */
630 	phys_high = (uint32_t)(d->sw_rings_iova >> 32);
631 	phys_low  = (uint32_t)(d->sw_rings_iova & ~(ACC100_SIZE_64MBYTE-1));
632 
633 	/* Choose correct registry addresses for the device type */
634 	if (d->pf_device)
635 		reg_addr = &pf_reg_addr;
636 	else
637 		reg_addr = &vf_reg_addr;
638 
639 	/* Read the populated cfg from ACC100 registers */
640 	fetch_acc100_config(dev);
641 
642 	/* Release AXI from PF */
643 	if (d->pf_device)
644 		acc100_reg_write(d, HWPfDmaAxiControl, 1);
645 
646 	acc100_reg_write(d, reg_addr->dma_ring_ul5g_hi, phys_high);
647 	acc100_reg_write(d, reg_addr->dma_ring_ul5g_lo, phys_low);
648 	acc100_reg_write(d, reg_addr->dma_ring_dl5g_hi, phys_high);
649 	acc100_reg_write(d, reg_addr->dma_ring_dl5g_lo, phys_low);
650 	acc100_reg_write(d, reg_addr->dma_ring_ul4g_hi, phys_high);
651 	acc100_reg_write(d, reg_addr->dma_ring_ul4g_lo, phys_low);
652 	acc100_reg_write(d, reg_addr->dma_ring_dl4g_hi, phys_high);
653 	acc100_reg_write(d, reg_addr->dma_ring_dl4g_lo, phys_low);
654 
655 	/*
656 	 * Configure Ring Size to the max queue ring size
657 	 * (used for wrapping purpose)
658 	 */
659 	value = log2_basic(d->sw_ring_size / 64);
660 	acc100_reg_write(d, reg_addr->ring_size, value);
661 
662 	/* Configure tail pointer for use when SDONE enabled */
663 	d->tail_ptrs = rte_zmalloc_socket(
664 			dev->device->driver->name,
665 			ACC100_NUM_QGRPS * ACC100_NUM_AQS * sizeof(uint32_t),
666 			RTE_CACHE_LINE_SIZE, socket_id);
667 	if (d->tail_ptrs == NULL) {
668 		rte_bbdev_log(ERR, "Failed to allocate tail ptr for %s:%u",
669 				dev->device->driver->name,
670 				dev->data->dev_id);
671 		rte_free(d->sw_rings);
672 		return -ENOMEM;
673 	}
674 	d->tail_ptr_iova = rte_malloc_virt2iova(d->tail_ptrs);
675 
676 	phys_high = (uint32_t)(d->tail_ptr_iova >> 32);
677 	phys_low  = (uint32_t)(d->tail_ptr_iova);
678 	acc100_reg_write(d, reg_addr->tail_ptrs_ul5g_hi, phys_high);
679 	acc100_reg_write(d, reg_addr->tail_ptrs_ul5g_lo, phys_low);
680 	acc100_reg_write(d, reg_addr->tail_ptrs_dl5g_hi, phys_high);
681 	acc100_reg_write(d, reg_addr->tail_ptrs_dl5g_lo, phys_low);
682 	acc100_reg_write(d, reg_addr->tail_ptrs_ul4g_hi, phys_high);
683 	acc100_reg_write(d, reg_addr->tail_ptrs_ul4g_lo, phys_low);
684 	acc100_reg_write(d, reg_addr->tail_ptrs_dl4g_hi, phys_high);
685 	acc100_reg_write(d, reg_addr->tail_ptrs_dl4g_lo, phys_low);
686 
687 	ret = allocate_info_ring(dev);
688 	if (ret < 0) {
689 		rte_bbdev_log(ERR, "Failed to allocate info_ring for %s:%u",
690 				dev->device->driver->name,
691 				dev->data->dev_id);
692 		/* Continue */
693 	}
694 
695 	d->harq_layout = rte_zmalloc_socket("HARQ Layout",
696 			ACC100_HARQ_LAYOUT * sizeof(*d->harq_layout),
697 			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
698 	if (d->harq_layout == NULL) {
699 		rte_bbdev_log(ERR, "Failed to allocate harq_layout for %s:%u",
700 				dev->device->driver->name,
701 				dev->data->dev_id);
702 		rte_free(d->sw_rings);
703 		return -ENOMEM;
704 	}
705 
706 	/* Mark as configured properly */
707 	d->configured = true;
708 
709 	rte_bbdev_log_debug(
710 			"ACC100 (%s) configured  sw_rings = %p, sw_rings_iova = %#"
711 			PRIx64, dev->data->name, d->sw_rings, d->sw_rings_iova);
712 
713 	return 0;
714 }
715 
716 static int
717 acc100_intr_enable(struct rte_bbdev *dev)
718 {
719 	int ret;
720 	struct acc100_device *d = dev->data->dev_private;
721 
722 	/* Only MSI are currently supported */
723 	if (dev->intr_handle->type == RTE_INTR_HANDLE_VFIO_MSI ||
724 			dev->intr_handle->type == RTE_INTR_HANDLE_UIO) {
725 
726 		ret = allocate_info_ring(dev);
727 		if (ret < 0) {
728 			rte_bbdev_log(ERR,
729 					"Couldn't allocate info ring for device: %s",
730 					dev->data->name);
731 			return ret;
732 		}
733 
734 		ret = rte_intr_enable(dev->intr_handle);
735 		if (ret < 0) {
736 			rte_bbdev_log(ERR,
737 					"Couldn't enable interrupts for device: %s",
738 					dev->data->name);
739 			rte_free(d->info_ring);
740 			return ret;
741 		}
742 		ret = rte_intr_callback_register(dev->intr_handle,
743 				acc100_dev_interrupt_handler, dev);
744 		if (ret < 0) {
745 			rte_bbdev_log(ERR,
746 					"Couldn't register interrupt callback for device: %s",
747 					dev->data->name);
748 			rte_free(d->info_ring);
749 			return ret;
750 		}
751 
752 		return 0;
753 	}
754 
755 	rte_bbdev_log(ERR, "ACC100 (%s) supports only VFIO MSI interrupts",
756 			dev->data->name);
757 	return -ENOTSUP;
758 }
759 
760 /* Free memory used for software rings */
761 static int
762 acc100_dev_close(struct rte_bbdev *dev)
763 {
764 	struct acc100_device *d = dev->data->dev_private;
765 	acc100_check_ir(d);
766 	if (d->sw_rings_base != NULL) {
767 		rte_free(d->tail_ptrs);
768 		rte_free(d->info_ring);
769 		rte_free(d->sw_rings_base);
770 		d->sw_rings_base = NULL;
771 	}
772 	/* Ensure all in flight HW transactions are completed */
773 	usleep(ACC100_LONG_WAIT);
774 	return 0;
775 }
776 
777 /**
778  * Report an ACC100 queue index which is free
779  * Return 0 to 16k for a valid queue_idx or -1 when no queue is available
780  * Note : Only supporting VF0 Bundle for PF mode
781  */
782 static int
783 acc100_find_free_queue_idx(struct rte_bbdev *dev,
784 		const struct rte_bbdev_queue_conf *conf)
785 {
786 	struct acc100_device *d = dev->data->dev_private;
787 	int op_2_acc[5] = {0, UL_4G, DL_4G, UL_5G, DL_5G};
788 	int acc = op_2_acc[conf->op_type];
789 	struct rte_acc100_queue_topology *qtop = NULL;
790 
791 	qtopFromAcc(&qtop, acc, &(d->acc100_conf));
792 	if (qtop == NULL)
793 		return -1;
794 	/* Identify the matching QGroup indices, which are sorted in priority order */
795 	uint16_t group_idx = qtop->first_qgroup_index;
796 	group_idx += conf->priority;
797 	if (group_idx >= ACC100_NUM_QGRPS ||
798 			conf->priority >= qtop->num_qgroups) {
799 		rte_bbdev_log(INFO, "Invalid Priority on %s, priority %u",
800 				dev->data->name, conf->priority);
801 		return -1;
802 	}
803 	/* Find a free AQ_idx  */
804 	uint16_t aq_idx;
805 	for (aq_idx = 0; aq_idx < qtop->num_aqs_per_groups; aq_idx++) {
806 		if (((d->q_assigned_bit_map[group_idx] >> aq_idx) & 0x1) == 0) {
807 			/* Mark the Queue as assigned */
808 			d->q_assigned_bit_map[group_idx] |= (1 << aq_idx);
809 			/* Report the AQ Index */
810 			return (group_idx << ACC100_GRP_ID_SHIFT) + aq_idx;
811 		}
812 	}
813 	rte_bbdev_log(INFO, "Failed to find free queue on %s, priority %u",
814 			dev->data->name, conf->priority);
815 	return -1;
816 }
817 
818 /* Setup ACC100 queue */
819 static int
820 acc100_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
821 		const struct rte_bbdev_queue_conf *conf)
822 {
823 	struct acc100_device *d = dev->data->dev_private;
824 	struct acc100_queue *q;
825 	int16_t q_idx;
826 
827 	/* Allocate the queue data structure. */
828 	q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
829 			RTE_CACHE_LINE_SIZE, conf->socket);
830 	if (q == NULL) {
831 		rte_bbdev_log(ERR, "Failed to allocate queue memory");
832 		return -ENOMEM;
833 	}
834 	if (d == NULL) {
835 		rte_bbdev_log(ERR, "Undefined device");
836 		return -ENODEV;
837 	}
838 
839 	q->d = d;
840 	q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
841 	q->ring_addr_iova = d->sw_rings_iova + (d->sw_ring_size * queue_id);
842 
843 	/* Prepare the Ring with default descriptor format */
844 	union acc100_dma_desc *desc = NULL;
845 	unsigned int desc_idx, b_idx;
846 	int fcw_len = (conf->op_type == RTE_BBDEV_OP_LDPC_ENC ?
847 		ACC100_FCW_LE_BLEN : (conf->op_type == RTE_BBDEV_OP_TURBO_DEC ?
848 		ACC100_FCW_TD_BLEN : ACC100_FCW_LD_BLEN));
849 
850 	for (desc_idx = 0; desc_idx < d->sw_ring_max_depth; desc_idx++) {
851 		desc = q->ring_addr + desc_idx;
852 		desc->req.word0 = ACC100_DMA_DESC_TYPE;
853 		desc->req.word1 = 0; /**< Timestamp */
854 		desc->req.word2 = 0;
855 		desc->req.word3 = 0;
856 		uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
857 		desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
858 		desc->req.data_ptrs[0].blen = fcw_len;
859 		desc->req.data_ptrs[0].blkid = ACC100_DMA_BLKID_FCW;
860 		desc->req.data_ptrs[0].last = 0;
861 		desc->req.data_ptrs[0].dma_ext = 0;
862 		for (b_idx = 1; b_idx < ACC100_DMA_MAX_NUM_POINTERS - 1;
863 				b_idx++) {
864 			desc->req.data_ptrs[b_idx].blkid = ACC100_DMA_BLKID_IN;
865 			desc->req.data_ptrs[b_idx].last = 1;
866 			desc->req.data_ptrs[b_idx].dma_ext = 0;
867 			b_idx++;
868 			desc->req.data_ptrs[b_idx].blkid =
869 					ACC100_DMA_BLKID_OUT_ENC;
870 			desc->req.data_ptrs[b_idx].last = 1;
871 			desc->req.data_ptrs[b_idx].dma_ext = 0;
872 		}
873 		/* Preset some fields of LDPC FCW */
874 		desc->req.fcw_ld.FCWversion = ACC100_FCW_VER;
875 		desc->req.fcw_ld.gain_i = 1;
876 		desc->req.fcw_ld.gain_h = 1;
877 	}
878 
879 	q->lb_in = rte_zmalloc_socket(dev->device->driver->name,
880 			RTE_CACHE_LINE_SIZE,
881 			RTE_CACHE_LINE_SIZE, conf->socket);
882 	if (q->lb_in == NULL) {
883 		rte_bbdev_log(ERR, "Failed to allocate lb_in memory");
884 		rte_free(q);
885 		return -ENOMEM;
886 	}
887 	q->lb_in_addr_iova = rte_malloc_virt2iova(q->lb_in);
888 	q->lb_out = rte_zmalloc_socket(dev->device->driver->name,
889 			RTE_CACHE_LINE_SIZE,
890 			RTE_CACHE_LINE_SIZE, conf->socket);
891 	if (q->lb_out == NULL) {
892 		rte_bbdev_log(ERR, "Failed to allocate lb_out memory");
893 		rte_free(q->lb_in);
894 		rte_free(q);
895 		return -ENOMEM;
896 	}
897 	q->lb_out_addr_iova = rte_malloc_virt2iova(q->lb_out);
898 
899 	/*
900 	 * Software queue ring wraps synchronously with the HW when it reaches
901 	 * the boundary of the maximum allocated queue size, no matter what the
902 	 * sw queue size is. This wrapping is guarded by setting the wrap_mask
903 	 * to represent the maximum queue size as allocated at the time when
904 	 * the device has been setup (in configure()).
905 	 *
906 	 * The queue depth is set to the queue size value (conf->queue_size).
907 	 * This limits the occupancy of the queue at any point of time, so that
908 	 * the queue does not get swamped with enqueue requests.
909 	 */
910 	q->sw_ring_depth = conf->queue_size;
911 	q->sw_ring_wrap_mask = d->sw_ring_max_depth - 1;
912 
913 	q->op_type = conf->op_type;
914 
915 	q_idx = acc100_find_free_queue_idx(dev, conf);
916 	if (q_idx == -1) {
917 		rte_free(q->lb_in);
918 		rte_free(q->lb_out);
919 		rte_free(q);
920 		return -1;
921 	}
922 
923 	q->qgrp_id = (q_idx >> ACC100_GRP_ID_SHIFT) & 0xF;
924 	q->vf_id = (q_idx >> ACC100_VF_ID_SHIFT)  & 0x3F;
925 	q->aq_id = q_idx & 0xF;
926 	q->aq_depth = (conf->op_type ==  RTE_BBDEV_OP_TURBO_DEC) ?
927 			(1 << d->acc100_conf.q_ul_4g.aq_depth_log2) :
928 			(1 << d->acc100_conf.q_dl_4g.aq_depth_log2);
929 
930 	q->mmio_reg_enqueue = RTE_PTR_ADD(d->mmio_base,
931 			queue_offset(d->pf_device,
932 					q->vf_id, q->qgrp_id, q->aq_id));
933 
934 	rte_bbdev_log_debug(
935 			"Setup dev%u q%u: qgrp_id=%u, vf_id=%u, aq_id=%u, aq_depth=%u, mmio_reg_enqueue=%p",
936 			dev->data->dev_id, queue_id, q->qgrp_id, q->vf_id,
937 			q->aq_id, q->aq_depth, q->mmio_reg_enqueue);
938 
939 	dev->data->queues[queue_id].queue_private = q;
940 	return 0;
941 }
942 
943 /* Release ACC100 queue */
944 static int
945 acc100_queue_release(struct rte_bbdev *dev, uint16_t q_id)
946 {
947 	struct acc100_device *d = dev->data->dev_private;
948 	struct acc100_queue *q = dev->data->queues[q_id].queue_private;
949 
950 	if (q != NULL) {
951 		/* Mark the Queue as un-assigned */
952 		d->q_assigned_bit_map[q->qgrp_id] &= (0xFFFFFFFF -
953 				(1 << q->aq_id));
954 		rte_free(q->lb_in);
955 		rte_free(q->lb_out);
956 		rte_free(q);
957 		dev->data->queues[q_id].queue_private = NULL;
958 	}
959 
960 	return 0;
961 }
962 
963 /* Get ACC100 device info */
964 static void
965 acc100_dev_info_get(struct rte_bbdev *dev,
966 		struct rte_bbdev_driver_info *dev_info)
967 {
968 	struct acc100_device *d = dev->data->dev_private;
969 
970 	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
971 		{
972 			.type = RTE_BBDEV_OP_TURBO_DEC,
973 			.cap.turbo_dec = {
974 				.capability_flags =
975 					RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
976 					RTE_BBDEV_TURBO_CRC_TYPE_24B |
977 					RTE_BBDEV_TURBO_HALF_ITERATION_EVEN |
978 					RTE_BBDEV_TURBO_EARLY_TERMINATION |
979 					RTE_BBDEV_TURBO_DEC_INTERRUPTS |
980 					RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
981 					RTE_BBDEV_TURBO_MAP_DEC |
982 					RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP |
983 					RTE_BBDEV_TURBO_DEC_SCATTER_GATHER,
984 				.max_llr_modulus = INT8_MAX,
985 				.num_buffers_src =
986 						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
987 				.num_buffers_hard_out =
988 						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
989 				.num_buffers_soft_out =
990 						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
991 			}
992 		},
993 		{
994 			.type = RTE_BBDEV_OP_TURBO_ENC,
995 			.cap.turbo_enc = {
996 				.capability_flags =
997 					RTE_BBDEV_TURBO_CRC_24B_ATTACH |
998 					RTE_BBDEV_TURBO_RV_INDEX_BYPASS |
999 					RTE_BBDEV_TURBO_RATE_MATCH |
1000 					RTE_BBDEV_TURBO_ENC_INTERRUPTS |
1001 					RTE_BBDEV_TURBO_ENC_SCATTER_GATHER,
1002 				.num_buffers_src =
1003 						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
1004 				.num_buffers_dst =
1005 						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
1006 			}
1007 		},
1008 		{
1009 			.type   = RTE_BBDEV_OP_LDPC_ENC,
1010 			.cap.ldpc_enc = {
1011 				.capability_flags =
1012 					RTE_BBDEV_LDPC_RATE_MATCH |
1013 					RTE_BBDEV_LDPC_CRC_24B_ATTACH |
1014 					RTE_BBDEV_LDPC_INTERLEAVER_BYPASS |
1015 					RTE_BBDEV_LDPC_ENC_INTERRUPTS,
1016 				.num_buffers_src =
1017 						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
1018 				.num_buffers_dst =
1019 						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
1020 			}
1021 		},
1022 		{
1023 			.type   = RTE_BBDEV_OP_LDPC_DEC,
1024 			.cap.ldpc_dec = {
1025 			.capability_flags =
1026 				RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
1027 				RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
1028 				RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
1029 				RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
1030 #ifdef ACC100_EXT_MEM
1031 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK |
1032 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |
1033 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |
1034 #endif
1035 				RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |
1036 				RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS |
1037 				RTE_BBDEV_LDPC_DECODE_BYPASS |
1038 				RTE_BBDEV_LDPC_DEC_SCATTER_GATHER |
1039 				RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION |
1040 				RTE_BBDEV_LDPC_LLR_COMPRESSION |
1041 				RTE_BBDEV_LDPC_DEC_INTERRUPTS,
1042 			.llr_size = 8,
1043 			.llr_decimals = 1,
1044 			.num_buffers_src =
1045 					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
1046 			.num_buffers_hard_out =
1047 					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
1048 			.num_buffers_soft_out = 0,
1049 			}
1050 		},
1051 		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
1052 	};
1053 
1054 	static struct rte_bbdev_queue_conf default_queue_conf;
1055 	default_queue_conf.socket = dev->data->socket_id;
1056 	default_queue_conf.queue_size = ACC100_MAX_QUEUE_DEPTH;
1057 
1058 	dev_info->driver_name = dev->device->driver->name;
1059 
1060 	/* Read and save the populated config from ACC100 registers */
1061 	fetch_acc100_config(dev);
1062 
1063 	/* This isn't ideal because it reports the maximum number of queues but
1064 	 * does not provide info on how many can be uplink/downlink or different
1065 	 * priorities
1066 	 */
1067 	dev_info->max_num_queues =
1068 			d->acc100_conf.q_dl_5g.num_aqs_per_groups *
1069 			d->acc100_conf.q_dl_5g.num_qgroups +
1070 			d->acc100_conf.q_ul_5g.num_aqs_per_groups *
1071 			d->acc100_conf.q_ul_5g.num_qgroups +
1072 			d->acc100_conf.q_dl_4g.num_aqs_per_groups *
1073 			d->acc100_conf.q_dl_4g.num_qgroups +
1074 			d->acc100_conf.q_ul_4g.num_aqs_per_groups *
1075 			d->acc100_conf.q_ul_4g.num_qgroups;
1076 	dev_info->queue_size_lim = ACC100_MAX_QUEUE_DEPTH;
1077 	dev_info->hardware_accelerated = true;
1078 	dev_info->max_dl_queue_priority =
1079 			d->acc100_conf.q_dl_4g.num_qgroups - 1;
1080 	dev_info->max_ul_queue_priority =
1081 			d->acc100_conf.q_ul_4g.num_qgroups - 1;
1082 	dev_info->default_queue_conf = default_queue_conf;
1083 	dev_info->cpu_flag_reqs = NULL;
1084 	dev_info->min_alignment = 64;
1085 	dev_info->capabilities = bbdev_capabilities;
1086 #ifdef ACC100_EXT_MEM
1087 	dev_info->harq_buffer_size = d->ddr_size;
1088 #else
1089 	dev_info->harq_buffer_size = 0;
1090 #endif
1091 	acc100_check_ir(d);
1092 }
1093 
1094 static int
1095 acc100_queue_intr_enable(struct rte_bbdev *dev, uint16_t queue_id)
1096 {
1097 	struct acc100_queue *q = dev->data->queues[queue_id].queue_private;
1098 
1099 	if (dev->intr_handle->type != RTE_INTR_HANDLE_VFIO_MSI &&
1100 			dev->intr_handle->type != RTE_INTR_HANDLE_UIO)
1101 		return -ENOTSUP;
1102 
1103 	q->irq_enable = 1;
1104 	return 0;
1105 }
1106 
1107 static int
1108 acc100_queue_intr_disable(struct rte_bbdev *dev, uint16_t queue_id)
1109 {
1110 	struct acc100_queue *q = dev->data->queues[queue_id].queue_private;
1111 
1112 	if (dev->intr_handle->type != RTE_INTR_HANDLE_VFIO_MSI &&
1113 			dev->intr_handle->type != RTE_INTR_HANDLE_UIO)
1114 		return -ENOTSUP;
1115 
1116 	q->irq_enable = 0;
1117 	return 0;
1118 }
1119 
1120 static const struct rte_bbdev_ops acc100_bbdev_ops = {
1121 	.setup_queues = acc100_setup_queues,
1122 	.intr_enable = acc100_intr_enable,
1123 	.close = acc100_dev_close,
1124 	.info_get = acc100_dev_info_get,
1125 	.queue_setup = acc100_queue_setup,
1126 	.queue_release = acc100_queue_release,
1127 	.queue_intr_enable = acc100_queue_intr_enable,
1128 	.queue_intr_disable = acc100_queue_intr_disable
1129 };
1130 
1131 /* ACC100 PCI PF address map */
1132 static struct rte_pci_id pci_id_acc100_pf_map[] = {
1133 	{
1134 		RTE_PCI_DEVICE(RTE_ACC100_VENDOR_ID, RTE_ACC100_PF_DEVICE_ID)
1135 	},
1136 	{.device_id = 0},
1137 };
1138 
1139 /* ACC100 PCI VF address map */
1140 static struct rte_pci_id pci_id_acc100_vf_map[] = {
1141 	{
1142 		RTE_PCI_DEVICE(RTE_ACC100_VENDOR_ID, RTE_ACC100_VF_DEVICE_ID)
1143 	},
1144 	{.device_id = 0},
1145 };
1146 
1147 /* Read flag value 0/1 from bitmap */
1148 static inline bool
1149 check_bit(uint32_t bitmap, uint32_t bitmask)
1150 {
1151 	return bitmap & bitmask;
1152 }
1153 
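/* Append len bytes at the tail of mbuf m and account for them in the packet
 * length of m_head; returns a pointer to the appended region, or NULL when
 * there is not enough tailroom.
 */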
1154 static inline char *
1155 mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)
1156 {
1157 	if (unlikely(len > rte_pktmbuf_tailroom(m)))
1158 		return NULL;
1159 
1160 	char *tail = (char *)m->buf_addr + m->data_off + m->data_len;
1161 	m->data_len = (uint16_t)(m->data_len + len);
1162 	m_head->pkt_len  = (m_head->pkt_len + len);
1163 	return tail;
1164 }
1165 
1166 /* Fill in a frame control word for turbo encoding. */
1167 static inline void
1168 acc100_fcw_te_fill(const struct rte_bbdev_enc_op *op, struct acc100_fcw_te *fcw)
1169 {
1170 	fcw->code_block_mode = op->turbo_enc.code_block_mode;
1171 	if (fcw->code_block_mode == 0) { /* For TB mode */
1172 		fcw->k_neg = op->turbo_enc.tb_params.k_neg;
1173 		fcw->k_pos = op->turbo_enc.tb_params.k_pos;
1174 		fcw->c_neg = op->turbo_enc.tb_params.c_neg;
1175 		fcw->c = op->turbo_enc.tb_params.c;
1176 		fcw->ncb_neg = op->turbo_enc.tb_params.ncb_neg;
1177 		fcw->ncb_pos = op->turbo_enc.tb_params.ncb_pos;
1178 
1179 		if (check_bit(op->turbo_enc.op_flags,
1180 				RTE_BBDEV_TURBO_RATE_MATCH)) {
1181 			fcw->bypass_rm = 0;
1182 			fcw->cab = op->turbo_enc.tb_params.cab;
1183 			fcw->ea = op->turbo_enc.tb_params.ea;
1184 			fcw->eb = op->turbo_enc.tb_params.eb;
1185 		} else {
1186 			/* E is set to the encoding output size when RM is
1187 			 * bypassed.
1188 			 */
1189 			fcw->bypass_rm = 1;
1190 			fcw->cab = fcw->c_neg;
1191 			fcw->ea = 3 * fcw->k_neg + 12;
1192 			fcw->eb = 3 * fcw->k_pos + 12;
1193 		}
1194 	} else { /* For CB mode */
1195 		fcw->k_pos = op->turbo_enc.cb_params.k;
1196 		fcw->ncb_pos = op->turbo_enc.cb_params.ncb;
1197 
1198 		if (check_bit(op->turbo_enc.op_flags,
1199 				RTE_BBDEV_TURBO_RATE_MATCH)) {
1200 			fcw->bypass_rm = 0;
1201 			fcw->eb = op->turbo_enc.cb_params.e;
1202 		} else {
1203 			/* E is set to the encoding output size when RM is
1204 			 * bypassed.
1205 			 */
1206 			fcw->bypass_rm = 1;
1207 			fcw->eb = 3 * fcw->k_pos + 12;
1208 		}
1209 	}
1210 
1211 	fcw->bypass_rv_idx1 = check_bit(op->turbo_enc.op_flags,
1212 			RTE_BBDEV_TURBO_RV_INDEX_BYPASS);
1213 	fcw->code_block_crc = check_bit(op->turbo_enc.op_flags,
1214 			RTE_BBDEV_TURBO_CRC_24B_ATTACH);
1215 	fcw->rv_idx1 = op->turbo_enc.rv_index;
1216 }
1217 
1218 /* Compute value of k0.
1219  * Based on 3GPP 38.212 Table 5.4.2.1-2
1220  * Starting position of different redundancy versions, k0
1221  */
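/* Note: when n_cb differs from N (limited buffer rate matching), the k0
 * starting position is scaled by n_cb/N and rounded down to a multiple of
 * Z_c, as implemented below.
 */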
1222 static inline uint16_t
1223 get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
1224 {
1225 	if (rv_index == 0)
1226 		return 0;
1227 	uint16_t n = (bg == 1 ? ACC100_N_ZC_1 : ACC100_N_ZC_2) * z_c;
1228 	if (n_cb == n) {
1229 		if (rv_index == 1)
1230 			return (bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * z_c;
1231 		else if (rv_index == 2)
1232 			return (bg == 1 ? ACC100_K0_2_1 : ACC100_K0_2_2) * z_c;
1233 		else
1234 			return (bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * z_c;
1235 	}
1236 	/* LBRM case - includes a division by N */
1237 	if (rv_index == 1)
1238 		return (((bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * n_cb)
1239 				/ n) * z_c;
1240 	else if (rv_index == 2)
1241 		return (((bg == 1 ? ACC100_K0_2_1 : ACC100_K0_2_2) * n_cb)
1242 				/ n) * z_c;
1243 	else
1244 		return (((bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * n_cb)
1245 				/ n) * z_c;
1246 }
1247 
1248 /* Fill in a frame control word for LDPC encoding. */
1249 static inline void
1250 acc100_fcw_le_fill(const struct rte_bbdev_enc_op *op,
1251 		struct acc100_fcw_le *fcw, int num_cb)
1252 {
1253 	fcw->qm = op->ldpc_enc.q_m;
1254 	fcw->nfiller = op->ldpc_enc.n_filler;
1255 	fcw->BG = (op->ldpc_enc.basegraph - 1);
1256 	fcw->Zc = op->ldpc_enc.z_c;
1257 	fcw->ncb = op->ldpc_enc.n_cb;
1258 	fcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_enc.basegraph,
1259 			op->ldpc_enc.rv_index);
1260 	fcw->rm_e = op->ldpc_enc.cb_params.e;
1261 	fcw->crc_select = check_bit(op->ldpc_enc.op_flags,
1262 			RTE_BBDEV_LDPC_CRC_24B_ATTACH);
1263 	fcw->bypass_intlv = check_bit(op->ldpc_enc.op_flags,
1264 			RTE_BBDEV_LDPC_INTERLEAVER_BYPASS);
1265 	fcw->mcb_count = num_cb;
1266 }
1267 
1268 /* Fill in a frame control word for turbo decoding. */
1269 static inline void
1270 acc100_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_td *fcw)
1271 {
1272 	/* Note : Early termination is always enabled for 4GUL */
1273 	fcw->fcw_ver = 1;
1274 	if (op->turbo_dec.code_block_mode == 0)
1275 		fcw->k_pos = op->turbo_dec.tb_params.k_pos;
1276 	else
1277 		fcw->k_pos = op->turbo_dec.cb_params.k;
1278 	fcw->turbo_crc_type = check_bit(op->turbo_dec.op_flags,
1279 			RTE_BBDEV_TURBO_CRC_TYPE_24B);
1280 	fcw->bypass_sb_deint = 0;
1281 	fcw->raw_decoder_input_on = 0;
1282 	fcw->max_iter = op->turbo_dec.iter_max;
1283 	fcw->half_iter_on = !check_bit(op->turbo_dec.op_flags,
1284 			RTE_BBDEV_TURBO_HALF_ITERATION_EVEN);
1285 }
1286 
1287 /* Fill in a frame control word for LDPC decoding. */
1288 static inline void
1289 acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw,
1290 		union acc100_harq_layout_data *harq_layout)
1291 {
1292 	uint16_t harq_out_length, harq_in_length, ncb_p, k0_p, parity_offset;
1293 	uint16_t harq_index;
1294 	uint32_t l;
1295 	bool harq_prun = false;
1296 
1297 	fcw->qm = op->ldpc_dec.q_m;
1298 	fcw->nfiller = op->ldpc_dec.n_filler;
1299 	fcw->BG = (op->ldpc_dec.basegraph - 1);
1300 	fcw->Zc = op->ldpc_dec.z_c;
1301 	fcw->ncb = op->ldpc_dec.n_cb;
1302 	fcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_dec.basegraph,
1303 			op->ldpc_dec.rv_index);
1304 	if (op->ldpc_dec.code_block_mode == 1)
1305 		fcw->rm_e = op->ldpc_dec.cb_params.e;
1306 	else
1307 		fcw->rm_e = (op->ldpc_dec.tb_params.r <
1308 				op->ldpc_dec.tb_params.cab) ?
1309 						op->ldpc_dec.tb_params.ea :
1310 						op->ldpc_dec.tb_params.eb;
1311 
1312 	fcw->hcin_en = check_bit(op->ldpc_dec.op_flags,
1313 			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
1314 	fcw->hcout_en = check_bit(op->ldpc_dec.op_flags,
1315 			RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);
1316 	fcw->crc_select = check_bit(op->ldpc_dec.op_flags,
1317 			RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);
1318 	fcw->bypass_dec = check_bit(op->ldpc_dec.op_flags,
1319 			RTE_BBDEV_LDPC_DECODE_BYPASS);
1320 	fcw->bypass_intlv = check_bit(op->ldpc_dec.op_flags,
1321 			RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS);
1322 	if (op->ldpc_dec.q_m == 1) {
1323 		fcw->bypass_intlv = 1;
1324 		fcw->qm = 2;
1325 	}
1326 	fcw->hcin_decomp_mode = check_bit(op->ldpc_dec.op_flags,
1327 			RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
1328 	fcw->hcout_comp_mode = check_bit(op->ldpc_dec.op_flags,
1329 			RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
1330 	fcw->llr_pack_mode = check_bit(op->ldpc_dec.op_flags,
1331 			RTE_BBDEV_LDPC_LLR_COMPRESSION);
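	/* The HARQ layout table is indexed by the combined-output offset in
	 * units of ACC100_HARQ_OFFSET, assuming each HARQ process buffer
	 * starts at such a multiple.
	 */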
1332 	harq_index = op->ldpc_dec.harq_combined_output.offset /
1333 			ACC100_HARQ_OFFSET;
1334 #ifdef ACC100_EXT_MEM
1335 	/* Limit cases when HARQ pruning is valid */
1336 	harq_prun = ((op->ldpc_dec.harq_combined_output.offset %
1337 			ACC100_HARQ_OFFSET) == 0) &&
1338 			(op->ldpc_dec.harq_combined_output.offset <= UINT16_MAX
1339 			* ACC100_HARQ_OFFSET);
1340 #endif
1341 	if (fcw->hcin_en > 0) {
1342 		harq_in_length = op->ldpc_dec.harq_combined_input.length;
1343 		if (fcw->hcin_decomp_mode > 0)
1344 			harq_in_length = harq_in_length * 8 / 6;
1345 		harq_in_length = RTE_ALIGN(harq_in_length, 64);
1346 		if ((harq_layout[harq_index].offset > 0) & harq_prun) {
1347 			rte_bbdev_log_debug("HARQ IN offset unexpected for now\n");
1348 			fcw->hcin_size0 = harq_layout[harq_index].size0;
1349 			fcw->hcin_offset = harq_layout[harq_index].offset;
1350 			fcw->hcin_size1 = harq_in_length -
1351 					harq_layout[harq_index].offset;
1352 		} else {
1353 			fcw->hcin_size0 = harq_in_length;
1354 			fcw->hcin_offset = 0;
1355 			fcw->hcin_size1 = 0;
1356 		}
1357 	} else {
1358 		fcw->hcin_size0 = 0;
1359 		fcw->hcin_offset = 0;
1360 		fcw->hcin_size1 = 0;
1361 	}
1362 
1363 	fcw->itmax = op->ldpc_dec.iter_max;
1364 	fcw->itstop = check_bit(op->ldpc_dec.op_flags,
1365 			RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
1366 	fcw->synd_precoder = fcw->itstop;
1367 	/*
1368 	 * These are all implicitly set
1369 	 * fcw->synd_post = 0;
1370 	 * fcw->so_en = 0;
1371 	 * fcw->so_bypass_rm = 0;
1372 	 * fcw->so_bypass_intlv = 0;
1373 	 * fcw->dec_convllr = 0;
1374 	 * fcw->hcout_convllr = 0;
1375 	 * fcw->hcout_size1 = 0;
1376 	 * fcw->so_it = 0;
1377 	 * fcw->hcout_offset = 0;
1378 	 * fcw->negstop_th = 0;
1379 	 * fcw->negstop_it = 0;
1380 	 * fcw->negstop_en = 0;
1381 	 * fcw->gain_i = 1;
1382 	 * fcw->gain_h = 1;
1383 	 */
1384 	if (fcw->hcout_en > 0) {
1385 		parity_offset = (op->ldpc_dec.basegraph == 1 ? 20 : 8)
1386 			* op->ldpc_dec.z_c - op->ldpc_dec.n_filler;
1387 		k0_p = (fcw->k0 > parity_offset) ?
1388 				fcw->k0 - op->ldpc_dec.n_filler : fcw->k0;
1389 		ncb_p = fcw->ncb - op->ldpc_dec.n_filler;
1390 		l = k0_p + fcw->rm_e;
1391 		harq_out_length = (uint16_t) fcw->hcin_size0;
1392 		harq_out_length = RTE_MIN(RTE_MAX(harq_out_length, l), ncb_p);
1393 		harq_out_length = (harq_out_length + 0x3F) & 0xFFC0;
1394 		if ((k0_p > fcw->hcin_size0 + ACC100_HARQ_OFFSET_THRESHOLD) &&
1395 				harq_prun) {
1396 			fcw->hcout_size0 = (uint16_t) fcw->hcin_size0;
1397 			fcw->hcout_offset = k0_p & 0xFFC0;
1398 			fcw->hcout_size1 = harq_out_length - fcw->hcout_offset;
1399 		} else {
1400 			fcw->hcout_size0 = harq_out_length;
1401 			fcw->hcout_size1 = 0;
1402 			fcw->hcout_offset = 0;
1403 		}
1404 		harq_layout[harq_index].offset = fcw->hcout_offset;
1405 		harq_layout[harq_index].size0 = fcw->hcout_size0;
1406 	} else {
1407 		fcw->hcout_size0 = 0;
1408 		fcw->hcout_size1 = 0;
1409 		fcw->hcout_offset = 0;
1410 	}
1411 }
1412 
1413 /**
1414  * Fills descriptor with data pointers of one block type.
1415  *
1416  * @param desc
1417  *   Pointer to DMA descriptor.
1418  * @param input
1419  *   Pointer to a pointer to the input data to be encoded. It may be updated
1420  *   to point to the next segment (mbuf) in the scatter-gather case.
1421  * @param offset
1422  *   Input offset in the rte_mbuf structure. It is used to calculate the
1423  *   point where the data starts.
1424  * @param cb_len
1425  *   Length of currently processed Code Block
1426  * @param seg_total_left
1427  *   Indicates how many bytes are still left in the segment (mbuf) for
1428  *   further processing.
1431  * @param next_triplet
1432  *   Index for ACC100 DMA Descriptor triplet
1433  *
1434  * @return
1435  *   Returns index of next triplet on success, other value if lengths of
1436  *   pkt and processed cb do not match.
1437  *
1438  */
1439 static inline int
1440 acc100_dma_fill_blk_type_in(struct acc100_dma_req_desc *desc,
1441 		struct rte_mbuf **input, uint32_t *offset, uint32_t cb_len,
1442 		uint32_t *seg_total_left, int next_triplet)
1443 {
1444 	uint32_t part_len;
1445 	struct rte_mbuf *m = *input;
1446 
1447 	part_len = (*seg_total_left < cb_len) ? *seg_total_left : cb_len;
1448 	cb_len -= part_len;
1449 	*seg_total_left -= part_len;
1450 
1451 	desc->data_ptrs[next_triplet].address =
1452 			rte_pktmbuf_iova_offset(m, *offset);
1453 	desc->data_ptrs[next_triplet].blen = part_len;
1454 	desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN;
1455 	desc->data_ptrs[next_triplet].last = 0;
1456 	desc->data_ptrs[next_triplet].dma_ext = 0;
1457 	*offset += part_len;
1458 	next_triplet++;
1459 
1460 	while (cb_len > 0) {
1461 		if (next_triplet < ACC100_DMA_MAX_NUM_POINTERS &&
1462 				m->next != NULL) {
1463 
1464 			m = m->next;
1465 			*seg_total_left = rte_pktmbuf_data_len(m);
1466 			part_len = (*seg_total_left < cb_len) ?
1467 					*seg_total_left :
1468 					cb_len;
1469 			desc->data_ptrs[next_triplet].address =
1470 					rte_pktmbuf_iova_offset(m, 0);
1471 			desc->data_ptrs[next_triplet].blen = part_len;
1472 			desc->data_ptrs[next_triplet].blkid =
1473 					ACC100_DMA_BLKID_IN;
1474 			desc->data_ptrs[next_triplet].last = 0;
1475 			desc->data_ptrs[next_triplet].dma_ext = 0;
1476 			cb_len -= part_len;
1477 			*seg_total_left -= part_len;
1478 			/* Initializing offset for next segment (mbuf) */
1479 			*offset = part_len;
1480 			next_triplet++;
1481 		} else {
1482 			rte_bbdev_log(ERR,
1483 				"Some data still left for processing: "
1484 				"data_left: %u, next_triplet: %u, next_mbuf: %p",
1485 				cb_len, next_triplet, m->next);
1486 			return -EINVAL;
1487 		}
1488 	}
1489 	/* Store the new mbuf as it may have changed in the scatter-gather case */
1490 	*input = m;
1491 
1492 	return next_triplet;
1493 }
1494 
1495 /* Fills descriptor with data pointers of one block type.
1496  * Returns index of next triplet on success, other value if lengths of
1497  * output data and processed mbuf do not match.
1498  */
1499 static inline int
1500 acc100_dma_fill_blk_type_out(struct acc100_dma_req_desc *desc,
1501 		struct rte_mbuf *output, uint32_t out_offset,
1502 		uint32_t output_len, int next_triplet, int blk_id)
1503 {
1504 	desc->data_ptrs[next_triplet].address =
1505 			rte_pktmbuf_iova_offset(output, out_offset);
1506 	desc->data_ptrs[next_triplet].blen = output_len;
1507 	desc->data_ptrs[next_triplet].blkid = blk_id;
1508 	desc->data_ptrs[next_triplet].last = 0;
1509 	desc->data_ptrs[next_triplet].dma_ext = 0;
1510 	next_triplet++;
1511 
1512 	return next_triplet;
1513 }
1514 
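/* Initialize the common DMA request descriptor header fields for a single
 * code block.
 */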
1515 static inline void
1516 acc100_header_init(struct acc100_dma_req_desc *desc)
1517 {
1518 	desc->word0 = ACC100_DMA_DESC_TYPE;
1519 	desc->word1 = 0; /**< Timestamp could be disabled */
1520 	desc->word2 = 0;
1521 	desc->word3 = 0;
1522 	desc->numCBs = 1;
1523 }
1524 
1525 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1526 /* Check if any input data is unexpectedly left for processing */
1527 static inline int
1528 check_mbuf_total_left(uint32_t mbuf_total_left)
1529 {
1530 	if (mbuf_total_left == 0)
1531 		return 0;
1532 	rte_bbdev_log(ERR,
1533 		"Some data still left for processing: mbuf_total_left = %u",
1534 		mbuf_total_left);
1535 	return -EINVAL;
1536 }
1537 #endif
1538 
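/* Fill the DMA descriptor input/output pointer triplets for one Turbo encode
 * code block; returns 0 on success or -1 on a length mismatch.
 */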
1539 static inline int
1540 acc100_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
1541 		struct acc100_dma_req_desc *desc, struct rte_mbuf **input,
1542 		struct rte_mbuf *output, uint32_t *in_offset,
1543 		uint32_t *out_offset, uint32_t *out_length,
1544 		uint32_t *mbuf_total_left, uint32_t *seg_total_left, uint8_t r)
1545 {
1546 	int next_triplet = 1; /* FCW already done */
1547 	uint32_t e, ea, eb, length;
1548 	uint16_t k, k_neg, k_pos;
1549 	uint8_t cab, c_neg;
1550 
1551 	desc->word0 = ACC100_DMA_DESC_TYPE;
1552 	desc->word1 = 0; /**< Timestamp could be disabled */
1553 	desc->word2 = 0;
1554 	desc->word3 = 0;
1555 	desc->numCBs = 1;
1556 
1557 	if (op->turbo_enc.code_block_mode == 0) {
1558 		ea = op->turbo_enc.tb_params.ea;
1559 		eb = op->turbo_enc.tb_params.eb;
1560 		cab = op->turbo_enc.tb_params.cab;
1561 		k_neg = op->turbo_enc.tb_params.k_neg;
1562 		k_pos = op->turbo_enc.tb_params.k_pos;
1563 		c_neg = op->turbo_enc.tb_params.c_neg;
1564 		e = (r < cab) ? ea : eb;
1565 		k = (r < c_neg) ? k_neg : k_pos;
1566 	} else {
1567 		e = op->turbo_enc.cb_params.e;
1568 		k = op->turbo_enc.cb_params.k;
1569 	}
1570 
1571 	if (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))
1572 		length = (k - 24) >> 3;
1573 	else
1574 		length = k >> 3;
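	/* e.g. for k = 6144 with CRC24B attached by the accelerator, the CB
	 * payload read from the mbuf is (6144 - 24) >> 3 = 765 bytes;
	 * without CRC attach it is 6144 >> 3 = 768 bytes.
	 */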
1575 
1576 	if (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < length))) {
1577 		rte_bbdev_log(ERR,
1578 				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1579 				*mbuf_total_left, length);
1580 		return -1;
1581 	}
1582 
1583 	next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset,
1584 			length, seg_total_left, next_triplet);
1585 	if (unlikely(next_triplet < 0)) {
1586 		rte_bbdev_log(ERR,
1587 				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
1588 				op);
1589 		return -1;
1590 	}
1591 	desc->data_ptrs[next_triplet - 1].last = 1;
1592 	desc->m2dlen = next_triplet;
1593 	*mbuf_total_left -= length;
1594 
1595 	/* Set output length */
1596 	if (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_RATE_MATCH))
1597 		/* Integer round up division by 8 */
1598 		*out_length = (e + 7) >> 3;
1599 	else
1600 		*out_length = (k >> 3) * 3 + 2;
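	/* e.g. a rate-matched CB with e = 121 produces (121 + 7) >> 3 = 16
	 * output bytes, while the non-rate-matched case carries the three
	 * (k + 4)-bit encoder streams, i.e. (6144 >> 3) * 3 + 2 = 2306 bytes
	 * for k = 6144.
	 */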
1601 
1602 	next_triplet = acc100_dma_fill_blk_type_out(desc, output, *out_offset,
1603 			*out_length, next_triplet, ACC100_DMA_BLKID_OUT_ENC);
1604 	if (unlikely(next_triplet < 0)) {
1605 		rte_bbdev_log(ERR,
1606 				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
1607 				op);
1608 		return -1;
1609 	}
1610 	op->turbo_enc.output.length += *out_length;
1611 	*out_offset += *out_length;
1612 	desc->data_ptrs[next_triplet - 1].last = 1;
1613 	desc->d2mlen = next_triplet - desc->m2dlen;
1614 
1615 	desc->op_addr = op;
1616 
1617 	return 0;
1618 }
1619 
1620 static inline int
1621 acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op,
1622 		struct acc100_dma_req_desc *desc, struct rte_mbuf **input,
1623 		struct rte_mbuf *output, uint32_t *in_offset,
1624 		uint32_t *out_offset, uint32_t *out_length,
1625 		uint32_t *mbuf_total_left, uint32_t *seg_total_left)
1626 {
1627 	int next_triplet = 1; /* FCW already done */
1628 	uint16_t K, in_length_in_bits, in_length_in_bytes;
1629 	struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
1630 
1631 	acc100_header_init(desc);
1632 
1633 	K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
1634 	in_length_in_bits = K - enc->n_filler;
1635 	if ((enc->op_flags & RTE_BBDEV_LDPC_CRC_24A_ATTACH) ||
1636 			(enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH))
1637 		in_length_in_bits -= 24;
1638 	in_length_in_bytes = in_length_in_bits >> 3;
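	/* e.g. BG1 with z_c = 384 gives K = 22 * 384 = 8448 bits; with
	 * n_filler = 0 and CRC24A/B attached by the device, the payload
	 * read from the mbuf is (8448 - 24) >> 3 = 1053 bytes.
	 */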
1639 
1640 	if (unlikely((*mbuf_total_left == 0) ||
1641 			(*mbuf_total_left < in_length_in_bytes))) {
1642 		rte_bbdev_log(ERR,
1643 				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1644 				*mbuf_total_left, in_length_in_bytes);
1645 		return -1;
1646 	}
1647 
1648 	next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset,
1649 			in_length_in_bytes,
1650 			seg_total_left, next_triplet);
1651 	if (unlikely(next_triplet < 0)) {
1652 		rte_bbdev_log(ERR,
1653 				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
1654 				op);
1655 		return -1;
1656 	}
1657 	desc->data_ptrs[next_triplet - 1].last = 1;
1658 	desc->m2dlen = next_triplet;
1659 	*mbuf_total_left -= in_length_in_bytes;
1660 
1661 	/* Set output length */
1662 	/* Integer round up division by 8 */
1663 	*out_length = (enc->cb_params.e + 7) >> 3;
1664 
1665 	next_triplet = acc100_dma_fill_blk_type_out(desc, output, *out_offset,
1666 			*out_length, next_triplet, ACC100_DMA_BLKID_OUT_ENC);
1667 	op->ldpc_enc.output.length += *out_length;
1668 	*out_offset += *out_length;
1669 	desc->data_ptrs[next_triplet - 1].last = 1;
1670 	desc->data_ptrs[next_triplet - 1].dma_ext = 0;
1671 	desc->d2mlen = next_triplet - desc->m2dlen;
1672 
1673 	desc->op_addr = op;
1674 
1675 	return 0;
1676 }
1677 
1678 static inline int
1679 acc100_dma_desc_td_fill(struct rte_bbdev_dec_op *op,
1680 		struct acc100_dma_req_desc *desc, struct rte_mbuf **input,
1681 		struct rte_mbuf *h_output, struct rte_mbuf *s_output,
1682 		uint32_t *in_offset, uint32_t *h_out_offset,
1683 		uint32_t *s_out_offset, uint32_t *h_out_length,
1684 		uint32_t *s_out_length, uint32_t *mbuf_total_left,
1685 		uint32_t *seg_total_left, uint8_t r)
1686 {
1687 	int next_triplet = 1; /* FCW already done */
1688 	uint16_t k;
1689 	uint16_t crc24_overlap = 0;
1690 	uint32_t e, kw;
1691 
1692 	desc->word0 = ACC100_DMA_DESC_TYPE;
1693 	desc->word1 = 0; /**< Timestamp could be disabled */
1694 	desc->word2 = 0;
1695 	desc->word3 = 0;
1696 	desc->numCBs = 1;
1697 
1698 	if (op->turbo_dec.code_block_mode == 0) {
1699 		k = (r < op->turbo_dec.tb_params.c_neg)
1700 			? op->turbo_dec.tb_params.k_neg
1701 			: op->turbo_dec.tb_params.k_pos;
1702 		e = (r < op->turbo_dec.tb_params.cab)
1703 			? op->turbo_dec.tb_params.ea
1704 			: op->turbo_dec.tb_params.eb;
1705 	} else {
1706 		k = op->turbo_dec.cb_params.k;
1707 		e = op->turbo_dec.cb_params.e;
1708 	}
1709 
1710 	if ((op->turbo_dec.code_block_mode == 0)
1711 		&& !check_bit(op->turbo_dec.op_flags,
1712 		RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP))
1713 		crc24_overlap = 24;
1714 
1715 	/* Calculates circular buffer size.
1716 	 * According to 3GPP TS 36.212 section 5.1.4.2
1717 	 *   Kw = 3 * Kpi,
1718 	 * where:
1719 	 *   Kpi = nCol * nRow
1720 	 * where nCol is 32 and nRow can be calculated from:
1721 	 *   D <= nCol * nRow
1722 	 * where D is the size of each output from turbo encoder block (k + 4).
1723 	 */
1724 	kw = RTE_ALIGN_CEIL(k + 4, 32) * 3;
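	/* e.g. k = 6144: D = 6148, rounded up to 6176 (next multiple of 32),
	 * giving kw = 3 * 6176 = 18528 soft LLR bytes consumed per CB.
	 */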
1725 
1726 	if (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < kw))) {
1727 		rte_bbdev_log(ERR,
1728 				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1729 				*mbuf_total_left, kw);
1730 		return -1;
1731 	}
1732 
1733 	next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset, kw,
1734 			seg_total_left, next_triplet);
1735 	if (unlikely(next_triplet < 0)) {
1736 		rte_bbdev_log(ERR,
1737 				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
1738 				op);
1739 		return -1;
1740 	}
1741 	desc->data_ptrs[next_triplet - 1].last = 1;
1742 	desc->m2dlen = next_triplet;
1743 	*mbuf_total_left -= kw;
1744 
1745 	next_triplet = acc100_dma_fill_blk_type_out(
1746 			desc, h_output, *h_out_offset,
1747 			k >> 3, next_triplet, ACC100_DMA_BLKID_OUT_HARD);
1748 	if (unlikely(next_triplet < 0)) {
1749 		rte_bbdev_log(ERR,
1750 				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
1751 				op);
1752 		return -1;
1753 	}
1754 
1755 	*h_out_length = ((k - crc24_overlap) >> 3);
1756 	op->turbo_dec.hard_output.length += *h_out_length;
1757 	*h_out_offset += *h_out_length;
1758 
1759 	/* Soft output */
1760 	if (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
1761 		if (check_bit(op->turbo_dec.op_flags,
1762 				RTE_BBDEV_TURBO_EQUALIZER))
1763 			*s_out_length = e;
1764 		else
1765 			*s_out_length = (k * 3) + 12;
1766 
1767 		next_triplet = acc100_dma_fill_blk_type_out(desc, s_output,
1768 				*s_out_offset, *s_out_length, next_triplet,
1769 				ACC100_DMA_BLKID_OUT_SOFT);
1770 		if (unlikely(next_triplet < 0)) {
1771 			rte_bbdev_log(ERR,
1772 					"Mismatch between data to process and mbuf data length in bbdev_op: %p",
1773 					op);
1774 			return -1;
1775 		}
1776 
1777 		op->turbo_dec.soft_output.length += *s_out_length;
1778 		*s_out_offset += *s_out_length;
1779 	}
1780 
1781 	desc->data_ptrs[next_triplet - 1].last = 1;
1782 	desc->d2mlen = next_triplet - desc->m2dlen;
1783 
1784 	desc->op_addr = op;
1785 
1786 	return 0;
1787 }
1788 
1789 static inline int
1790 acc100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
1791 		struct acc100_dma_req_desc *desc,
1792 		struct rte_mbuf **input, struct rte_mbuf *h_output,
1793 		uint32_t *in_offset, uint32_t *h_out_offset,
1794 		uint32_t *h_out_length, uint32_t *mbuf_total_left,
1795 		uint32_t *seg_total_left,
1796 		struct acc100_fcw_ld *fcw)
1797 {
1798 	struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
1799 	int next_triplet = 1; /* FCW already done */
1800 	uint32_t input_length;
1801 	uint16_t output_length, crc24_overlap = 0;
1802 	uint16_t sys_cols, K, h_p_size, h_np_size;
1803 	bool h_comp = check_bit(dec->op_flags,
1804 			RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
1805 
1806 	acc100_header_init(desc);
1807 
1808 	if (check_bit(op->ldpc_dec.op_flags,
1809 			RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
1810 		crc24_overlap = 24;
1811 
1812 	/* Compute some LDPC BG lengths */
1813 	input_length = dec->cb_params.e;
1814 	if (check_bit(op->ldpc_dec.op_flags,
1815 			RTE_BBDEV_LDPC_LLR_COMPRESSION))
1816 		input_length = (input_length * 3 + 3) / 4;
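	/* 6-bit LLR compression packs four LLRs into three bytes, hence the
	 * 3/4 scaling with round-up, e.g. e = 1000 LLRs -> 750 input bytes.
	 */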
1817 	sys_cols = (dec->basegraph == 1) ? 22 : 10;
1818 	K = sys_cols * dec->z_c;
1819 	output_length = K - dec->n_filler - crc24_overlap;
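	/* e.g. BG1 with z_c = 384 gives K = 8448; with n_filler = 144 and
	 * CRC24B dropped, output_length = 8448 - 144 - 24 = 8280 bits,
	 * i.e. 1035 bytes written to the hard output below.
	 */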
1820 
1821 	if (unlikely((*mbuf_total_left == 0) ||
1822 			(*mbuf_total_left < input_length))) {
1823 		rte_bbdev_log(ERR,
1824 				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1825 				*mbuf_total_left, input_length);
1826 		return -1;
1827 	}
1828 
1829 	next_triplet = acc100_dma_fill_blk_type_in(desc, input,
1830 			in_offset, input_length,
1831 			seg_total_left, next_triplet);
1832 
1833 	if (unlikely(next_triplet < 0)) {
1834 		rte_bbdev_log(ERR,
1835 				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
1836 				op);
1837 		return -1;
1838 	}
1839 
1840 	if (check_bit(op->ldpc_dec.op_flags,
1841 				RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1842 		h_p_size = fcw->hcin_size0 + fcw->hcin_size1;
1843 		if (h_comp)
1844 			h_p_size = (h_p_size * 3 + 3) / 4;
1845 		desc->data_ptrs[next_triplet].address =
1846 				dec->harq_combined_input.offset;
1847 		desc->data_ptrs[next_triplet].blen = h_p_size;
1848 		desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN_HARQ;
1849 		desc->data_ptrs[next_triplet].dma_ext = 1;
1850 #ifndef ACC100_EXT_MEM
1851 		acc100_dma_fill_blk_type_out(
1852 				desc,
1853 				op->ldpc_dec.harq_combined_input.data,
1854 				op->ldpc_dec.harq_combined_input.offset,
1855 				h_p_size,
1856 				next_triplet,
1857 				ACC100_DMA_BLKID_IN_HARQ);
1858 #endif
1859 		next_triplet++;
1860 	}
1861 
1862 	desc->data_ptrs[next_triplet - 1].last = 1;
1863 	desc->m2dlen = next_triplet;
1864 	*mbuf_total_left -= input_length;
1865 
1866 	next_triplet = acc100_dma_fill_blk_type_out(desc, h_output,
1867 			*h_out_offset, output_length >> 3, next_triplet,
1868 			ACC100_DMA_BLKID_OUT_HARD);
1869 
1870 	if (check_bit(op->ldpc_dec.op_flags,
1871 				RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
1872 		/* Pruned size of the HARQ */
1873 		h_p_size = fcw->hcout_size0 + fcw->hcout_size1;
1874 		/* Non-Pruned size of the HARQ */
1875 		h_np_size = fcw->hcout_offset > 0 ?
1876 				fcw->hcout_offset + fcw->hcout_size1 :
1877 				h_p_size;
1878 		if (h_comp) {
1879 			h_np_size = (h_np_size * 3 + 3) / 4;
1880 			h_p_size = (h_p_size * 3 + 3) / 4;
1881 		}
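		/* The op reports the full (non-pruned) HARQ span as its
		 * output length, while only the pruned chunks (h_p_size)
		 * are actually moved by the DMA below.
		 */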
1882 		dec->harq_combined_output.length = h_np_size;
1883 		desc->data_ptrs[next_triplet].address =
1884 				dec->harq_combined_output.offset;
1885 		desc->data_ptrs[next_triplet].blen = h_p_size;
1886 		desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_OUT_HARQ;
1887 		desc->data_ptrs[next_triplet].dma_ext = 1;
1888 #ifndef ACC100_EXT_MEM
1889 		acc100_dma_fill_blk_type_out(
1890 				desc,
1891 				dec->harq_combined_output.data,
1892 				dec->harq_combined_output.offset,
1893 				h_p_size,
1894 				next_triplet,
1895 				ACC100_DMA_BLKID_OUT_HARQ);
1896 #endif
1897 		next_triplet++;
1898 	}
1899 
1900 	*h_out_length = output_length >> 3;
1901 	dec->hard_output.length += *h_out_length;
1902 	*h_out_offset += *h_out_length;
1903 	desc->data_ptrs[next_triplet - 1].last = 1;
1904 	desc->d2mlen = next_triplet - desc->m2dlen;
1905 
1906 	desc->op_addr = op;
1907 
1908 	return 0;
1909 }
1910 
1911 static inline void
1912 acc100_dma_desc_ld_update(struct rte_bbdev_dec_op *op,
1913 		struct acc100_dma_req_desc *desc,
1914 		struct rte_mbuf *input, struct rte_mbuf *h_output,
1915 		uint32_t *in_offset, uint32_t *h_out_offset,
1916 		uint32_t *h_out_length,
1917 		union acc100_harq_layout_data *harq_layout)
1918 {
1919 	int next_triplet = 1; /* FCW already done */
1920 	desc->data_ptrs[next_triplet].address =
1921 			rte_pktmbuf_iova_offset(input, *in_offset);
1922 	next_triplet++;
1923 
1924 	if (check_bit(op->ldpc_dec.op_flags,
1925 				RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1926 		struct rte_bbdev_op_data hi = op->ldpc_dec.harq_combined_input;
1927 		desc->data_ptrs[next_triplet].address = hi.offset;
1928 #ifndef ACC100_EXT_MEM
1929 		desc->data_ptrs[next_triplet].address =
1930 				rte_pktmbuf_iova_offset(hi.data, hi.offset);
1931 #endif
1932 		next_triplet++;
1933 	}
1934 
1935 	desc->data_ptrs[next_triplet].address =
1936 			rte_pktmbuf_iova_offset(h_output, *h_out_offset);
1937 	*h_out_length = desc->data_ptrs[next_triplet].blen;
1938 	next_triplet++;
1939 
1940 	if (check_bit(op->ldpc_dec.op_flags,
1941 				RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
1942 		desc->data_ptrs[next_triplet].address =
1943 				op->ldpc_dec.harq_combined_output.offset;
1944 		/* Adjust based on previous operation */
1945 		struct rte_bbdev_dec_op *prev_op = desc->op_addr;
1946 		op->ldpc_dec.harq_combined_output.length =
1947 				prev_op->ldpc_dec.harq_combined_output.length;
1948 		int16_t hq_idx = op->ldpc_dec.harq_combined_output.offset /
1949 				ACC100_HARQ_OFFSET;
1950 		int16_t prev_hq_idx =
1951 				prev_op->ldpc_dec.harq_combined_output.offset
1952 				/ ACC100_HARQ_OFFSET;
1953 		harq_layout[hq_idx].val = harq_layout[prev_hq_idx].val;
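		/* The HARQ memory is handled in ACC100_HARQ_OFFSET-sized
		 * slots; the layout entry for this op's slot inherits the
		 * geometry computed for the previous identical op.
		 */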
1954 #ifndef ACC100_EXT_MEM
1955 		struct rte_bbdev_op_data ho =
1956 				op->ldpc_dec.harq_combined_output;
1957 		desc->data_ptrs[next_triplet].address =
1958 				rte_pktmbuf_iova_offset(ho.data, ho.offset);
1959 #endif
1960 		next_triplet++;
1961 	}
1962 
1963 	op->ldpc_dec.hard_output.length += *h_out_length;
1964 	desc->op_addr = op;
1965 }
1966 
1967 
1968 /* Enqueue a number of operations to HW and update software rings */
1969 static inline void
1970 acc100_dma_enqueue(struct acc100_queue *q, uint16_t n,
1971 		struct rte_bbdev_stats *queue_stats)
1972 {
1973 	union acc100_enqueue_reg_fmt enq_req;
1974 #ifdef RTE_BBDEV_OFFLOAD_COST
1975 	uint64_t start_time = 0;
1976 	queue_stats->acc_offload_cycles = 0;
1977 #else
1978 	RTE_SET_USED(queue_stats);
1979 #endif
1980 
1981 	enq_req.val = 0;
1982 	/* Set address offset: 0b100 (4 x 64B units), i.e. one 256-byte DMA descriptor */
1983 	enq_req.addr_offset = ACC100_DESC_OFFSET;
1984 
1985 	/* Split ops into batches */
1986 	do {
1987 		union acc100_dma_desc *desc;
1988 		uint16_t enq_batch_size;
1989 		uint64_t offset;
1990 		rte_iova_t req_elem_addr;
1991 
1992 		enq_batch_size = RTE_MIN(n, MAX_ENQ_BATCH_SIZE);
1993 
1994 		/* Set flag on last descriptor in a batch */
1995 		desc = q->ring_addr + ((q->sw_ring_head + enq_batch_size - 1) &
1996 				q->sw_ring_wrap_mask);
1997 		desc->req.last_desc_in_batch = 1;
1998 
1999 		/* Calculate the 1st descriptor's address */
2000 		offset = ((q->sw_ring_head & q->sw_ring_wrap_mask) *
2001 				sizeof(union acc100_dma_desc));
2002 		req_elem_addr = q->ring_addr_iova + offset;
2003 
2004 		/* Fill enqueue struct */
2005 		enq_req.num_elem = enq_batch_size;
2006 		/* Descriptors are 64-byte aligned, so the low 6 bits are not needed */
2007 		enq_req.req_elem_addr = (uint32_t)(req_elem_addr >> 6);
2008 
2009 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2010 		rte_memdump(stderr, "Req sdone", desc, sizeof(*desc));
2011 #endif
2012 		rte_bbdev_log_debug(
2013 				"Enqueue %u reqs (phys %#"PRIx64") to reg %p",
2014 				enq_batch_size,
2015 				req_elem_addr,
2016 				(void *)q->mmio_reg_enqueue);
2017 
2018 		rte_wmb();
2019 
2020 #ifdef RTE_BBDEV_OFFLOAD_COST
2021 		/* Start time measurement for enqueue function offload. */
2022 		start_time = rte_rdtsc_precise();
2023 #endif
2024 		rte_bbdev_log(DEBUG, "MMIO enqueue");
2025 		mmio_write(q->mmio_reg_enqueue, enq_req.val);
2026 
2027 #ifdef RTE_BBDEV_OFFLOAD_COST
2028 		queue_stats->acc_offload_cycles +=
2029 				rte_rdtsc_precise() - start_time;
2030 #endif
2031 
2032 		q->aq_enqueued++;
2033 		q->sw_ring_head += enq_batch_size;
2034 		n -= enq_batch_size;
2035 
2036 	} while (n);
2037 
2038 
2039 }
2040 
2041 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2042 /* Validates turbo encoder parameters */
2043 static inline int
2044 validate_enc_op(struct rte_bbdev_enc_op *op)
2045 {
2046 	struct rte_bbdev_op_turbo_enc *turbo_enc = &op->turbo_enc;
2047 	struct rte_bbdev_op_enc_turbo_cb_params *cb = NULL;
2048 	struct rte_bbdev_op_enc_turbo_tb_params *tb = NULL;
2049 	uint16_t kw, kw_neg, kw_pos;
2050 
2051 	if (op->mempool == NULL) {
2052 		rte_bbdev_log(ERR, "Invalid mempool pointer");
2053 		return -1;
2054 	}
2055 	if (turbo_enc->input.data == NULL) {
2056 		rte_bbdev_log(ERR, "Invalid input pointer");
2057 		return -1;
2058 	}
2059 	if (turbo_enc->output.data == NULL) {
2060 		rte_bbdev_log(ERR, "Invalid output pointer");
2061 		return -1;
2062 	}
2063 	if (turbo_enc->rv_index > 3) {
2064 		rte_bbdev_log(ERR,
2065 				"rv_index (%u) is out of range 0 <= value <= 3",
2066 				turbo_enc->rv_index);
2067 		return -1;
2068 	}
2069 	if (turbo_enc->code_block_mode != 0 &&
2070 			turbo_enc->code_block_mode != 1) {
2071 		rte_bbdev_log(ERR,
2072 				"code_block_mode (%u) is out of range 0 <= value <= 1",
2073 				turbo_enc->code_block_mode);
2074 		return -1;
2075 	}
2076 
2077 	if (turbo_enc->code_block_mode == 0) {
2078 		tb = &turbo_enc->tb_params;
2079 		if ((tb->k_neg < RTE_BBDEV_TURBO_MIN_CB_SIZE
2080 				|| tb->k_neg > RTE_BBDEV_TURBO_MAX_CB_SIZE)
2081 				&& tb->c_neg > 0) {
2082 			rte_bbdev_log(ERR,
2083 					"k_neg (%u) is out of range %u <= value <= %u",
2084 					tb->k_neg, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2085 					RTE_BBDEV_TURBO_MAX_CB_SIZE);
2086 			return -1;
2087 		}
2088 		if (tb->k_pos < RTE_BBDEV_TURBO_MIN_CB_SIZE
2089 				|| tb->k_pos > RTE_BBDEV_TURBO_MAX_CB_SIZE) {
2090 			rte_bbdev_log(ERR,
2091 					"k_pos (%u) is out of range %u <= value <= %u",
2092 					tb->k_pos, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2093 					RTE_BBDEV_TURBO_MAX_CB_SIZE);
2094 			return -1;
2095 		}
2096 		if (tb->c_neg > (RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1))
2097 			rte_bbdev_log(ERR,
2098 					"c_neg (%u) is out of range 0 <= value <= %u",
2099 					tb->c_neg,
2100 					RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1);
2101 		if (tb->c < 1 || tb->c > RTE_BBDEV_TURBO_MAX_CODE_BLOCKS) {
2102 			rte_bbdev_log(ERR,
2103 					"c (%u) is out of range 1 <= value <= %u",
2104 					tb->c, RTE_BBDEV_TURBO_MAX_CODE_BLOCKS);
2105 			return -1;
2106 		}
2107 		if (tb->cab > tb->c) {
2108 			rte_bbdev_log(ERR,
2109 					"cab (%u) is greater than c (%u)",
2110 					tb->cab, tb->c);
2111 			return -1;
2112 		}
2113 		if ((tb->ea < RTE_BBDEV_TURBO_MIN_CB_SIZE || (tb->ea % 2))
2114 				&& tb->r < tb->cab) {
2115 			rte_bbdev_log(ERR,
2116 					"ea (%u) is less than %u or it is not even",
2117 					tb->ea, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2118 			return -1;
2119 		}
2120 		if ((tb->eb < RTE_BBDEV_TURBO_MIN_CB_SIZE || (tb->eb % 2))
2121 				&& tb->c > tb->cab) {
2122 			rte_bbdev_log(ERR,
2123 					"eb (%u) is less than %u or it is not even",
2124 					tb->eb, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2125 			return -1;
2126 		}
2127 
2128 		kw_neg = 3 * RTE_ALIGN_CEIL(tb->k_neg + 4,
2129 					RTE_BBDEV_TURBO_C_SUBBLOCK);
2130 		if (tb->ncb_neg < tb->k_neg || tb->ncb_neg > kw_neg) {
2131 			rte_bbdev_log(ERR,
2132 					"ncb_neg (%u) is out of range (%u) k_neg <= value <= (%u) kw_neg",
2133 					tb->ncb_neg, tb->k_neg, kw_neg);
2134 			return -1;
2135 		}
2136 
2137 		kw_pos = 3 * RTE_ALIGN_CEIL(tb->k_pos + 4,
2138 					RTE_BBDEV_TURBO_C_SUBBLOCK);
2139 		if (tb->ncb_pos < tb->k_pos || tb->ncb_pos > kw_pos) {
2140 			rte_bbdev_log(ERR,
2141 					"ncb_pos (%u) is out of range (%u) k_pos <= value <= (%u) kw_pos",
2142 					tb->ncb_pos, tb->k_pos, kw_pos);
2143 			return -1;
2144 		}
2145 		if (tb->r > (tb->c - 1)) {
2146 			rte_bbdev_log(ERR,
2147 					"r (%u) is greater than c - 1 (%u)",
2148 					tb->r, tb->c - 1);
2149 			return -1;
2150 		}
2151 	} else {
2152 		cb = &turbo_enc->cb_params;
2153 		if (cb->k < RTE_BBDEV_TURBO_MIN_CB_SIZE
2154 				|| cb->k > RTE_BBDEV_TURBO_MAX_CB_SIZE) {
2155 			rte_bbdev_log(ERR,
2156 					"k (%u) is out of range %u <= value <= %u",
2157 					cb->k, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2158 					RTE_BBDEV_TURBO_MAX_CB_SIZE);
2159 			return -1;
2160 		}
2161 
2162 		if (cb->e < RTE_BBDEV_TURBO_MIN_CB_SIZE || (cb->e % 2)) {
2163 			rte_bbdev_log(ERR,
2164 					"e (%u) is less than %u or it is not even",
2165 					cb->e, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2166 			return -1;
2167 		}
2168 
2169 		kw = RTE_ALIGN_CEIL(cb->k + 4, RTE_BBDEV_TURBO_C_SUBBLOCK) * 3;
2170 		if (cb->ncb < cb->k || cb->ncb > kw) {
2171 			rte_bbdev_log(ERR,
2172 					"ncb (%u) is out of range (%u) k <= value <= (%u) kw",
2173 					cb->ncb, cb->k, kw);
2174 			return -1;
2175 		}
2176 	}
2177 
2178 	return 0;
2179 }
2180 /* Validates LDPC encoder parameters */
2181 static inline int
2182 validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
2183 {
2184 	struct rte_bbdev_op_ldpc_enc *ldpc_enc = &op->ldpc_enc;
2185 
2186 	if (op->mempool == NULL) {
2187 		rte_bbdev_log(ERR, "Invalid mempool pointer");
2188 		return -1;
2189 	}
2190 	if (ldpc_enc->input.data == NULL) {
2191 		rte_bbdev_log(ERR, "Invalid input pointer");
2192 		return -1;
2193 	}
2194 	if (ldpc_enc->output.data == NULL) {
2195 		rte_bbdev_log(ERR, "Invalid output pointer");
2196 		return -1;
2197 	}
2198 	if (ldpc_enc->input.length >
2199 			RTE_BBDEV_LDPC_MAX_CB_SIZE >> 3) {
2200 		rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
2201 				ldpc_enc->input.length,
2202 				RTE_BBDEV_LDPC_MAX_CB_SIZE);
2203 		return -1;
2204 	}
2205 	if ((ldpc_enc->basegraph > 2) || (ldpc_enc->basegraph == 0)) {
2206 		rte_bbdev_log(ERR,
2207 				"BG (%u) is out of range 1 <= value <= 2",
2208 				ldpc_enc->basegraph);
2209 		return -1;
2210 	}
2211 	if (ldpc_enc->rv_index > 3) {
2212 		rte_bbdev_log(ERR,
2213 				"rv_index (%u) is out of range 0 <= value <= 3",
2214 				ldpc_enc->rv_index);
2215 		return -1;
2216 	}
2217 	if (ldpc_enc->code_block_mode > 1) {
2218 		rte_bbdev_log(ERR,
2219 				"code_block_mode (%u) is out of range 0 <= value <= 1",
2220 				ldpc_enc->code_block_mode);
2221 		return -1;
2222 	}
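	/* n_filler counts padding bits inside the K systematic bits, so it
	 * must stay strictly below K; e.g. BG2 with z_c = 52 gives
	 * K = 10 * 52 = 520, allowing n_filler values up to 519.
	 */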
2223 	int K = (ldpc_enc->basegraph == 1 ? 22 : 10) * ldpc_enc->z_c;
2224 	if (ldpc_enc->n_filler >= K) {
2225 		rte_bbdev_log(ERR,
2226 				"K and F are not compatible %u %u",
2227 				K, ldpc_enc->n_filler);
2228 		return -1;
2229 	}
2230 	return 0;
2231 }
2232 
2233 /* Validates LDPC decoder parameters */
2234 static inline int
2235 validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
2236 {
2237 	struct rte_bbdev_op_ldpc_dec *ldpc_dec = &op->ldpc_dec;
2238 
2239 	if (op->mempool == NULL) {
2240 		rte_bbdev_log(ERR, "Invalid mempool pointer");
2241 		return -1;
2242 	}
2243 	if ((ldpc_dec->basegraph > 2) || (ldpc_dec->basegraph == 0)) {
2244 		rte_bbdev_log(ERR,
2245 				"BG (%u) is out of range 1 <= value <= 2",
2246 				ldpc_dec->basegraph);
2247 		return -1;
2248 	}
2249 	if (ldpc_dec->iter_max == 0) {
2250 		rte_bbdev_log(ERR,
2251 				"iter_max (%u) is equal to 0",
2252 				ldpc_dec->iter_max);
2253 		return -1;
2254 	}
2255 	if (ldpc_dec->rv_index > 3) {
2256 		rte_bbdev_log(ERR,
2257 				"rv_index (%u) is out of range 0 <= value <= 3",
2258 				ldpc_dec->rv_index);
2259 		return -1;
2260 	}
2261 	if (ldpc_dec->code_block_mode > 1) {
2262 		rte_bbdev_log(ERR,
2263 				"code_block_mode (%u) is out of range 0 <= value <= 1",
2264 				ldpc_dec->code_block_mode);
2265 		return -1;
2266 	}
2267 	int K = (ldpc_dec->basegraph == 1 ? 22 : 10) * ldpc_dec->z_c;
2268 	if (ldpc_dec->n_filler >= K) {
2269 		rte_bbdev_log(ERR,
2270 				"K and F are not compatible %u %u",
2271 				K, ldpc_dec->n_filler);
2272 		return -1;
2273 	}
2274 	return 0;
2275 }
2276 #endif
2277 
2278 /* Enqueue one turbo encode operation for ACC100 device in CB mode */
2279 static inline int
2280 enqueue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,
2281 		uint16_t total_enqueued_cbs)
2282 {
2283 	union acc100_dma_desc *desc = NULL;
2284 	int ret;
2285 	uint32_t in_offset, out_offset, out_length, mbuf_total_left,
2286 		seg_total_left;
2287 	struct rte_mbuf *input, *output_head, *output;
2288 
2289 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2290 	/* Validate op structure */
2291 	if (validate_enc_op(op) == -1) {
2292 		rte_bbdev_log(ERR, "Turbo encoder validation failed");
2293 		return -EINVAL;
2294 	}
2295 #endif
2296 
2297 	uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2298 			& q->sw_ring_wrap_mask);
2299 	desc = q->ring_addr + desc_idx;
2300 	acc100_fcw_te_fill(op, &desc->req.fcw_te);
2301 
2302 	input = op->turbo_enc.input.data;
2303 	output_head = output = op->turbo_enc.output.data;
2304 	in_offset = op->turbo_enc.input.offset;
2305 	out_offset = op->turbo_enc.output.offset;
2306 	out_length = 0;
2307 	mbuf_total_left = op->turbo_enc.input.length;
2308 	seg_total_left = rte_pktmbuf_data_len(op->turbo_enc.input.data)
2309 			- in_offset;
2310 
2311 	ret = acc100_dma_desc_te_fill(op, &desc->req, &input, output,
2312 			&in_offset, &out_offset, &out_length, &mbuf_total_left,
2313 			&seg_total_left, 0);
2314 
2315 	if (unlikely(ret < 0))
2316 		return ret;
2317 
2318 	mbuf_append(output_head, output, out_length);
2319 
2320 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2321 	rte_memdump(stderr, "FCW", &desc->req.fcw_te,
2322 			sizeof(desc->req.fcw_te) - 8);
2323 	rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2324 	if (check_mbuf_total_left(mbuf_total_left) != 0)
2325 		return -EINVAL;
2326 #endif
2327 	/* One CB (one op) was successfully prepared to enqueue */
2328 	return 1;
2329 }
2330 
2331 /* Enqueue a batch of LDPC encode operations for ACC100 device in CB mode */
2332 static inline int
2333 enqueue_ldpc_enc_n_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ops,
2334 		uint16_t total_enqueued_cbs, int16_t num)
2335 {
2336 	union acc100_dma_desc *desc = NULL;
2337 	uint32_t out_length;
2338 	struct rte_mbuf *output_head, *output;
2339 	int i, next_triplet;
2340 	uint16_t  in_length_in_bytes;
2341 	struct rte_bbdev_op_ldpc_enc *enc = &ops[0]->ldpc_enc;
2342 
2343 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2344 	/* Validate op structure */
2345 	if (validate_ldpc_enc_op(ops[0]) == -1) {
2346 		rte_bbdev_log(ERR, "LDPC encoder validation failed");
2347 		return -EINVAL;
2348 	}
2349 #endif
2350 
2351 	uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2352 			& q->sw_ring_wrap_mask);
2353 	desc = q->ring_addr + desc_idx;
2354 	acc100_fcw_le_fill(ops[0], &desc->req.fcw_le, num);
2355 
2356 	/* This could be done at polling time */
2357 	acc100_header_init(&desc->req);
2358 	desc->req.numCBs = num;
2359 
2360 	in_length_in_bytes = ops[0]->ldpc_enc.input.data->data_len;
2361 	out_length = (enc->cb_params.e + 7) >> 3;
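	/* DMA map: one triplet for the FCW plus one input pointer per CB
	 * towards the device (m2dlen), and one output pointer per CB back
	 * to host memory (d2mlen).
	 */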
2362 	desc->req.m2dlen = 1 + num;
2363 	desc->req.d2mlen = num;
2364 	next_triplet = 1;
2365 
2366 	for (i = 0; i < num; i++) {
2367 		desc->req.data_ptrs[next_triplet].address =
2368 			rte_pktmbuf_iova_offset(ops[i]->ldpc_enc.input.data, 0);
2369 		desc->req.data_ptrs[next_triplet].blen = in_length_in_bytes;
2370 		next_triplet++;
2371 		desc->req.data_ptrs[next_triplet].address =
2372 				rte_pktmbuf_iova_offset(
2373 				ops[i]->ldpc_enc.output.data, 0);
2374 		desc->req.data_ptrs[next_triplet].blen = out_length;
2375 		next_triplet++;
2376 		ops[i]->ldpc_enc.output.length = out_length;
2377 		output_head = output = ops[i]->ldpc_enc.output.data;
2378 		mbuf_append(output_head, output, out_length);
2379 		output->data_len = out_length;
2380 	}
2381 
2382 	desc->req.op_addr = ops[0];
2383 
2384 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2385 	rte_memdump(stderr, "FCW", &desc->req.fcw_le,
2386 			sizeof(desc->req.fcw_le) - 8);
2387 	rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2388 #endif
2389 
2390 	/* One CB (one op) was successfully prepared to enqueue */
2391 	return num;
2392 }
2393 
2394 /* Enqueue one LDPC encode operation for ACC100 device in CB mode */
2395 static inline int
2396 enqueue_ldpc_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,
2397 		uint16_t total_enqueued_cbs)
2398 {
2399 	union acc100_dma_desc *desc = NULL;
2400 	int ret;
2401 	uint32_t in_offset, out_offset, out_length, mbuf_total_left,
2402 		seg_total_left;
2403 	struct rte_mbuf *input, *output_head, *output;
2404 
2405 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2406 	/* Validate op structure */
2407 	if (validate_ldpc_enc_op(op) == -1) {
2408 		rte_bbdev_log(ERR, "LDPC encoder validation failed");
2409 		return -EINVAL;
2410 	}
2411 #endif
2412 
2413 	uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2414 			& q->sw_ring_wrap_mask);
2415 	desc = q->ring_addr + desc_idx;
2416 	acc100_fcw_le_fill(op, &desc->req.fcw_le, 1);
2417 
2418 	input = op->ldpc_enc.input.data;
2419 	output_head = output = op->ldpc_enc.output.data;
2420 	in_offset = op->ldpc_enc.input.offset;
2421 	out_offset = op->ldpc_enc.output.offset;
2422 	out_length = 0;
2423 	mbuf_total_left = op->ldpc_enc.input.length;
2424 	seg_total_left = rte_pktmbuf_data_len(op->ldpc_enc.input.data)
2425 			- in_offset;
2426 
2427 	ret = acc100_dma_desc_le_fill(op, &desc->req, &input, output,
2428 			&in_offset, &out_offset, &out_length, &mbuf_total_left,
2429 			&seg_total_left);
2430 
2431 	if (unlikely(ret < 0))
2432 		return ret;
2433 
2434 	mbuf_append(output_head, output, out_length);
2435 
2436 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2437 	rte_memdump(stderr, "FCW", &desc->req.fcw_le,
2438 			sizeof(desc->req.fcw_le) - 8);
2439 	rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2440 
2441 	if (check_mbuf_total_left(mbuf_total_left) != 0)
2442 		return -EINVAL;
2443 #endif
2444 	/* One CB (one op) was successfully prepared to enqueue */
2445 	return 1;
2446 }
2447 
2448 
2449 /* Enqueue one turbo encode operation for ACC100 device in TB mode. */
2450 static inline int
2451 enqueue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,
2452 		uint16_t total_enqueued_cbs, uint8_t cbs_in_tb)
2453 {
2454 	union acc100_dma_desc *desc = NULL;
2455 	int ret;
2456 	uint8_t r, c;
2457 	uint32_t in_offset, out_offset, out_length, mbuf_total_left,
2458 		seg_total_left;
2459 	struct rte_mbuf *input, *output_head, *output;
2460 	uint16_t current_enqueued_cbs = 0;
2461 
2462 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2463 	/* Validate op structure */
2464 	if (validate_enc_op(op) == -1) {
2465 		rte_bbdev_log(ERR, "Turbo encoder validation failed");
2466 		return -EINVAL;
2467 	}
2468 #endif
2469 
2470 	uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2471 			& q->sw_ring_wrap_mask);
2472 	desc = q->ring_addr + desc_idx;
2473 	uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
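	/* Each ring descriptor is 256 bytes (as implied by the << 8 shift);
	 * the FCW for this TB sits at ACC100_DESC_FCW_OFFSET within it and
	 * is referenced by the first data pointer of every CB descriptor.
	 */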
2474 	acc100_fcw_te_fill(op, &desc->req.fcw_te);
2475 
2476 	input = op->turbo_enc.input.data;
2477 	output_head = output = op->turbo_enc.output.data;
2478 	in_offset = op->turbo_enc.input.offset;
2479 	out_offset = op->turbo_enc.output.offset;
2480 	out_length = 0;
2481 	mbuf_total_left = op->turbo_enc.input.length;
2482 
2483 	c = op->turbo_enc.tb_params.c;
2484 	r = op->turbo_enc.tb_params.r;
2485 
2486 	while (mbuf_total_left > 0 && r < c) {
2487 		seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
2488 		/* Set up DMA descriptor */
2489 		desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
2490 				& q->sw_ring_wrap_mask);
2491 		desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
2492 		desc->req.data_ptrs[0].blen = ACC100_FCW_TE_BLEN;
2493 
2494 		ret = acc100_dma_desc_te_fill(op, &desc->req, &input, output,
2495 				&in_offset, &out_offset, &out_length,
2496 				&mbuf_total_left, &seg_total_left, r);
2497 		if (unlikely(ret < 0))
2498 			return ret;
2499 		mbuf_append(output_head, output, out_length);
2500 
2501 		/* Set total number of CBs in TB */
2502 		desc->req.cbs_in_tb = cbs_in_tb;
2503 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2504 		rte_memdump(stderr, "FCW", &desc->req.fcw_te,
2505 				sizeof(desc->req.fcw_te) - 8);
2506 		rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2507 #endif
2508 
2509 		if (seg_total_left == 0) {
2510 			/* Go to the next mbuf */
2511 			input = input->next;
2512 			in_offset = 0;
2513 			output = output->next;
2514 			out_offset = 0;
2515 		}
2516 
2517 		total_enqueued_cbs++;
2518 		current_enqueued_cbs++;
2519 		r++;
2520 	}
2521 
2522 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2523 	if (check_mbuf_total_left(mbuf_total_left) != 0)
2524 		return -EINVAL;
2525 #endif
2526 
2527 	/* Set SDone on last CB descriptor for TB mode. */
2528 	desc->req.sdone_enable = 1;
2529 	desc->req.irq_enable = q->irq_enable;
2530 
2531 	return current_enqueued_cbs;
2532 }
2533 
2534 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2535 /* Validates turbo decoder parameters */
2536 static inline int
2537 validate_dec_op(struct rte_bbdev_dec_op *op)
2538 {
2539 	struct rte_bbdev_op_turbo_dec *turbo_dec = &op->turbo_dec;
2540 	struct rte_bbdev_op_dec_turbo_cb_params *cb = NULL;
2541 	struct rte_bbdev_op_dec_turbo_tb_params *tb = NULL;
2542 
2543 	if (op->mempool == NULL) {
2544 		rte_bbdev_log(ERR, "Invalid mempool pointer");
2545 		return -1;
2546 	}
2547 	if (turbo_dec->input.data == NULL) {
2548 		rte_bbdev_log(ERR, "Invalid input pointer");
2549 		return -1;
2550 	}
2551 	if (turbo_dec->hard_output.data == NULL) {
2552 		rte_bbdev_log(ERR, "Invalid hard_output pointer");
2553 		return -1;
2554 	}
2555 	if (check_bit(turbo_dec->op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT) &&
2556 			turbo_dec->soft_output.data == NULL) {
2557 		rte_bbdev_log(ERR, "Invalid soft_output pointer");
2558 		return -1;
2559 	}
2560 	if (turbo_dec->rv_index > 3) {
2561 		rte_bbdev_log(ERR,
2562 				"rv_index (%u) is out of range 0 <= value <= 3",
2563 				turbo_dec->rv_index);
2564 		return -1;
2565 	}
2566 	if (turbo_dec->iter_min < 1) {
2567 		rte_bbdev_log(ERR,
2568 				"iter_min (%u) is less than 1",
2569 				turbo_dec->iter_min);
2570 		return -1;
2571 	}
2572 	if (turbo_dec->iter_max <= 2) {
2573 		rte_bbdev_log(ERR,
2574 				"iter_max (%u) is less than or equal to 2",
2575 				turbo_dec->iter_max);
2576 		return -1;
2577 	}
2578 	if (turbo_dec->iter_min > turbo_dec->iter_max) {
2579 		rte_bbdev_log(ERR,
2580 				"iter_min (%u) is greater than iter_max (%u)",
2581 				turbo_dec->iter_min, turbo_dec->iter_max);
2582 		return -1;
2583 	}
2584 	if (turbo_dec->code_block_mode != 0 &&
2585 			turbo_dec->code_block_mode != 1) {
2586 		rte_bbdev_log(ERR,
2587 				"code_block_mode (%u) is out of range 0 <= value <= 1",
2588 				turbo_dec->code_block_mode);
2589 		return -1;
2590 	}
2591 
2592 	if (turbo_dec->code_block_mode == 0) {
2593 		tb = &turbo_dec->tb_params;
2594 		if ((tb->k_neg < RTE_BBDEV_TURBO_MIN_CB_SIZE
2595 				|| tb->k_neg > RTE_BBDEV_TURBO_MAX_CB_SIZE)
2596 				&& tb->c_neg > 0) {
2597 			rte_bbdev_log(ERR,
2598 					"k_neg (%u) is out of range %u <= value <= %u",
2599 					tb->k_neg, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2600 					RTE_BBDEV_TURBO_MAX_CB_SIZE);
2601 			return -1;
2602 		}
2603 		if ((tb->k_pos < RTE_BBDEV_TURBO_MIN_CB_SIZE
2604 				|| tb->k_pos > RTE_BBDEV_TURBO_MAX_CB_SIZE)
2605 				&& tb->c > tb->c_neg) {
2606 			rte_bbdev_log(ERR,
2607 					"k_pos (%u) is out of range %u <= value <= %u",
2608 					tb->k_pos, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2609 					RTE_BBDEV_TURBO_MAX_CB_SIZE);
2610 			return -1;
2611 		}
2612 		if (tb->c_neg > (RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1))
2613 			rte_bbdev_log(ERR,
2614 					"c_neg (%u) is out of range 0 <= value <= %u",
2615 					tb->c_neg,
2616 					RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1);
2617 		if (tb->c < 1 || tb->c > RTE_BBDEV_TURBO_MAX_CODE_BLOCKS) {
2618 			rte_bbdev_log(ERR,
2619 					"c (%u) is out of range 1 <= value <= %u",
2620 					tb->c, RTE_BBDEV_TURBO_MAX_CODE_BLOCKS);
2621 			return -1;
2622 		}
2623 		if (tb->cab > tb->c) {
2624 			rte_bbdev_log(ERR,
2625 					"cab (%u) is greater than c (%u)",
2626 					tb->cab, tb->c);
2627 			return -1;
2628 		}
2629 		if (check_bit(turbo_dec->op_flags, RTE_BBDEV_TURBO_EQUALIZER) &&
2630 				(tb->ea < RTE_BBDEV_TURBO_MIN_CB_SIZE
2631 						|| (tb->ea % 2))
2632 				&& tb->cab > 0) {
2633 			rte_bbdev_log(ERR,
2634 					"ea (%u) is less than %u or it is not even",
2635 					tb->ea, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2636 			return -1;
2637 		}
2638 		if (check_bit(turbo_dec->op_flags, RTE_BBDEV_TURBO_EQUALIZER) &&
2639 				(tb->eb < RTE_BBDEV_TURBO_MIN_CB_SIZE
2640 						|| (tb->eb % 2))
2641 				&& tb->c > tb->cab) {
2642 			rte_bbdev_log(ERR,
2643 					"eb (%u) is less than %u or it is not even",
2644 					tb->eb, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2645 		}
2646 	} else {
2647 		cb = &turbo_dec->cb_params;
2648 		if (cb->k < RTE_BBDEV_TURBO_MIN_CB_SIZE
2649 				|| cb->k > RTE_BBDEV_TURBO_MAX_CB_SIZE) {
2650 			rte_bbdev_log(ERR,
2651 					"k (%u) is out of range %u <= value <= %u",
2652 					cb->k, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2653 					RTE_BBDEV_TURBO_MAX_CB_SIZE);
2654 			return -1;
2655 		}
2656 		if (check_bit(turbo_dec->op_flags, RTE_BBDEV_TURBO_EQUALIZER) &&
2657 				(cb->e < RTE_BBDEV_TURBO_MIN_CB_SIZE ||
2658 				(cb->e % 2))) {
2659 			rte_bbdev_log(ERR,
2660 					"e (%u) is less than %u or it is not even",
2661 					cb->e, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2662 			return -1;
2663 		}
2664 	}
2665 
2666 	return 0;
2667 }
2668 #endif
2669 
2670 /* Enqueue one turbo decode operation for ACC100 device in CB mode */
2671 static inline int
2672 enqueue_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
2673 		uint16_t total_enqueued_cbs)
2674 {
2675 	union acc100_dma_desc *desc = NULL;
2676 	int ret;
2677 	uint32_t in_offset, h_out_offset, s_out_offset, s_out_length,
2678 		h_out_length, mbuf_total_left, seg_total_left;
2679 	struct rte_mbuf *input, *h_output_head, *h_output,
2680 		*s_output_head, *s_output;
2681 
2682 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2683 	/* Validate op structure */
2684 	if (validate_dec_op(op) == -1) {
2685 		rte_bbdev_log(ERR, "Turbo decoder validation failed");
2686 		return -EINVAL;
2687 	}
2688 #endif
2689 
2690 	uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2691 			& q->sw_ring_wrap_mask);
2692 	desc = q->ring_addr + desc_idx;
2693 	acc100_fcw_td_fill(op, &desc->req.fcw_td);
2694 
2695 	input = op->turbo_dec.input.data;
2696 	h_output_head = h_output = op->turbo_dec.hard_output.data;
2697 	s_output_head = s_output = op->turbo_dec.soft_output.data;
2698 	in_offset = op->turbo_dec.input.offset;
2699 	h_out_offset = op->turbo_dec.hard_output.offset;
2700 	s_out_offset = op->turbo_dec.soft_output.offset;
2701 	h_out_length = s_out_length = 0;
2702 	mbuf_total_left = op->turbo_dec.input.length;
2703 	seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
2704 
2705 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2706 	if (unlikely(input == NULL)) {
2707 		rte_bbdev_log(ERR, "Invalid mbuf pointer");
2708 		return -EFAULT;
2709 	}
2710 #endif
2711 
2712 	/* Set up DMA descriptor */
2713 	desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
2714 			& q->sw_ring_wrap_mask);
2715 
2716 	ret = acc100_dma_desc_td_fill(op, &desc->req, &input, h_output,
2717 			s_output, &in_offset, &h_out_offset, &s_out_offset,
2718 			&h_out_length, &s_out_length, &mbuf_total_left,
2719 			&seg_total_left, 0);
2720 
2721 	if (unlikely(ret < 0))
2722 		return ret;
2723 
2724 	/* Hard output */
2725 	mbuf_append(h_output_head, h_output, h_out_length);
2726 
2727 	/* Soft output */
2728 	if (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT))
2729 		mbuf_append(s_output_head, s_output, s_out_length);
2730 
2731 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2732 	rte_memdump(stderr, "FCW", &desc->req.fcw_td,
2733 			sizeof(desc->req.fcw_td) - 8);
2734 	rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2735 	if (check_mbuf_total_left(mbuf_total_left) != 0)
2736 		return -EINVAL;
2737 #endif
2738 
2739 	/* One CB (one op) was successfully prepared to enqueue */
2740 	return 1;
2741 }
2742 
2743 static inline int
2744 harq_loopback(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
2745 		uint16_t total_enqueued_cbs) {
2746 	struct acc100_fcw_ld *fcw;
2747 	union acc100_dma_desc *desc;
2748 	int next_triplet = 1;
2749 	struct rte_mbuf *hq_output_head, *hq_output;
2750 	uint16_t harq_dma_length_in, harq_dma_length_out;
2751 	uint16_t harq_in_length = op->ldpc_dec.harq_combined_input.length;
2752 	if (harq_in_length == 0) {
2753 		rte_bbdev_log(ERR, "Loopback of invalid null size\n");
2754 		return -EINVAL;
2755 	}
2756 
2757 	int h_comp = check_bit(op->ldpc_dec.op_flags,
2758 			RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION
2759 			) ? 1 : 0;
2760 	if (h_comp == 1) {
2761 		harq_in_length = harq_in_length * 8 / 6;
2762 		harq_in_length = RTE_ALIGN(harq_in_length, 64);
2763 		harq_dma_length_in = harq_in_length * 6 / 8;
2764 	} else {
2765 		harq_in_length = RTE_ALIGN(harq_in_length, 64);
2766 		harq_dma_length_in = harq_in_length;
2767 	}
2768 	harq_dma_length_out = harq_dma_length_in;
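	/* With 6-bit compression the stored HARQ data is 3/4 of its nominal
	 * size: the nominal length is recovered (x8/6), aligned to 64, and
	 * the DMA length converted back (x6/8), e.g. 768 compressed bytes
	 * -> 1024 nominal -> 768 bytes moved by DMA.
	 */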
2769 
2770 	bool ddr_mem_in = check_bit(op->ldpc_dec.op_flags,
2771 			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE);
2772 	union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
2773 	uint16_t harq_index = (ddr_mem_in ?
2774 			op->ldpc_dec.harq_combined_input.offset :
2775 			op->ldpc_dec.harq_combined_output.offset)
2776 			/ ACC100_HARQ_OFFSET;
2777 
2778 	uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2779 			& q->sw_ring_wrap_mask);
2780 	desc = q->ring_addr + desc_idx;
2781 	fcw = &desc->req.fcw_ld;
2782 	/* Set the FCW from loopback into DDR */
2783 	memset(fcw, 0, sizeof(struct acc100_fcw_ld));
2784 	fcw->FCWversion = ACC100_FCW_VER;
2785 	fcw->qm = 2;
2786 	fcw->Zc = 384;
2787 	if (harq_in_length < 16 * ACC100_N_ZC_1)
2788 		fcw->Zc = 16;
2789 	fcw->ncb = fcw->Zc * ACC100_N_ZC_1;
2790 	fcw->rm_e = 2;
2791 	fcw->hcin_en = 1;
2792 	fcw->hcout_en = 1;
2793 
2794 	rte_bbdev_log(DEBUG, "Loopback IN %d Index %d offset %d length %d %d\n",
2795 			ddr_mem_in, harq_index,
2796 			harq_layout[harq_index].offset, harq_in_length,
2797 			harq_dma_length_in);
2798 
2799 	if (ddr_mem_in && (harq_layout[harq_index].offset > 0)) {
2800 		fcw->hcin_size0 = harq_layout[harq_index].size0;
2801 		fcw->hcin_offset = harq_layout[harq_index].offset;
2802 		fcw->hcin_size1 = harq_in_length - fcw->hcin_offset;
2803 		harq_dma_length_in = (fcw->hcin_size0 + fcw->hcin_size1);
2804 		if (h_comp == 1)
2805 			harq_dma_length_in = harq_dma_length_in * 6 / 8;
2806 	} else {
2807 		fcw->hcin_size0 = harq_in_length;
2808 	}
2809 	harq_layout[harq_index].val = 0;
2810 	rte_bbdev_log(DEBUG, "Loopback FCW Config %d %d %d\n",
2811 			fcw->hcin_size0, fcw->hcin_offset, fcw->hcin_size1);
2812 	fcw->hcout_size0 = harq_in_length;
2813 	fcw->hcin_decomp_mode = h_comp;
2814 	fcw->hcout_comp_mode = h_comp;
2815 	fcw->gain_i = 1;
2816 	fcw->gain_h = 1;
2817 
2818 	/* Set the descriptor header. This could be done at polling time */
2819 	acc100_header_init(&desc->req);
2820 
2821 	/* Null LLR input for Decoder */
2822 	desc->req.data_ptrs[next_triplet].address =
2823 			q->lb_in_addr_iova;
2824 	desc->req.data_ptrs[next_triplet].blen = 2;
2825 	desc->req.data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN;
2826 	desc->req.data_ptrs[next_triplet].last = 0;
2827 	desc->req.data_ptrs[next_triplet].dma_ext = 0;
2828 	next_triplet++;
2829 
2830 	/* HARQ combine input, from either memory interface (DDR or host) */
2831 	if (!ddr_mem_in) {
2832 		next_triplet = acc100_dma_fill_blk_type_out(&desc->req,
2833 				op->ldpc_dec.harq_combined_input.data,
2834 				op->ldpc_dec.harq_combined_input.offset,
2835 				harq_dma_length_in,
2836 				next_triplet,
2837 				ACC100_DMA_BLKID_IN_HARQ);
2838 	} else {
2839 		desc->req.data_ptrs[next_triplet].address =
2840 				op->ldpc_dec.harq_combined_input.offset;
2841 		desc->req.data_ptrs[next_triplet].blen =
2842 				harq_dma_length_in;
2843 		desc->req.data_ptrs[next_triplet].blkid =
2844 				ACC100_DMA_BLKID_IN_HARQ;
2845 		desc->req.data_ptrs[next_triplet].dma_ext = 1;
2846 		next_triplet++;
2847 	}
2848 	desc->req.data_ptrs[next_triplet - 1].last = 1;
2849 	desc->req.m2dlen = next_triplet;
2850 
2851 	/* Decoder hard output is dropped into a scratch loopback buffer */
2852 	desc->req.data_ptrs[next_triplet].address =
2853 			q->lb_out_addr_iova;
2854 	desc->req.data_ptrs[next_triplet].blen = ACC100_BYTES_IN_WORD;
2855 	desc->req.data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_OUT_HARD;
2856 	desc->req.data_ptrs[next_triplet].last = 0;
2857 	desc->req.data_ptrs[next_triplet].dma_ext = 0;
2858 	next_triplet++;
2859 
2860 	/* HARQ combine output, to either memory interface (DDR or host) */
2861 	if (check_bit(op->ldpc_dec.op_flags,
2862 			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE
2863 			)) {
2864 		desc->req.data_ptrs[next_triplet].address =
2865 				op->ldpc_dec.harq_combined_output.offset;
2866 		desc->req.data_ptrs[next_triplet].blen =
2867 				harq_dma_length_out;
2868 		desc->req.data_ptrs[next_triplet].blkid =
2869 				ACC100_DMA_BLKID_OUT_HARQ;
2870 		desc->req.data_ptrs[next_triplet].dma_ext = 1;
2871 		next_triplet++;
2872 	} else {
2873 		hq_output_head = op->ldpc_dec.harq_combined_output.data;
2874 		hq_output = op->ldpc_dec.harq_combined_output.data;
2875 		next_triplet = acc100_dma_fill_blk_type_out(
2876 				&desc->req,
2877 				op->ldpc_dec.harq_combined_output.data,
2878 				op->ldpc_dec.harq_combined_output.offset,
2879 				harq_dma_length_out,
2880 				next_triplet,
2881 				ACC100_DMA_BLKID_OUT_HARQ);
2882 		/* HARQ output */
2883 		mbuf_append(hq_output_head, hq_output, harq_dma_length_out);
2884 		op->ldpc_dec.harq_combined_output.length =
2885 				harq_dma_length_out;
2886 	}
2887 	desc->req.data_ptrs[next_triplet - 1].last = 1;
2888 	desc->req.d2mlen = next_triplet - desc->req.m2dlen;
2889 	desc->req.op_addr = op;
2890 
2891 	/* One CB (one op) was successfully prepared to enqueue */
2892 	return 1;
2893 }
2894 
2895 /* Enqueue one LDPC decode operation for ACC100 device in CB mode */
2896 static inline int
2897 enqueue_ldpc_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
2898 		uint16_t total_enqueued_cbs, bool same_op)
2899 {
2900 	int ret;
2901 	if (unlikely(check_bit(op->ldpc_dec.op_flags,
2902 			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK))) {
2903 		ret = harq_loopback(q, op, total_enqueued_cbs);
2904 		return ret;
2905 	}
2906 
2907 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2908 	/* Validate op structure */
2909 	if (validate_ldpc_dec_op(op) == -1) {
2910 		rte_bbdev_log(ERR, "LDPC decoder validation failed");
2911 		return -EINVAL;
2912 	}
2913 #endif
2914 	union acc100_dma_desc *desc;
2915 	uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2916 			& q->sw_ring_wrap_mask);
2917 	desc = q->ring_addr + desc_idx;
2918 	struct rte_mbuf *input, *h_output_head, *h_output;
2919 	uint32_t in_offset, h_out_offset, mbuf_total_left, h_out_length = 0;
2920 	input = op->ldpc_dec.input.data;
2921 	h_output_head = h_output = op->ldpc_dec.hard_output.data;
2922 	in_offset = op->ldpc_dec.input.offset;
2923 	h_out_offset = op->ldpc_dec.hard_output.offset;
2924 	mbuf_total_left = op->ldpc_dec.input.length;
2925 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2926 	if (unlikely(input == NULL)) {
2927 		rte_bbdev_log(ERR, "Invalid mbuf pointer");
2928 		return -EFAULT;
2929 	}
2930 #endif
2931 	union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
2932 
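	/* When the previous descriptor carries an identical configuration,
	 * reuse it: copy its header, BDESCs and FCW and only refresh the
	 * data pointers for this op.
	 */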
2933 	if (same_op) {
2934 		union acc100_dma_desc *prev_desc;
2935 		desc_idx = ((q->sw_ring_head + total_enqueued_cbs - 1)
2936 				& q->sw_ring_wrap_mask);
2937 		prev_desc = q->ring_addr + desc_idx;
2938 		uint8_t *prev_ptr = (uint8_t *) prev_desc;
2939 		uint8_t *new_ptr = (uint8_t *) desc;
2940 		/* Copy first 4 words and BDESCs */
2941 		rte_memcpy(new_ptr, prev_ptr, ACC100_5GUL_SIZE_0);
2942 		rte_memcpy(new_ptr + ACC100_5GUL_OFFSET_0,
2943 				prev_ptr + ACC100_5GUL_OFFSET_0,
2944 				ACC100_5GUL_SIZE_1);
2945 		desc->req.op_addr = prev_desc->req.op_addr;
2946 		/* Copy FCW */
2947 		rte_memcpy(new_ptr + ACC100_DESC_FCW_OFFSET,
2948 				prev_ptr + ACC100_DESC_FCW_OFFSET,
2949 				ACC100_FCW_LD_BLEN);
2950 		acc100_dma_desc_ld_update(op, &desc->req, input, h_output,
2951 				&in_offset, &h_out_offset,
2952 				&h_out_length, harq_layout);
2953 	} else {
2954 		struct acc100_fcw_ld *fcw;
2955 		uint32_t seg_total_left;
2956 		fcw = &desc->req.fcw_ld;
2957 		acc100_fcw_ld_fill(op, fcw, harq_layout);
2958 
2959 		/* Special handling when E is too large to fit in a single mbuf segment */
2960 		if (fcw->rm_e < ACC100_MAX_E_MBUF)
2961 			seg_total_left = rte_pktmbuf_data_len(input)
2962 					- in_offset;
2963 		else
2964 			seg_total_left = fcw->rm_e;
2965 
2966 		ret = acc100_dma_desc_ld_fill(op, &desc->req, &input, h_output,
2967 				&in_offset, &h_out_offset,
2968 				&h_out_length, &mbuf_total_left,
2969 				&seg_total_left, fcw);
2970 		if (unlikely(ret < 0))
2971 			return ret;
2972 	}
2973 
2974 	/* Hard output */
2975 	mbuf_append(h_output_head, h_output, h_out_length);
2976 #ifndef ACC100_EXT_MEM
2977 	if (op->ldpc_dec.harq_combined_output.length > 0) {
2978 		/* Push the HARQ output into host memory */
2979 		struct rte_mbuf *hq_output_head, *hq_output;
2980 		hq_output_head = op->ldpc_dec.harq_combined_output.data;
2981 		hq_output = op->ldpc_dec.harq_combined_output.data;
2982 		mbuf_append(hq_output_head, hq_output,
2983 				op->ldpc_dec.harq_combined_output.length);
2984 	}
2985 #endif
2986 
2987 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2988 	rte_memdump(stderr, "FCW", &desc->req.fcw_ld,
2989 			sizeof(desc->req.fcw_ld) - 8);
2990 	rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2991 #endif
2992 
2993 	/* One CB (one op) was successfully prepared to enqueue */
2994 	return 1;
2995 }
2996 
2997 
2998 /* Enqueue one LDPC decode operation for ACC100 device in TB mode */
2999 static inline int
3000 enqueue_ldpc_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
3001 		uint16_t total_enqueued_cbs, uint8_t cbs_in_tb)
3002 {
3003 	union acc100_dma_desc *desc = NULL;
3004 	int ret;
3005 	uint8_t r, c;
3006 	uint32_t in_offset, h_out_offset,
3007 		h_out_length, mbuf_total_left, seg_total_left;
3008 	struct rte_mbuf *input, *h_output_head, *h_output;
3009 	uint16_t current_enqueued_cbs = 0;
3010 
3011 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3012 	/* Validate op structure */
3013 	if (validate_ldpc_dec_op(op) == -1) {
3014 		rte_bbdev_log(ERR, "LDPC decoder validation failed");
3015 		return -EINVAL;
3016 	}
3017 #endif
3018 
3019 	uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
3020 			& q->sw_ring_wrap_mask);
3021 	desc = q->ring_addr + desc_idx;
3022 	uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
3023 	union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
3024 	acc100_fcw_ld_fill(op, &desc->req.fcw_ld, harq_layout);
3025 
3026 	input = op->ldpc_dec.input.data;
3027 	h_output_head = h_output = op->ldpc_dec.hard_output.data;
3028 	in_offset = op->ldpc_dec.input.offset;
3029 	h_out_offset = op->ldpc_dec.hard_output.offset;
3030 	h_out_length = 0;
3031 	mbuf_total_left = op->ldpc_dec.input.length;
3032 	c = op->ldpc_dec.tb_params.c;
3033 	r = op->ldpc_dec.tb_params.r;
3034 
3035 	while (mbuf_total_left > 0 && r < c) {
3036 
3037 		seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
3038 
3039 		/* Set up DMA descriptor */
3040 		desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
3041 				& q->sw_ring_wrap_mask);
3042 		desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
3043 		desc->req.data_ptrs[0].blen = ACC100_FCW_LD_BLEN;
3044 		ret = acc100_dma_desc_ld_fill(op, &desc->req, &input,
3045 				h_output, &in_offset, &h_out_offset,
3046 				&h_out_length,
3047 				&mbuf_total_left, &seg_total_left,
3048 				&desc->req.fcw_ld);
3049 
3050 		if (unlikely(ret < 0))
3051 			return ret;
3052 
3053 		/* Hard output */
3054 		mbuf_append(h_output_head, h_output, h_out_length);
3055 
3056 		/* Set total number of CBs in TB */
3057 		desc->req.cbs_in_tb = cbs_in_tb;
3058 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3059 		rte_memdump(stderr, "FCW", &desc->req.fcw_td,
3060 				sizeof(desc->req.fcw_td) - 8);
3061 		rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
3062 #endif
3063 
3064 		if (seg_total_left == 0) {
3065 			/* Go to the next mbuf */
3066 			input = input->next;
3067 			in_offset = 0;
3068 			h_output = h_output->next;
3069 			h_out_offset = 0;
3070 		}
3071 		total_enqueued_cbs++;
3072 		current_enqueued_cbs++;
3073 		r++;
3074 	}
3075 
3076 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3077 	if (check_mbuf_total_left(mbuf_total_left) != 0)
3078 		return -EINVAL;
3079 #endif
3080 	/* Set SDone on last CB descriptor for TB mode */
3081 	desc->req.sdone_enable = 1;
3082 	desc->req.irq_enable = q->irq_enable;
3083 
3084 	return current_enqueued_cbs;
3085 }
3086 
3087 /* Enqueue one turbo decode operation for ACC100 device in TB mode */
3088 static inline int
3089 enqueue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
3090 		uint16_t total_enqueued_cbs, uint8_t cbs_in_tb)
3091 {
3092 	union acc100_dma_desc *desc = NULL;
3093 	int ret;
3094 	uint8_t r, c;
3095 	uint32_t in_offset, h_out_offset, s_out_offset, s_out_length,
3096 		h_out_length, mbuf_total_left, seg_total_left;
3097 	struct rte_mbuf *input, *h_output_head, *h_output,
3098 		*s_output_head, *s_output;
3099 	uint16_t current_enqueued_cbs = 0;
3100 
3101 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3102 	/* Validate op structure */
3103 	if (validate_dec_op(op) == -1) {
3104 		rte_bbdev_log(ERR, "Turbo decoder validation failed");
3105 		return -EINVAL;
3106 	}
3107 #endif
3108 
3109 	uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
3110 			& q->sw_ring_wrap_mask);
3111 	desc = q->ring_addr + desc_idx;
3112 	uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
3113 	acc100_fcw_td_fill(op, &desc->req.fcw_td);
3114 
3115 	input = op->turbo_dec.input.data;
3116 	h_output_head = h_output = op->turbo_dec.hard_output.data;
3117 	s_output_head = s_output = op->turbo_dec.soft_output.data;
3118 	in_offset = op->turbo_dec.input.offset;
3119 	h_out_offset = op->turbo_dec.hard_output.offset;
3120 	s_out_offset = op->turbo_dec.soft_output.offset;
3121 	h_out_length = s_out_length = 0;
3122 	mbuf_total_left = op->turbo_dec.input.length;
3123 	c = op->turbo_dec.tb_params.c;
3124 	r = op->turbo_dec.tb_params.r;
3125 
3126 	while (mbuf_total_left > 0 && r < c) {
3127 
3128 		seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
3129 
3130 		/* Set up DMA descriptor */
3131 		desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
3132 				& q->sw_ring_wrap_mask);
3133 		desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
3134 		desc->req.data_ptrs[0].blen = ACC100_FCW_TD_BLEN;
3135 		ret = acc100_dma_desc_td_fill(op, &desc->req, &input,
3136 				h_output, s_output, &in_offset, &h_out_offset,
3137 				&s_out_offset, &h_out_length, &s_out_length,
3138 				&mbuf_total_left, &seg_total_left, r);
3139 
3140 		if (unlikely(ret < 0))
3141 			return ret;
3142 
3143 		/* Hard output */
3144 		mbuf_append(h_output_head, h_output, h_out_length);
3145 
3146 		/* Soft output */
3147 		if (check_bit(op->turbo_dec.op_flags,
3148 				RTE_BBDEV_TURBO_SOFT_OUTPUT))
3149 			mbuf_append(s_output_head, s_output, s_out_length);
3150 
3151 		/* Set total number of CBs in TB */
3152 		desc->req.cbs_in_tb = cbs_in_tb;
3153 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3154 		rte_memdump(stderr, "FCW", &desc->req.fcw_td,
3155 				sizeof(desc->req.fcw_td) - 8);
3156 		rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
3157 #endif
3158 
3159 		if (seg_total_left == 0) {
3160 			/* Go to the next mbuf */
3161 			input = input->next;
3162 			in_offset = 0;
3163 			h_output = h_output->next;
3164 			h_out_offset = 0;
3165 
3166 			if (check_bit(op->turbo_dec.op_flags,
3167 					RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
3168 				s_output = s_output->next;
3169 				s_out_offset = 0;
3170 			}
3171 		}
3172 
3173 		total_enqueued_cbs++;
3174 		current_enqueued_cbs++;
3175 		r++;
3176 	}
3177 
3178 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3179 	if (check_mbuf_total_left(mbuf_total_left) != 0)
3180 		return -EINVAL;
3181 #endif
3182 	/* Set SDone on last CB descriptor for TB mode */
3183 	desc->req.sdone_enable = 1;
3184 	desc->req.irq_enable = q->irq_enable;
3185 
3186 	return current_enqueued_cbs;
3187 }
3188 
3189 /* Calculates number of CBs in processed encoder TB based on 'r' and input
3190  * length.
3191  */
3192 static inline uint8_t
3193 get_num_cbs_in_tb_enc(struct rte_bbdev_op_turbo_enc *turbo_enc)
3194 {
3195 	uint8_t c, c_neg, r, crc24_bits = 0;
3196 	uint16_t k, k_neg, k_pos;
3197 	uint8_t cbs_in_tb = 0;
3198 	int32_t length;
3199 
3200 	length = turbo_enc->input.length;
3201 	r = turbo_enc->tb_params.r;
3202 	c = turbo_enc->tb_params.c;
3203 	c_neg = turbo_enc->tb_params.c_neg;
3204 	k_neg = turbo_enc->tb_params.k_neg;
3205 	k_pos = turbo_enc->tb_params.k_pos;
3206 	crc24_bits = 0;
3207 	if (check_bit(turbo_enc->op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))
3208 		crc24_bits = 24;
3209 	while (length > 0 && r < c) {
3210 		k = (r < c_neg) ? k_neg : k_pos;
3211 		length -= (k - crc24_bits) >> 3;
3212 		r++;
3213 		cbs_in_tb++;
3214 	}
3215 
3216 	return cbs_in_tb;
3217 }
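
/*
 * Worked example for the loop above (illustrative figures, not from the
 * source): with K = 6144 and RTE_BBDEV_TURBO_CRC_24B_ATTACH set, each CB
 * consumes (6144 - 24) / 8 = 765 bytes of TB input, so a 7650-byte input
 * length yields cbs_in_tb = 10.
 */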
3218 
3219 /* Calculates number of CBs in processed decoder TB based on 'r' and input
3220  * length.
3221  */
3222 static inline uint16_t
3223 get_num_cbs_in_tb_dec(struct rte_bbdev_op_turbo_dec *turbo_dec)
3224 {
3225 	uint8_t c, c_neg, r = 0;
3226 	uint16_t kw, k, k_neg, k_pos, cbs_in_tb = 0;
3227 	int32_t length;
3228 
3229 	length = turbo_dec->input.length;
3230 	r = turbo_dec->tb_params.r;
3231 	c = turbo_dec->tb_params.c;
3232 	c_neg = turbo_dec->tb_params.c_neg;
3233 	k_neg = turbo_dec->tb_params.k_neg;
3234 	k_pos = turbo_dec->tb_params.k_pos;
3235 	while (length > 0 && r < c) {
3236 		k = (r < c_neg) ? k_neg : k_pos;
3237 		kw = RTE_ALIGN_CEIL(k + 4, 32) * 3;
3238 		length -= kw;
3239 		r++;
3240 		cbs_in_tb++;
3241 	}
3242 
3243 	return cbs_in_tb;
3244 }
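
/*
 * Note on the loop above: kw is the circular-buffer size consumed per CB of
 * decoder input, i.e. kw = 3 * RTE_ALIGN_CEIL(K + 4, 32). Illustrative
 * arithmetic: for K = 6144, kw = 3 * 6176 = 18528 bytes per CB.
 */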
3245 
3246 /* Calculates number of CBs in processed LDPC decoder TB based on 'r' and input
3247  * length.
3248  */
3249 static inline uint16_t
3250 get_num_cbs_in_tb_ldpc_dec(struct rte_bbdev_op_ldpc_dec *ldpc_dec)
3251 {
3252 	uint16_t r, cbs_in_tb = 0;
3253 	int32_t length = ldpc_dec->input.length;
3254 	r = ldpc_dec->tb_params.r;
3255 	while (length > 0 && r < ldpc_dec->tb_params.c) {
3256 		length -=  (r < ldpc_dec->tb_params.cab) ?
3257 				ldpc_dec->tb_params.ea :
3258 				ldpc_dec->tb_params.eb;
3259 		r++;
3260 		cbs_in_tb++;
3261 	}
3262 	return cbs_in_tb;
3263 }
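
/*
 * For LDPC the per-CB input size is the rate-matched length: per the bbdev TB
 * parameters, 'ea' applies to the first 'cab' CBs and 'eb' to the remaining
 * CBs of the transport block.
 */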
3264 
3265 /* Enqueue encode operations for ACC100 device in CB mode. */
3266 static uint16_t
3267 acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,
3268 		struct rte_bbdev_enc_op **ops, uint16_t num)
3269 {
3270 	struct acc100_queue *q = q_data->queue_private;
3271 	int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3272 	uint16_t i;
3273 	union acc100_dma_desc *desc;
3274 	int ret;
3275 
3276 	for (i = 0; i < num; ++i) {
3277 		/* Check if there is available space for further processing */
3278 		if (unlikely(avail - 1 < 0))
3279 			break;
3280 		avail -= 1;
3281 
3282 		ret = enqueue_enc_one_op_cb(q, ops[i], i);
3283 		if (ret < 0)
3284 			break;
3285 	}
3286 
3287 	if (unlikely(i == 0))
3288 		return 0; /* Nothing to enqueue */
3289 
3290 	/* Set SDone in last CB in enqueued ops for CB mode */
3291 	desc = q->ring_addr + ((q->sw_ring_head + i - 1)
3292 			& q->sw_ring_wrap_mask);
3293 	desc->req.sdone_enable = 1;
3294 	desc->req.irq_enable = q->irq_enable;
3295 
3296 	acc100_dma_enqueue(q, i, &q_data->queue_stats);
3297 
3298 	/* Update stats */
3299 	q_data->queue_stats.enqueued_count += i;
3300 	q_data->queue_stats.enqueue_err_count += num - i;
3301 	return i;
3302 }
3303 
3304 /* Check if we can mux encode operations with a common FCW */
3305 static inline bool
3306 check_mux(struct rte_bbdev_enc_op **ops, uint16_t num) {
3307 	uint16_t i;
3308 	if (num <= 1)
3309 		return false;
3310 	for (i = 1; i < num; ++i) {
3311 		/* Only mux compatible code blocks */
3312 		if (memcmp((uint8_t *)(&ops[i]->ldpc_enc) + ACC100_ENC_OFFSET,
3313 				(uint8_t *)(&ops[0]->ldpc_enc) +
3314 				ACC100_ENC_OFFSET,
3315 				ACC100_CMP_ENC_SIZE) != 0)
3316 			return false;
3317 	}
3318 	return true;
3319 }
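
/*
 * The memcmp above compares the ldpc_enc request over ACC100_CMP_ENC_SIZE
 * bytes starting at ACC100_ENC_OFFSET, which is expected to cover the
 * parameters that determine the FCW contents. Ops matching over that window
 * can be coalesced into a single descriptor by enqueue_ldpc_enc_n_op_cb(),
 * differing only in their data pointers.
 */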
3320 
3321 /* Enqueue LDPC encode operations for ACC100 device in CB mode. */
3322 static inline uint16_t
3323 acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,
3324 		struct rte_bbdev_enc_op **ops, uint16_t num)
3325 {
3326 	struct acc100_queue *q = q_data->queue_private;
3327 	int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3328 	uint16_t i = 0;
3329 	union acc100_dma_desc *desc;
3330 	int ret, desc_idx = 0;
3331 	int16_t enq, left = num;
3332 
3333 	while (left > 0) {
3334 		if (unlikely(avail < 1))
3335 			break;
3336 		avail--;
3337 		enq = RTE_MIN(left, ACC100_MUX_5GDL_DESC);
3338 		if (check_mux(&ops[i], enq)) {
3339 			ret = enqueue_ldpc_enc_n_op_cb(q, &ops[i],
3340 					desc_idx, enq);
3341 			if (ret < 0)
3342 				break;
3343 			i += enq;
3344 		} else {
3345 			ret = enqueue_ldpc_enc_one_op_cb(q, ops[i], desc_idx);
3346 			if (ret < 0)
3347 				break;
3348 			i++;
3349 		}
3350 		desc_idx++;
3351 		left = num - i;
3352 	}
3353 
3354 	if (unlikely(i == 0))
3355 		return 0; /* Nothing to enqueue */
3356 
3357 	/* Set SDone in last CB in enqueued ops for CB mode */
3358 	desc = q->ring_addr + ((q->sw_ring_head + desc_idx - 1)
3359 			& q->sw_ring_wrap_mask);
3360 	desc->req.sdone_enable = 1;
3361 	desc->req.irq_enable = q->irq_enable;
3362 
3363 	acc100_dma_enqueue(q, desc_idx, &q_data->queue_stats);
3364 
3365 	/* Update stats */
3366 	q_data->queue_stats.enqueued_count += i;
3367 	q_data->queue_stats.enqueue_err_count += num - i;
3368 
3369 	return i;
3370 }
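
/*
 * In the loop above up to ACC100_MUX_5GDL_DESC compatible ops are packed into
 * a single descriptor, so the number of descriptors used (desc_idx) may be
 * smaller than the number of ops accepted (i); statistics are kept per op
 * while the doorbell in acc100_dma_enqueue() is rung per descriptor.
 */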
3371 
3372 /* Enqueue encode operations for ACC100 device in TB mode. */
3373 static uint16_t
3374 acc100_enqueue_enc_tb(struct rte_bbdev_queue_data *q_data,
3375 		struct rte_bbdev_enc_op **ops, uint16_t num)
3376 {
3377 	struct acc100_queue *q = q_data->queue_private;
3378 	int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3379 	uint16_t i, enqueued_cbs = 0;
3380 	uint8_t cbs_in_tb;
3381 	int ret;
3382 
3383 	for (i = 0; i < num; ++i) {
3384 		cbs_in_tb = get_num_cbs_in_tb_enc(&ops[i]->turbo_enc);
3385 		/* Check if there is available space for further processing */
3386 		if (unlikely(avail - cbs_in_tb < 0))
3387 			break;
3388 		avail -= cbs_in_tb;
3389 
3390 		ret = enqueue_enc_one_op_tb(q, ops[i], enqueued_cbs, cbs_in_tb);
3391 		if (ret < 0)
3392 			break;
3393 		enqueued_cbs += ret;
3394 	}
3395 	if (unlikely(enqueued_cbs == 0))
3396 		return 0; /* Nothing to enqueue */
3397 
3398 	acc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);
3399 
3400 	/* Update stats */
3401 	q_data->queue_stats.enqueued_count += i;
3402 	q_data->queue_stats.enqueue_err_count += num - i;
3403 
3404 	return i;
3405 }
3406 
3407 /* Enqueue encode operations for ACC100 device. */
3408 static uint16_t
3409 acc100_enqueue_enc(struct rte_bbdev_queue_data *q_data,
3410 		struct rte_bbdev_enc_op **ops, uint16_t num)
3411 {
3412 	if (unlikely(num == 0))
3413 		return 0;
3414 	if (ops[0]->turbo_enc.code_block_mode == 0)
3415 		return acc100_enqueue_enc_tb(q_data, ops, num);
3416 	else
3417 		return acc100_enqueue_enc_cb(q_data, ops, num);
3418 }
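
/*
 * Note: code_block_mode == 0 selects transport-block processing and any other
 * value selects code-block processing, following the bbdev operation
 * definition; the decode entry points below use the same convention.
 */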
3419 
3420 /* Enqueue LDPC encode operations for ACC100 device. */
3421 static uint16_t
3422 acc100_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
3423 		struct rte_bbdev_enc_op **ops, uint16_t num)
3424 {
3425 	if (unlikely(num == 0))
3426 		return 0;
3427 	if (ops[0]->ldpc_enc.code_block_mode == 0)
3428 		return acc100_enqueue_enc_tb(q_data, ops, num);
3429 	else
3430 		return acc100_enqueue_ldpc_enc_cb(q_data, ops, num);
3431 }
3432 
3433 
3434 /* Enqueue decode operations for ACC100 device in CB mode */
3435 static uint16_t
3436 acc100_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data,
3437 		struct rte_bbdev_dec_op **ops, uint16_t num)
3438 {
3439 	struct acc100_queue *q = q_data->queue_private;
3440 	int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3441 	uint16_t i;
3442 	union acc100_dma_desc *desc;
3443 	int ret;
3444 
3445 	for (i = 0; i < num; ++i) {
3446 		/* Check if there is available space for further processing */
3447 		if (unlikely(avail - 1 < 0))
3448 			break;
3449 		avail -= 1;
3450 
3451 		ret = enqueue_dec_one_op_cb(q, ops[i], i);
3452 		if (ret < 0)
3453 			break;
3454 	}
3455 
3456 	if (unlikely(i == 0))
3457 		return 0; /* Nothing to enqueue */
3458 
3459 	/* Set SDone in last CB in enqueued ops for CB mode */
3460 	desc = q->ring_addr + ((q->sw_ring_head + i - 1)
3461 			& q->sw_ring_wrap_mask);
3462 	desc->req.sdone_enable = 1;
3463 	desc->req.irq_enable = q->irq_enable;
3464 
3465 	acc100_dma_enqueue(q, i, &q_data->queue_stats);
3466 
3467 	/* Update stats */
3468 	q_data->queue_stats.enqueued_count += i;
3469 	q_data->queue_stats.enqueue_err_count += num - i;
3470 
3471 	return i;
3472 }
3473 
3474 /* Check if we can mux decode operations with a common FCW */
3475 static inline bool
3476 cmp_ldpc_dec_op(struct rte_bbdev_dec_op **ops) {
3477 	/* Only mux compatible code blocks */
3478 	if (memcmp((uint8_t *)(&ops[0]->ldpc_dec) + ACC100_DEC_OFFSET,
3479 			(uint8_t *)(&ops[1]->ldpc_dec) +
3480 			ACC100_DEC_OFFSET, ACC100_CMP_DEC_SIZE) != 0) {
3481 		return false;
3482 	} else
3483 		return true;
3484 }
3485 
3486 
3487 /* Enqueue LDPC decode operations for ACC100 device in TB mode */
3488 static uint16_t
3489 acc100_enqueue_ldpc_dec_tb(struct rte_bbdev_queue_data *q_data,
3490 		struct rte_bbdev_dec_op **ops, uint16_t num)
3491 {
3492 	struct acc100_queue *q = q_data->queue_private;
3493 	int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3494 	uint16_t i, enqueued_cbs = 0;
3495 	uint8_t cbs_in_tb;
3496 	int ret;
3497 
3498 	for (i = 0; i < num; ++i) {
3499 		cbs_in_tb = get_num_cbs_in_tb_ldpc_dec(&ops[i]->ldpc_dec);
3500 		/* Check if there is available space for further processing */
3501 		if (unlikely(avail - cbs_in_tb < 0))
3502 			break;
3503 		avail -= cbs_in_tb;
3504 
3505 		ret = enqueue_ldpc_dec_one_op_tb(q, ops[i],
3506 				enqueued_cbs, cbs_in_tb);
3507 		if (ret < 0)
3508 			break;
3509 		enqueued_cbs += ret;
3510 	}
3511 
3512 	acc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);
3513 
3514 	/* Update stats */
3515 	q_data->queue_stats.enqueued_count += i;
3516 	q_data->queue_stats.enqueue_err_count += num - i;
3517 	return i;
3518 }
3519 
3520 /* Enqueue LDPC decode operations for ACC100 device in CB mode */
3521 static uint16_t
3522 acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
3523 		struct rte_bbdev_dec_op **ops, uint16_t num)
3524 {
3525 	struct acc100_queue *q = q_data->queue_private;
3526 	int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3527 	uint16_t i;
3528 	union acc100_dma_desc *desc;
3529 	int ret;
3530 	bool same_op = false;
3531 	for (i = 0; i < num; ++i) {
3532 		/* Check if there is available space for further processing */
3533 		if (unlikely(avail < 1))
3534 			break;
3535 		avail -= 1;
3536 
3537 		if (i > 0)
3538 			same_op = cmp_ldpc_dec_op(&ops[i-1]);
3539 		rte_bbdev_log(INFO, "Op %d %d %d %d %d %d %d %d %d %d %d %d\n",
3540 			i, ops[i]->ldpc_dec.op_flags, ops[i]->ldpc_dec.rv_index,
3541 			ops[i]->ldpc_dec.iter_max, ops[i]->ldpc_dec.iter_count,
3542 			ops[i]->ldpc_dec.basegraph, ops[i]->ldpc_dec.z_c,
3543 			ops[i]->ldpc_dec.n_cb, ops[i]->ldpc_dec.q_m,
3544 			ops[i]->ldpc_dec.n_filler, ops[i]->ldpc_dec.cb_params.e,
3545 			same_op);
3546 		ret = enqueue_ldpc_dec_one_op_cb(q, ops[i], i, same_op);
3547 		if (ret < 0)
3548 			break;
3549 	}
3550 
3551 	if (unlikely(i == 0))
3552 		return 0; /* Nothing to enqueue */
3553 
3554 	/* Set SDone in last CB in enqueued ops for CB mode */
3555 	desc = q->ring_addr + ((q->sw_ring_head + i - 1)
3556 			& q->sw_ring_wrap_mask);
3557 
3558 	desc->req.sdone_enable = 1;
3559 	desc->req.irq_enable = q->irq_enable;
3560 
3561 	acc100_dma_enqueue(q, i, &q_data->queue_stats);
3562 
3563 	/* Update stats */
3564 	q_data->queue_stats.enqueued_count += i;
3565 	q_data->queue_stats.enqueue_err_count += num - i;
3566 	return i;
3567 }
3568 
3569 
3570 /* Enqueue decode operations for ACC100 device in TB mode */
3571 static uint16_t
3572 acc100_enqueue_dec_tb(struct rte_bbdev_queue_data *q_data,
3573 		struct rte_bbdev_dec_op **ops, uint16_t num)
3574 {
3575 	struct acc100_queue *q = q_data->queue_private;
3576 	int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3577 	uint16_t i, enqueued_cbs = 0;
3578 	uint8_t cbs_in_tb;
3579 	int ret;
3580 
3581 	for (i = 0; i < num; ++i) {
3582 		cbs_in_tb = get_num_cbs_in_tb_dec(&ops[i]->turbo_dec);
3583 		/* Check if there is available space for further processing */
3584 		if (unlikely(avail - cbs_in_tb < 0))
3585 			break;
3586 		avail -= cbs_in_tb;
3587 
3588 		ret = enqueue_dec_one_op_tb(q, ops[i], enqueued_cbs, cbs_in_tb);
3589 		if (ret < 0)
3590 			break;
3591 		enqueued_cbs += ret;
3592 	}
3593 
3594 	acc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);
3595 
3596 	/* Update stats */
3597 	q_data->queue_stats.enqueued_count += i;
3598 	q_data->queue_stats.enqueue_err_count += num - i;
3599 
3600 	return i;
3601 }
3602 
3603 /* Enqueue decode operations for ACC100 device. */
3604 static uint16_t
3605 acc100_enqueue_dec(struct rte_bbdev_queue_data *q_data,
3606 		struct rte_bbdev_dec_op **ops, uint16_t num)
3607 {
3608 	if (unlikely(num == 0))
3609 		return 0;
3610 	if (ops[0]->turbo_dec.code_block_mode == 0)
3611 		return acc100_enqueue_dec_tb(q_data, ops, num);
3612 	else
3613 		return acc100_enqueue_dec_cb(q_data, ops, num);
3614 }
3615 
3616 /* Enqueue LDPC decode operations for ACC100 device. */
3617 static uint16_t
3618 acc100_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
3619 		struct rte_bbdev_dec_op **ops, uint16_t num)
3620 {
3621 	struct acc100_queue *q = q_data->queue_private;
3622 	int32_t aq_avail = q->aq_depth +
3623 			(q->aq_dequeued - q->aq_enqueued) / 128;
3624 
3625 	if (unlikely((aq_avail == 0) || (num == 0)))
3626 		return 0;
3627 
3628 	if (ops[0]->ldpc_dec.code_block_mode == 0)
3629 		return acc100_enqueue_ldpc_dec_tb(q_data, ops, num);
3630 	else
3631 		return acc100_enqueue_ldpc_dec_cb(q_data, ops, num);
3632 }
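
/*
 * aq_avail above is a coarse estimate of the free atomic-queue slots: the
 * difference between the enqueue and dequeue counters is scaled down by 128
 * (presumably the queue-manager batch granularity) before being weighed
 * against the AQ depth.
 */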
3633 
3634 
3635 /* Dequeue one encode operation from ACC100 device in CB mode */
3636 static inline int
3637 dequeue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,
3638 		uint16_t total_dequeued_cbs, uint32_t *aq_dequeued)
3639 {
3640 	union acc100_dma_desc *desc, atom_desc;
3641 	union acc100_dma_rsp_desc rsp;
3642 	struct rte_bbdev_enc_op *op;
3643 	int i;
3644 
3645 	desc = q->ring_addr + ((q->sw_ring_tail + total_dequeued_cbs)
3646 			& q->sw_ring_wrap_mask);
3647 	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3648 			__ATOMIC_RELAXED);
3649 
3650 	/* Check fdone bit */
3651 	if (!(atom_desc.rsp.val & ACC100_FDONE))
3652 		return -1;
3653 
3654 	rsp.val = atom_desc.rsp.val;
3655 	rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val);
3656 
3657 	/* Dequeue */
3658 	op = desc->req.op_addr;
3659 
3660 	/* Clearing status, it will be set based on response */
3661 	op->status = 0;
3662 
3663 	op->status |= ((rsp.input_err)
3664 			? (1 << RTE_BBDEV_DATA_ERROR) : 0);
3665 	op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3666 	op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3667 
3668 	if (desc->req.last_desc_in_batch) {
3669 		(*aq_dequeued)++;
3670 		desc->req.last_desc_in_batch = 0;
3671 	}
3672 	desc->rsp.val = ACC100_DMA_DESC_TYPE;
3673 	desc->rsp.add_info_0 = 0; /* Reserved bits */
3674 	desc->rsp.add_info_1 = 0; /* Reserved bits */
3675 
3676 	/* Flag that the muxing causes loss of opaque data */
3677 	op->opaque_data = (void *)-1;
3678 	for (i = 0 ; i < desc->req.numCBs; i++)
3679 		ref_op[i] = op;
3680 
3681 	/* One op was dequeued; report the number of CBs in the descriptor */
3682 	return desc->req.numCBs;
3683 }
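
/*
 * Completion is detected from the response word written back by the device:
 * FDONE marks an individual descriptor as processed, while SDONE is only set
 * on the descriptor that had sdone_enable (the last CB of a batch or TB).
 * This is why the TB dequeue paths below additionally poll SDONE on the last
 * CB descriptor before harvesting the whole TB.
 */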
3684 
3685 /* Dequeue one encode operation from ACC100 device in TB mode */
3686 static inline int
3687 dequeue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,
3688 		uint16_t total_dequeued_cbs, uint32_t *aq_dequeued)
3689 {
3690 	union acc100_dma_desc *desc, *last_desc, atom_desc;
3691 	union acc100_dma_rsp_desc rsp;
3692 	struct rte_bbdev_enc_op *op;
3693 	uint8_t i = 0;
3694 	uint16_t current_dequeued_cbs = 0, cbs_in_tb;
3695 
3696 	desc = q->ring_addr + ((q->sw_ring_tail + total_dequeued_cbs)
3697 			& q->sw_ring_wrap_mask);
3698 	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3699 			__ATOMIC_RELAXED);
3700 
3701 	/* Check fdone bit */
3702 	if (!(atom_desc.rsp.val & ACC100_FDONE))
3703 		return -1;
3704 
3705 	/* Get number of CBs in dequeued TB */
3706 	cbs_in_tb = desc->req.cbs_in_tb;
3707 	/* Get last CB */
3708 	last_desc = q->ring_addr + ((q->sw_ring_tail
3709 			+ total_dequeued_cbs + cbs_in_tb - 1)
3710 			& q->sw_ring_wrap_mask);
3711 	/* Check if last CB in TB is ready to dequeue (and thus
3712 	 * the whole TB) - checking sdone bit. If not return.
3713 	 */
3714 	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
3715 			__ATOMIC_RELAXED);
3716 	if (!(atom_desc.rsp.val & ACC100_SDONE))
3717 		return -1;
3718 
3719 	/* Dequeue */
3720 	op = desc->req.op_addr;
3721 
3722 	/* Clearing status, it will be set based on response */
3723 	op->status = 0;
3724 
3725 	while (i < cbs_in_tb) {
3726 		desc = q->ring_addr + ((q->sw_ring_tail
3727 				+ total_dequeued_cbs)
3728 				& q->sw_ring_wrap_mask);
3729 		atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3730 				__ATOMIC_RELAXED);
3731 		rsp.val = atom_desc.rsp.val;
3732 		rte_bbdev_log_debug("Resp. desc %p: %x", desc,
3733 				rsp.val);
3734 
3735 		op->status |= ((rsp.input_err)
3736 				? (1 << RTE_BBDEV_DATA_ERROR) : 0);
3737 		op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3738 		op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3739 
3740 		if (desc->req.last_desc_in_batch) {
3741 			(*aq_dequeued)++;
3742 			desc->req.last_desc_in_batch = 0;
3743 		}
3744 		desc->rsp.val = ACC100_DMA_DESC_TYPE;
3745 		desc->rsp.add_info_0 = 0;
3746 		desc->rsp.add_info_1 = 0;
3747 		total_dequeued_cbs++;
3748 		current_dequeued_cbs++;
3749 		i++;
3750 	}
3751 
3752 	*ref_op = op;
3753 
3754 	return current_dequeued_cbs;
3755 }
3756 
3757 /* Dequeue one decode operation from ACC100 device in CB mode */
3758 static inline int
3759 dequeue_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,
3760 		struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
3761 		uint16_t dequeued_cbs, uint32_t *aq_dequeued)
3762 {
3763 	union acc100_dma_desc *desc, atom_desc;
3764 	union acc100_dma_rsp_desc rsp;
3765 	struct rte_bbdev_dec_op *op;
3766 
3767 	desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3768 			& q->sw_ring_wrap_mask);
3769 	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3770 			__ATOMIC_RELAXED);
3771 
3772 	/* Check fdone bit */
3773 	if (!(atom_desc.rsp.val & ACC100_FDONE))
3774 		return -1;
3775 
3776 	rsp.val = atom_desc.rsp.val;
3777 	rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val);
3778 
3779 	/* Dequeue */
3780 	op = desc->req.op_addr;
3781 
3782 	/* Clearing status, it will be set based on response */
3783 	op->status = 0;
3784 	op->status |= ((rsp.input_err)
3785 			? (1 << RTE_BBDEV_DATA_ERROR) : 0);
3786 	op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3787 	op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3788 	if (op->status != 0) {
3789 		q_data->queue_stats.dequeue_err_count++;
3790 		acc100_check_ir(q->d);
3791 	}
3792 
3793 	/* Report CRC status only when no other error is set */
3794 	if (!op->status)
3795 		op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
3796 	op->turbo_dec.iter_count = (uint8_t) rsp.iter_cnt / 2;
3797 	/* Check if this is the last desc in batch (Atomic Queue) */
3798 	if (desc->req.last_desc_in_batch) {
3799 		(*aq_dequeued)++;
3800 		desc->req.last_desc_in_batch = 0;
3801 	}
3802 	desc->rsp.val = ACC100_DMA_DESC_TYPE;
3803 	desc->rsp.add_info_0 = 0;
3804 	desc->rsp.add_info_1 = 0;
3805 	*ref_op = op;
3806 
3807 	/* One CB (op) was successfully dequeued */
3808 	return 1;
3809 }
3810 
3811 /* Dequeue one LDPC decode operation from ACC100 device in CB mode */
3812 static inline int
3813 dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,
3814 		struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
3815 		uint16_t dequeued_cbs, uint32_t *aq_dequeued)
3816 {
3817 	union acc100_dma_desc *desc, atom_desc;
3818 	union acc100_dma_rsp_desc rsp;
3819 	struct rte_bbdev_dec_op *op;
3820 
3821 	desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3822 			& q->sw_ring_wrap_mask);
3823 	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3824 			__ATOMIC_RELAXED);
3825 
3826 	/* Check fdone bit */
3827 	if (!(atom_desc.rsp.val & ACC100_FDONE))
3828 		return -1;
3829 
3830 	rsp.val = atom_desc.rsp.val;
3831 
3832 	/* Dequeue */
3833 	op = desc->req.op_addr;
3834 
3835 	/* Clearing status, it will be set based on response */
3836 	op->status = 0;
3837 	op->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR;
3838 	op->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR;
3839 	op->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR;
3840 	if (op->status != 0)
3841 		q_data->queue_stats.dequeue_err_count++;
3842 
3843 	op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
3844 	if (op->ldpc_dec.hard_output.length > 0 && !rsp.synd_ok)
3845 		op->status |= 1 << RTE_BBDEV_SYNDROME_ERROR;
3846 	op->ldpc_dec.iter_count = (uint8_t) rsp.iter_cnt;
3847 
3848 	if (op->status & (1 << RTE_BBDEV_DRV_ERROR))
3849 		acc100_check_ir(q->d);
3850 
3851 	/* Check if this is the last desc in batch (Atomic Queue) */
3852 	if (desc->req.last_desc_in_batch) {
3853 		(*aq_dequeued)++;
3854 		desc->req.last_desc_in_batch = 0;
3855 	}
3856 
3857 	desc->rsp.val = ACC100_DMA_DESC_TYPE;
3858 	desc->rsp.add_info_0 = 0;
3859 	desc->rsp.add_info_1 = 0;
3860 
3861 	*ref_op = op;
3862 
3863 	/* One CB (op) was successfully dequeued */
3864 	return 1;
3865 }
3866 
3867 /* Dequeue one decode operation from ACC100 device in TB mode. */
3868 static inline int
3869 dequeue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
3870 		uint16_t dequeued_cbs, uint32_t *aq_dequeued)
3871 {
3872 	union acc100_dma_desc *desc, *last_desc, atom_desc;
3873 	union acc100_dma_rsp_desc rsp;
3874 	struct rte_bbdev_dec_op *op;
3875 	uint8_t cbs_in_tb = 1, cb_idx = 0;
3876 
3877 	desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3878 			& q->sw_ring_wrap_mask);
3879 	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3880 			__ATOMIC_RELAXED);
3881 
3882 	/* Check fdone bit */
3883 	if (!(atom_desc.rsp.val & ACC100_FDONE))
3884 		return -1;
3885 
3886 	/* Dequeue */
3887 	op = desc->req.op_addr;
3888 
3889 	/* Get number of CBs in dequeued TB */
3890 	cbs_in_tb = desc->req.cbs_in_tb;
3891 	/* Get last CB */
3892 	last_desc = q->ring_addr + ((q->sw_ring_tail
3893 			+ dequeued_cbs + cbs_in_tb - 1)
3894 			& q->sw_ring_wrap_mask);
3895 	/* Check if last CB in TB is ready to dequeue (and thus
3896 	 * the whole TB) - checking sdone bit. If not return.
3897 	 */
3898 	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
3899 			__ATOMIC_RELAXED);
3900 	if (!(atom_desc.rsp.val & ACC100_SDONE))
3901 		return -1;
3902 
3903 	/* Clearing status, it will be set based on response */
3904 	op->status = 0;
3905 
3906 	/* Read remaining CBs if any exist */
3907 	while (cb_idx < cbs_in_tb) {
3908 		desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3909 				& q->sw_ring_wrap_mask);
3910 		atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3911 				__ATOMIC_RELAXED);
3912 		rsp.val = atom_desc.rsp.val;
3913 		rte_bbdev_log_debug("Resp. desc %p: %x", desc,
3914 				rsp.val);
3915 
3916 		op->status |= ((rsp.input_err)
3917 				? (1 << RTE_BBDEV_DATA_ERROR) : 0);
3918 		op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3919 		op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3920 
3921 		/* Report CRC status only when no other error is set */
3922 		if (!op->status)
3923 			op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
3924 		op->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt,
3925 				op->turbo_dec.iter_count);
3926 
3927 		/* Check if this is the last desc in batch (Atomic Queue) */
3928 		if (desc->req.last_desc_in_batch) {
3929 			(*aq_dequeued)++;
3930 			desc->req.last_desc_in_batch = 0;
3931 		}
3932 		desc->rsp.val = ACC100_DMA_DESC_TYPE;
3933 		desc->rsp.add_info_0 = 0;
3934 		desc->rsp.add_info_1 = 0;
3935 		dequeued_cbs++;
3936 		cb_idx++;
3937 	}
3938 
3939 	*ref_op = op;
3940 
3941 	return cb_idx;
3942 }
3943 
3944 /* Dequeue encode operations from ACC100 device. */
3945 static uint16_t
3946 acc100_dequeue_enc(struct rte_bbdev_queue_data *q_data,
3947 		struct rte_bbdev_enc_op **ops, uint16_t num)
3948 {
3949 	struct acc100_queue *q = q_data->queue_private;
3950 	uint16_t dequeue_num;
3951 	uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
3952 	uint32_t aq_dequeued = 0;
3953 	uint16_t i, dequeued_cbs = 0;
3954 	struct rte_bbdev_enc_op *op;
3955 	int ret;
3956 
3957 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3958 	if (unlikely(ops == NULL || q == NULL)) {
3959 		rte_bbdev_log_debug("Unexpected undefined pointer");
3960 		return 0;
3961 	}
3962 #endif
3963 
3964 	dequeue_num = (avail < num) ? avail : num;
3965 
3966 	for (i = 0; i < dequeue_num; ++i) {
3967 		op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3968 			& q->sw_ring_wrap_mask))->req.op_addr;
3969 		if (op->turbo_enc.code_block_mode == 0)
3970 			ret = dequeue_enc_one_op_tb(q, &ops[i], dequeued_cbs,
3971 					&aq_dequeued);
3972 		else
3973 			ret = dequeue_enc_one_op_cb(q, &ops[i], dequeued_cbs,
3974 					&aq_dequeued);
3975 
3976 		if (ret < 0)
3977 			break;
3978 		dequeued_cbs += ret;
3979 	}
3980 
3981 	q->aq_dequeued += aq_dequeued;
3982 	q->sw_ring_tail += dequeued_cbs;
3983 
3984 	/* Update dequeue stats */
3985 	q_data->queue_stats.dequeued_count += i;
3986 
3987 	return i;
3988 }
3989 
3990 /* Dequeue LDPC encode operations from ACC100 device. */
3991 static uint16_t
3992 acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
3993 		struct rte_bbdev_enc_op **ops, uint16_t num)
3994 {
3995 	struct acc100_queue *q = q_data->queue_private;
3996 	uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
3997 	uint32_t aq_dequeued = 0;
3998 	uint16_t dequeue_num, i, dequeued_cbs = 0, dequeued_descs = 0;
3999 	int ret;
4000 
4001 #ifdef RTE_LIBRTE_BBDEV_DEBUG
4002 	if (unlikely(ops == NULL || q == NULL))
4003 		return 0;
4004 #endif
4005 
4006 	dequeue_num = RTE_MIN(avail, num);
4007 
4008 	for (i = 0; i < dequeue_num; i++) {
4009 		ret = dequeue_enc_one_op_cb(q, &ops[dequeued_cbs],
4010 				dequeued_descs, &aq_dequeued);
4011 		if (ret < 0)
4012 			break;
4013 		dequeued_cbs += ret;
4014 		dequeued_descs++;
4015 		if (dequeued_cbs >= num)
4016 			break;
4017 	}
4018 
4019 	q->aq_dequeued += aq_dequeued;
4020 	q->sw_ring_tail += dequeued_descs;
4021 
4022 	/* Update dequeue stats */
4023 	q_data->queue_stats.dequeued_count += dequeued_cbs;
4024 
4025 	return dequeued_cbs;
4026 }
4027 
4028 
4029 /* Dequeue decode operations from ACC100 device. */
4030 static uint16_t
4031 acc100_dequeue_dec(struct rte_bbdev_queue_data *q_data,
4032 		struct rte_bbdev_dec_op **ops, uint16_t num)
4033 {
4034 	struct acc100_queue *q = q_data->queue_private;
4035 	uint16_t dequeue_num;
4036 	uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
4037 	uint32_t aq_dequeued = 0;
4038 	uint16_t i;
4039 	uint16_t dequeued_cbs = 0;
4040 	struct rte_bbdev_dec_op *op;
4041 	int ret;
4042 
4043 #ifdef RTE_LIBRTE_BBDEV_DEBUG
4044 	if (unlikely(ops == NULL || q == NULL))
4045 		return 0;
4046 #endif
4047 
4048 	dequeue_num = (avail < num) ? avail : num;
4049 
4050 	for (i = 0; i < dequeue_num; ++i) {
4051 		op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
4052 			& q->sw_ring_wrap_mask))->req.op_addr;
4053 		if (op->turbo_dec.code_block_mode == 0)
4054 			ret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs,
4055 					&aq_dequeued);
4056 		else
4057 			ret = dequeue_dec_one_op_cb(q_data, q, &ops[i],
4058 					dequeued_cbs, &aq_dequeued);
4059 
4060 		if (ret < 0)
4061 			break;
4062 		dequeued_cbs += ret;
4063 	}
4064 
4065 	q->aq_dequeued += aq_dequeued;
4066 	q->sw_ring_tail += dequeued_cbs;
4067 
4068 	/* Update dequeue stats */
4069 	q_data->queue_stats.dequeued_count += i;
4070 
4071 	return i;
4072 }
4073 
4074 /* Dequeue LDPC decode operations from ACC100 device. */
4075 static uint16_t
4076 acc100_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
4077 		struct rte_bbdev_dec_op **ops, uint16_t num)
4078 {
4079 	struct acc100_queue *q = q_data->queue_private;
4080 	uint16_t dequeue_num;
4081 	uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
4082 	uint32_t aq_dequeued = 0;
4083 	uint16_t i;
4084 	uint16_t dequeued_cbs = 0;
4085 	struct rte_bbdev_dec_op *op;
4086 	int ret;
4087 
4088 #ifdef RTE_LIBRTE_BBDEV_DEBUG
4089 	if (unlikely(ops == NULL || q == NULL))
4090 		return 0;
4091 #endif
4092 
4093 	dequeue_num = RTE_MIN(avail, num);
4094 
4095 	for (i = 0; i < dequeue_num; ++i) {
4096 		op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
4097 			& q->sw_ring_wrap_mask))->req.op_addr;
4098 		if (op->ldpc_dec.code_block_mode == 0)
4099 			ret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs,
4100 					&aq_dequeued);
4101 		else
4102 			ret = dequeue_ldpc_dec_one_op_cb(
4103 					q_data, q, &ops[i], dequeued_cbs,
4104 					&aq_dequeued);
4105 
4106 		if (ret < 0)
4107 			break;
4108 		dequeued_cbs += ret;
4109 	}
4110 
4111 	q->aq_dequeued += aq_dequeued;
4112 	q->sw_ring_tail += dequeued_cbs;
4113 
4114 	/* Update dequeue stats */
4115 	q_data->queue_stats.dequeued_count += i;
4116 
4117 	return i;
4118 }
4119 
4120 /* Initialization Function */
4121 static void
4122 acc100_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
4123 {
4124 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
4125 
4126 	dev->dev_ops = &acc100_bbdev_ops;
4127 	dev->enqueue_enc_ops = acc100_enqueue_enc;
4128 	dev->enqueue_dec_ops = acc100_enqueue_dec;
4129 	dev->dequeue_enc_ops = acc100_dequeue_enc;
4130 	dev->dequeue_dec_ops = acc100_dequeue_dec;
4131 	dev->enqueue_ldpc_enc_ops = acc100_enqueue_ldpc_enc;
4132 	dev->enqueue_ldpc_dec_ops = acc100_enqueue_ldpc_dec;
4133 	dev->dequeue_ldpc_enc_ops = acc100_dequeue_ldpc_enc;
4134 	dev->dequeue_ldpc_dec_ops = acc100_dequeue_ldpc_dec;
4135 
4136 	((struct acc100_device *) dev->data->dev_private)->pf_device =
4137 			!strcmp(drv->driver.name,
4138 					RTE_STR(ACC100PF_DRIVER_NAME));
4139 	((struct acc100_device *) dev->data->dev_private)->mmio_base =
4140 			pci_dev->mem_resource[0].addr;
4141 
4142 	rte_bbdev_log_debug("Init device %s [%s] @ vaddr %p paddr %#"PRIx64"",
4143 			drv->driver.name, dev->data->name,
4144 			(void *)pci_dev->mem_resource[0].addr,
4145 			pci_dev->mem_resource[0].phys_addr);
4146 }
4147 
4148 static int acc100_pci_probe(struct rte_pci_driver *pci_drv,
4149 	struct rte_pci_device *pci_dev)
4150 {
4151 	struct rte_bbdev *bbdev = NULL;
4152 	char dev_name[RTE_BBDEV_NAME_MAX_LEN];
4153 
4154 	if (pci_dev == NULL) {
4155 		rte_bbdev_log(ERR, "NULL PCI device");
4156 		return -EINVAL;
4157 	}
4158 
4159 	rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
4160 
4161 	/* Allocate memory to be used privately by drivers */
4162 	bbdev = rte_bbdev_allocate(pci_dev->device.name);
4163 	if (bbdev == NULL)
4164 		return -ENODEV;
4165 
4166 	/* allocate device private memory */
4167 	bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
4168 			sizeof(struct acc100_device), RTE_CACHE_LINE_SIZE,
4169 			pci_dev->device.numa_node);
4170 
4171 	if (bbdev->data->dev_private == NULL) {
4172 		rte_bbdev_log(CRIT,
4173 				"Allocation of %zu bytes for device \"%s\" failed",
4174 				sizeof(struct acc100_device), dev_name);
4175 		rte_bbdev_release(bbdev);
4176 		return -ENOMEM;
4177 	}
4178 
4179 	/* Fill HW specific part of device structure */
4180 	bbdev->device = &pci_dev->device;
4181 	bbdev->intr_handle = &pci_dev->intr_handle;
4182 	bbdev->data->socket_id = pci_dev->device.numa_node;
4183 
4184 	/* Invoke ACC100 device initialization function */
4185 	acc100_bbdev_init(bbdev, pci_drv);
4186 
4187 	rte_bbdev_log_debug("Initialised bbdev %s (id = %u)",
4188 			dev_name, bbdev->data->dev_id);
4189 	return 0;
4190 }
4191 
4192 static int acc100_pci_remove(struct rte_pci_device *pci_dev)
4193 {
4194 	struct rte_bbdev *bbdev;
4195 	int ret;
4196 	uint8_t dev_id;
4197 
4198 	if (pci_dev == NULL)
4199 		return -EINVAL;
4200 
4201 	/* Find device */
4202 	bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
4203 	if (bbdev == NULL) {
4204 		rte_bbdev_log(CRIT,
4205 				"Couldn't find HW dev \"%s\" to uninitialise it",
4206 				pci_dev->device.name);
4207 		return -ENODEV;
4208 	}
4209 	dev_id = bbdev->data->dev_id;
4210 
4211 	/* free device private memory before close */
4212 	rte_free(bbdev->data->dev_private);
4213 
4214 	/* Close device */
4215 	ret = rte_bbdev_close(dev_id);
4216 	if (ret < 0)
4217 		rte_bbdev_log(ERR,
4218 				"Device %i failed to close during uninit: %i",
4219 				dev_id, ret);
4220 
4221 	/* release bbdev from library */
4222 	rte_bbdev_release(bbdev);
4223 
4224 	rte_bbdev_log_debug("Destroyed bbdev = %u", dev_id);
4225 
4226 	return 0;
4227 }
4228 
4229 static struct rte_pci_driver acc100_pci_pf_driver = {
4230 		.probe = acc100_pci_probe,
4231 		.remove = acc100_pci_remove,
4232 		.id_table = pci_id_acc100_pf_map,
4233 		.drv_flags = RTE_PCI_DRV_NEED_MAPPING
4234 };
4235 
4236 static struct rte_pci_driver acc100_pci_vf_driver = {
4237 		.probe = acc100_pci_probe,
4238 		.remove = acc100_pci_remove,
4239 		.id_table = pci_id_acc100_vf_map,
4240 		.drv_flags = RTE_PCI_DRV_NEED_MAPPING
4241 };
4242 
4243 RTE_PMD_REGISTER_PCI(ACC100PF_DRIVER_NAME, acc100_pci_pf_driver);
4244 RTE_PMD_REGISTER_PCI_TABLE(ACC100PF_DRIVER_NAME, pci_id_acc100_pf_map);
4245 RTE_PMD_REGISTER_PCI(ACC100VF_DRIVER_NAME, acc100_pci_vf_driver);
4246 RTE_PMD_REGISTER_PCI_TABLE(ACC100VF_DRIVER_NAME, pci_id_acc100_vf_map);
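
/*
 * Illustrative datapath usage from an application (a sketch only; BURST and
 * the dev_id/queue_id values are hypothetical):
 *
 *   struct rte_bbdev_enc_op *ops[BURST];
 *   uint16_t n = rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id, ops, BURST);
 *   ...
 *   uint16_t done = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id, ops, n);
 *
 * The bbdev library dispatches these calls to the enqueue/dequeue handlers
 * registered in acc100_bbdev_init() above.
 */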
4247 
4248 /*
4249  * Workaround implementation to fix the power-on status of some 5GUL engines.
4250  * This requires DMA permission if ported outside DPDK.
4251  * It consists of resolving the state of these engines by running a
4252  * dummy operation and resetting the engines so that their state is reliably
4253  * defined.
4254  */
4255 static void
4256 poweron_cleanup(struct rte_bbdev *bbdev, struct acc100_device *d,
4257 		struct rte_acc100_conf *conf)
4258 {
4259 	int i, template_idx, qg_idx;
4260 	uint32_t address, status, value;
4261 	printf("Need to clear power-on 5GUL status in internal memory\n");
4262 	/* Reset LDPC Cores */
4263 	for (i = 0; i < ACC100_ENGINES_MAX; i++)
4264 		acc100_reg_write(d, HWPfFecUl5gCntrlReg +
4265 				ACC100_ENGINE_OFFSET * i, ACC100_RESET_HI);
4266 	usleep(ACC100_LONG_WAIT);
4267 	for (i = 0; i < ACC100_ENGINES_MAX; i++)
4268 		acc100_reg_write(d, HWPfFecUl5gCntrlReg +
4269 				ACC100_ENGINE_OFFSET * i, ACC100_RESET_LO);
4270 	usleep(ACC100_LONG_WAIT);
4271 	/* Prepare dummy workload */
4272 	alloc_2x64mb_sw_rings_mem(bbdev, d, 0);
4273 	/* Set base addresses */
4274 	uint32_t phys_high = (uint32_t)(d->sw_rings_iova >> 32);
4275 	uint32_t phys_low  = (uint32_t)(d->sw_rings_iova &
4276 			~(ACC100_SIZE_64MBYTE-1));
4277 	acc100_reg_write(d, HWPfDmaFec5GulDescBaseHiRegVf, phys_high);
4278 	acc100_reg_write(d, HWPfDmaFec5GulDescBaseLoRegVf, phys_low);
4279 
4280 	/* Descriptor for a dummy 5GUL code block processing */
4281 	union acc100_dma_desc *desc = NULL;
4282 	desc = d->sw_rings;
4283 	desc->req.data_ptrs[0].address = d->sw_rings_iova +
4284 			ACC100_DESC_FCW_OFFSET;
4285 	desc->req.data_ptrs[0].blen = ACC100_FCW_LD_BLEN;
4286 	desc->req.data_ptrs[0].blkid = ACC100_DMA_BLKID_FCW;
4287 	desc->req.data_ptrs[0].last = 0;
4288 	desc->req.data_ptrs[0].dma_ext = 0;
4289 	desc->req.data_ptrs[1].address = d->sw_rings_iova + 512;
4290 	desc->req.data_ptrs[1].blkid = ACC100_DMA_BLKID_IN;
4291 	desc->req.data_ptrs[1].last = 1;
4292 	desc->req.data_ptrs[1].dma_ext = 0;
4293 	desc->req.data_ptrs[1].blen = 44;
4294 	desc->req.data_ptrs[2].address = d->sw_rings_iova + 1024;
4295 	desc->req.data_ptrs[2].blkid = ACC100_DMA_BLKID_OUT_ENC;
4296 	desc->req.data_ptrs[2].last = 1;
4297 	desc->req.data_ptrs[2].dma_ext = 0;
4298 	desc->req.data_ptrs[2].blen = 5;
4299 	/* Dummy FCW */
4300 	desc->req.fcw_ld.FCWversion = ACC100_FCW_VER;
4301 	desc->req.fcw_ld.qm = 1;
4302 	desc->req.fcw_ld.nfiller = 30;
4303 	desc->req.fcw_ld.BG = 2 - 1;
4304 	desc->req.fcw_ld.Zc = 7;
4305 	desc->req.fcw_ld.ncb = 350;
4306 	desc->req.fcw_ld.rm_e = 4;
4307 	desc->req.fcw_ld.itmax = 10;
4308 	desc->req.fcw_ld.gain_i = 1;
4309 	desc->req.fcw_ld.gain_h = 1;
4310 
4311 	int engines_to_restart[ACC100_SIG_UL_5G_LAST + 1] = {0};
4312 	int num_failed_engine = 0;
4313 	/* Detect engines in undefined state */
4314 	for (template_idx = ACC100_SIG_UL_5G;
4315 			template_idx <= ACC100_SIG_UL_5G_LAST;
4316 			template_idx++) {
4317 		/* Check engine power-on status */
4318 		address = HwPfFecUl5gIbDebugReg +
4319 				ACC100_ENGINE_OFFSET * template_idx;
4320 		status = (acc100_reg_read(d, address) >> 4) & 0xF;
4321 		if (status == 0) {
4322 			engines_to_restart[num_failed_engine] = template_idx;
4323 			num_failed_engine++;
4324 		}
4325 	}
4326 
4327 	int numQqsAcc = conf->q_ul_5g.num_qgroups;
4328 	int numQgs = conf->q_ul_5g.num_qgroups;
4329 	value = 0;
4330 	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
4331 		value |= (1 << qg_idx);
4332 	/* Force each engine which is in unspecified state */
4333 	for (i = 0; i < num_failed_engine; i++) {
4334 		int failed_engine = engines_to_restart[i];
4335 		printf("Force engine %d\n", failed_engine);
4336 		for (template_idx = ACC100_SIG_UL_5G;
4337 				template_idx <= ACC100_SIG_UL_5G_LAST;
4338 				template_idx++) {
4339 			address = HWPfQmgrGrpTmplateReg4Indx
4340 					+ ACC100_BYTES_IN_WORD * template_idx;
4341 			if (template_idx == failed_engine)
4342 				acc100_reg_write(d, address, value);
4343 			else
4344 				acc100_reg_write(d, address, 0);
4345 		}
4346 		/* Reset descriptor header */
4347 		desc->req.word0 = ACC100_DMA_DESC_TYPE;
4348 		desc->req.word1 = 0;
4349 		desc->req.word2 = 0;
4350 		desc->req.word3 = 0;
4351 		desc->req.numCBs = 1;
4352 		desc->req.m2dlen = 2;
4353 		desc->req.d2mlen = 1;
4354 		/* Enqueue the code block for processing */
4355 		union acc100_enqueue_reg_fmt enq_req;
4356 		enq_req.val = 0;
4357 		enq_req.addr_offset = ACC100_DESC_OFFSET;
4358 		enq_req.num_elem = 1;
4359 		enq_req.req_elem_addr = 0;
4360 		rte_wmb();
4361 		acc100_reg_write(d, HWPfQmgrIngressAq + 0x100, enq_req.val);
4362 		usleep(ACC100_LONG_WAIT * 100);
4363 		if (desc->req.word0 != 2)
4364 			printf("DMA Response %#"PRIx32"\n", desc->req.word0);
4365 	}
4366 
4367 	/* Reset LDPC Cores */
4368 	for (i = 0; i < ACC100_ENGINES_MAX; i++)
4369 		acc100_reg_write(d, HWPfFecUl5gCntrlReg +
4370 				ACC100_ENGINE_OFFSET * i,
4371 				ACC100_RESET_HI);
4372 	usleep(ACC100_LONG_WAIT);
4373 	for (i = 0; i < ACC100_ENGINES_MAX; i++)
4374 		acc100_reg_write(d, HWPfFecUl5gCntrlReg +
4375 				ACC100_ENGINE_OFFSET * i,
4376 				ACC100_RESET_LO);
4377 	usleep(ACC100_LONG_WAIT);
4378 	acc100_reg_write(d, HWPfHi5GHardResetReg, ACC100_RESET_HARD);
4379 	usleep(ACC100_LONG_WAIT);
4380 	int numEngines = 0;
4381 	/* Check engine power-on status again */
4382 	for (template_idx = ACC100_SIG_UL_5G;
4383 			template_idx <= ACC100_SIG_UL_5G_LAST;
4384 			template_idx++) {
4385 		address = HwPfFecUl5gIbDebugReg +
4386 				ACC100_ENGINE_OFFSET * template_idx;
4387 		status = (acc100_reg_read(d, address) >> 4) & 0xF;
4388 		address = HWPfQmgrGrpTmplateReg4Indx
4389 				+ ACC100_BYTES_IN_WORD * template_idx;
4390 		if (status == 1) {
4391 			acc100_reg_write(d, address, value);
4392 			numEngines++;
4393 		} else
4394 			acc100_reg_write(d, address, 0);
4395 	}
4396 	printf("Number of 5GUL engines %d\n", numEngines);
4397 
4398 	if (d->sw_rings_base != NULL)
4399 		rte_free(d->sw_rings_base);
4400 	usleep(ACC100_LONG_WAIT);
4401 }
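
/*
 * Summary of the workaround above: soft-reset all 5GUL engines, detect those
 * whose power-on status reads back as 0, steer a dummy LDPC decode code block
 * at each such engine in turn (by enabling only its template), then reset
 * again (including a hard reset) and re-enable only the engines whose status
 * now reads 1.
 */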
4402 
4403 /* Initial configuration of an ACC100 device prior to running configure() */
4404 int
4405 rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
4406 {
4407 	rte_bbdev_log(INFO, "rte_acc100_configure");
4408 	uint32_t value, address, status;
4409 	int qg_idx, template_idx, vf_idx, acc, i;
4410 	struct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);
4411 
4412 	/* Compile time checks */
4413 	RTE_BUILD_BUG_ON(sizeof(struct acc100_dma_req_desc) != 256);
4414 	RTE_BUILD_BUG_ON(sizeof(union acc100_dma_desc) != 256);
4415 	RTE_BUILD_BUG_ON(sizeof(struct acc100_fcw_td) != 24);
4416 	RTE_BUILD_BUG_ON(sizeof(struct acc100_fcw_te) != 32);
4417 
4418 	if (bbdev == NULL) {
4419 		rte_bbdev_log(ERR,
4420 		"Invalid dev_name (%s), or device is not yet initialised",
4421 		dev_name);
4422 		return -ENODEV;
4423 	}
4424 	struct acc100_device *d = bbdev->data->dev_private;
4425 
4426 	/* Store configuration */
4427 	rte_memcpy(&d->acc100_conf, conf, sizeof(d->acc100_conf));
4428 
4429 	/* PCIe Bridge configuration */
4430 	acc100_reg_write(d, HwPfPcieGpexBridgeControl, ACC100_CFG_PCI_BRIDGE);
4431 	for (i = 1; i < ACC100_GPEX_AXIMAP_NUM; i++)
4432 		acc100_reg_write(d,
4433 				HwPfPcieGpexAxiAddrMappingWindowPexBaseHigh
4434 				+ i * 16, 0);
4435 
4436 	/* Prevent blocking AXI read on BRESP for AXI Write */
4437 	address = HwPfPcieGpexAxiPioControl;
4438 	value = ACC100_CFG_PCI_AXI;
4439 	acc100_reg_write(d, address, value);
4440 
4441 	/* 5GDL PLL phase shift */
4442 	acc100_reg_write(d, HWPfChaDl5gPllPhshft0, 0x1);
4443 
4444 	/* Explicitly releasing AXI as this may be stopped after PF FLR/BME */
4445 	address = HWPfDmaAxiControl;
4446 	value = 1;
4447 	acc100_reg_write(d, address, value);
4448 
4449 	/* DDR Configuration */
4450 	address = HWPfDdrBcTim6;
4451 	value = acc100_reg_read(d, address);
4452 	value &= 0xFFFFFFFB; /* Bit 2 */
4453 #ifdef ACC100_DDR_ECC_ENABLE
4454 	value |= 0x4;
4455 #endif
4456 	acc100_reg_write(d, address, value);
4457 	address = HWPfDdrPhyDqsCountNum;
4458 #ifdef ACC100_DDR_ECC_ENABLE
4459 	value = 9;
4460 #else
4461 	value = 8;
4462 #endif
4463 	acc100_reg_write(d, address, value);
4464 
4465 	/* Set default descriptor signature */
4466 	address = HWPfDmaDescriptorSignatuture;
4467 	value = 0;
4468 	acc100_reg_write(d, address, value);
4469 
4470 	/* Enable the Error Detection in DMA */
4471 	value = ACC100_CFG_DMA_ERROR;
4472 	address = HWPfDmaErrorDetectionEn;
4473 	acc100_reg_write(d, address, value);
4474 
4475 	/* AXI Cache configuration */
4476 	value = ACC100_CFG_AXI_CACHE;
4477 	address = HWPfDmaAxcacheReg;
4478 	acc100_reg_write(d, address, value);
4479 
4480 	/* Default DMA Configuration (Qmgr Enabled) */
4481 	address = HWPfDmaConfig0Reg;
4482 	value = 0;
4483 	acc100_reg_write(d, address, value);
4484 	address = HWPfDmaQmanen;
4485 	value = 0;
4486 	acc100_reg_write(d, address, value);
4487 
4488 	/* Default RLIM/ALEN configuration */
4489 	address = HWPfDmaConfig1Reg;
4490 	value = (1 << 31) + (23 << 8) + (1 << 6) + 7;
4491 	acc100_reg_write(d, address, value);
4492 
4493 	/* Configure DMA Qmanager addresses */
4494 	address = HWPfDmaQmgrAddrReg;
4495 	value = HWPfQmgrEgressQueuesTemplate;
4496 	acc100_reg_write(d, address, value);
4497 
4498 	/* ===== Qmgr Configuration ===== */
4499 	/* Configuration of the AQueue Depth QMGR_GRP_0_DEPTH_LOG2 for UL */
4500 	int totalQgs = conf->q_ul_4g.num_qgroups +
4501 			conf->q_ul_5g.num_qgroups +
4502 			conf->q_dl_4g.num_qgroups +
4503 			conf->q_dl_5g.num_qgroups;
4504 	for (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {
4505 		address = HWPfQmgrDepthLog2Grp +
4506 		ACC100_BYTES_IN_WORD * qg_idx;
4507 		value = aqDepth(qg_idx, conf);
4508 		acc100_reg_write(d, address, value);
4509 		address = HWPfQmgrTholdGrp +
4510 		ACC100_BYTES_IN_WORD * qg_idx;
4511 		value = (1 << 16) + (1 << (aqDepth(qg_idx, conf) - 1));
4512 		acc100_reg_write(d, address, value);
4513 	}
4514 
4515 	/* Template Priority in incremental order */
4516 	for (template_idx = 0; template_idx < ACC100_NUM_TMPL;
4517 			template_idx++) {
4518 		address = HWPfQmgrGrpTmplateReg0Indx +
4519 		ACC100_BYTES_IN_WORD * (template_idx % 8);
4520 		value = ACC100_TMPL_PRI_0;
4521 		acc100_reg_write(d, address, value);
4522 		address = HWPfQmgrGrpTmplateReg1Indx +
4523 		ACC100_BYTES_IN_WORD * (template_idx % 8);
4524 		value = ACC100_TMPL_PRI_1;
4525 		acc100_reg_write(d, address, value);
4526 		address = HWPfQmgrGrpTmplateReg2indx +
4527 		ACC100_BYTES_IN_WORD * (template_idx % 8);
4528 		value = ACC100_TMPL_PRI_2;
4529 		acc100_reg_write(d, address, value);
4530 		address = HWPfQmgrGrpTmplateReg3Indx +
4531 		ACC100_BYTES_IN_WORD * (template_idx % 8);
4532 		value = ACC100_TMPL_PRI_3;
4533 		acc100_reg_write(d, address, value);
4534 	}
4535 
4536 	address = HWPfQmgrGrpPriority;
4537 	value = ACC100_CFG_QMGR_HI_P;
4538 	acc100_reg_write(d, address, value);
4539 
4540 	/* Template Configuration */
4541 	for (template_idx = 0; template_idx < ACC100_NUM_TMPL;
4542 			template_idx++) {
4543 		value = 0;
4544 		address = HWPfQmgrGrpTmplateReg4Indx
4545 				+ ACC100_BYTES_IN_WORD * template_idx;
4546 		acc100_reg_write(d, address, value);
4547 	}
4548 	/* 4GUL */
4549 	int numQgs = conf->q_ul_4g.num_qgroups;
4550 	int numQqsAcc = 0;
4551 	value = 0;
4552 	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
4553 		value |= (1 << qg_idx);
4554 	for (template_idx = ACC100_SIG_UL_4G;
4555 			template_idx <= ACC100_SIG_UL_4G_LAST;
4556 			template_idx++) {
4557 		address = HWPfQmgrGrpTmplateReg4Indx
4558 				+ ACC100_BYTES_IN_WORD * template_idx;
4559 		acc100_reg_write(d, address, value);
4560 	}
4561 	/* 5GUL */
4562 	numQqsAcc += numQgs;
4563 	numQgs	= conf->q_ul_5g.num_qgroups;
4564 	value = 0;
4565 	int numEngines = 0;
4566 	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
4567 		value |= (1 << qg_idx);
4568 	for (template_idx = ACC100_SIG_UL_5G;
4569 			template_idx <= ACC100_SIG_UL_5G_LAST;
4570 			template_idx++) {
4571 		/* Check engine power-on status */
4572 		address = HwPfFecUl5gIbDebugReg +
4573 				ACC100_ENGINE_OFFSET * template_idx;
4574 		status = (acc100_reg_read(d, address) >> 4) & 0xF;
4575 		address = HWPfQmgrGrpTmplateReg4Indx
4576 				+ ACC100_BYTES_IN_WORD * template_idx;
4577 		if (status == 1) {
4578 			acc100_reg_write(d, address, value);
4579 			numEngines++;
4580 		} else
4581 			acc100_reg_write(d, address, 0);
4582 #if RTE_ACC100_SINGLE_FEC == 1
4583 		value = 0;
4584 #endif
4585 	}
4586 	printf("Number of 5GUL engines %d\n", numEngines);
4587 	/* 4GDL */
4588 	numQqsAcc += numQgs;
4589 	numQgs	= conf->q_dl_4g.num_qgroups;
4590 	value = 0;
4591 	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
4592 		value |= (1 << qg_idx);
4593 	for (template_idx = ACC100_SIG_DL_4G;
4594 			template_idx <= ACC100_SIG_DL_4G_LAST;
4595 			template_idx++) {
4596 		address = HWPfQmgrGrpTmplateReg4Indx
4597 				+ ACC100_BYTES_IN_WORD * template_idx;
4598 		acc100_reg_write(d, address, value);
4599 #if RTE_ACC100_SINGLE_FEC == 1
4600 			value = 0;
4601 #endif
4602 	}
4603 	/* 5GDL */
4604 	numQqsAcc += numQgs;
4605 	numQgs	= conf->q_dl_5g.num_qgroups;
4606 	value = 0;
4607 	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
4608 		value |= (1 << qg_idx);
4609 	for (template_idx = ACC100_SIG_DL_5G;
4610 			template_idx <= ACC100_SIG_DL_5G_LAST;
4611 			template_idx++) {
4612 		address = HWPfQmgrGrpTmplateReg4Indx
4613 				+ ACC100_BYTES_IN_WORD * template_idx;
4614 		acc100_reg_write(d, address, value);
4615 #if RTE_ACC100_SINGLE_FEC == 1
4616 		value = 0;
4617 #endif
4618 	}
4619 
4620 	/* Queue Group Function mapping */
4621 	int qman_func_id[5] = {0, 2, 1, 3, 4};
4622 	address = HWPfQmgrGrpFunction0;
4623 	value = 0;
4624 	for (qg_idx = 0; qg_idx < 8; qg_idx++) {
4625 		acc = accFromQgid(qg_idx, conf);
4626 		value |= qman_func_id[acc]<<(qg_idx * 4);
4627 	}
4628 	acc100_reg_write(d, address, value);
4629 
4630 	/* Configuration of the Arbitration QGroup depth to 1 */
4631 	for (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {
4632 		address = HWPfQmgrArbQDepthGrp +
4633 		ACC100_BYTES_IN_WORD * qg_idx;
4634 		value = 0;
4635 		acc100_reg_write(d, address, value);
4636 	}
4637 
4638 	/* Enabling AQueues through the Queue hierarchy */
4639 	for (vf_idx = 0; vf_idx < ACC100_NUM_VFS; vf_idx++) {
4640 		for (qg_idx = 0; qg_idx < ACC100_NUM_QGRPS; qg_idx++) {
4641 			value = 0;
4642 			if (vf_idx < conf->num_vf_bundles &&
4643 					qg_idx < totalQgs)
4644 				value = (1 << aqNum(qg_idx, conf)) - 1;
4645 			address = HWPfQmgrAqEnableVf
4646 					+ vf_idx * ACC100_BYTES_IN_WORD;
4647 			value += (qg_idx << 16);
4648 			acc100_reg_write(d, address, value);
4649 		}
4650 	}
4651 
4652 	/* This pointer to ARAM (256kB) is shifted by 2 (4B per register) */
4653 	uint32_t aram_address = 0;
4654 	for (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {
4655 		for (vf_idx = 0; vf_idx < conf->num_vf_bundles; vf_idx++) {
4656 			address = HWPfQmgrVfBaseAddr + vf_idx
4657 					* ACC100_BYTES_IN_WORD + qg_idx
4658 					* ACC100_BYTES_IN_WORD * 64;
4659 			value = aram_address;
4660 			acc100_reg_write(d, address, value);
4661 			/* Offset ARAM Address for next memory bank
4662 			 * - increment of 4B
4663 			 */
4664 			aram_address += aqNum(qg_idx, conf) *
4665 					(1 << aqDepth(qg_idx, conf));
4666 		}
4667 	}
4668 
4669 	if (aram_address > ACC100_WORDS_IN_ARAM_SIZE) {
4670 		rte_bbdev_log(ERR, "ARAM Configuration not fitting %d %d\n",
4671 				aram_address, ACC100_WORDS_IN_ARAM_SIZE);
4672 		return -EINVAL;
4673 	}
4674 
4675 	/* ==== HI Configuration ==== */
4676 
4677 	/* Prevent Block on Transmit Error */
4678 	address = HWPfHiBlockTransmitOnErrorEn;
4679 	value = 0;
4680 	acc100_reg_write(d, address, value);
4681 	/* Prevent MSI from being dropped */
4682 	address = HWPfHiMsiDropEnableReg;
4683 	value = 0;
4684 	acc100_reg_write(d, address, value);
4685 	/* Set the PF Mode register */
4686 	address = HWPfHiPfMode;
4687 	value = (conf->pf_mode_en) ? ACC100_PF_VAL : 0;
4688 	acc100_reg_write(d, address, value);
4689 	/* Enable Error Detection in HW */
4690 	address = HWPfDmaErrorDetectionEn;
4691 	value = 0x3D7;
4692 	acc100_reg_write(d, address, value);
4693 
4694 	/* QoS overflow init */
4695 	value = 1;
4696 	address = HWPfQosmonAEvalOverflow0;
4697 	acc100_reg_write(d, address, value);
4698 	address = HWPfQosmonBEvalOverflow0;
4699 	acc100_reg_write(d, address, value);
4700 
4701 	/* HARQ DDR Configuration */
4702 	unsigned int ddrSizeInMb = 512; /* Fixed to 512 MB per VF for now */
4703 	for (vf_idx = 0; vf_idx < conf->num_vf_bundles; vf_idx++) {
4704 		address = HWPfDmaVfDdrBaseRw + vf_idx
4705 				* 0x10;
4706 		value = ((vf_idx * (ddrSizeInMb / 64)) << 16) +
4707 				(ddrSizeInMb - 1);
4708 		acc100_reg_write(d, address, value);
4709 	}
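	/*
	 * Interpretation of the value programmed above (not a documented
	 * register layout): the upper half-word holds the per-VF HARQ DDR base
	 * offset in 64MB units and the lower bits hold the usable size minus
	 * one in MB, e.g. VF index 1 with 512MB yields (8 << 16) + 511.
	 */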
4710 	usleep(ACC100_LONG_WAIT);
4711 
4712 	/* Workaround in case some 5GUL engines are in an unexpected state */
4713 	if (numEngines < (ACC100_SIG_UL_5G_LAST + 1))
4714 		poweron_cleanup(bbdev, d, conf);
4715 
4716 	rte_bbdev_log_debug("PF Tip configuration complete for %s", dev_name);
4717 	return 0;
4718 }
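
/*
 * Illustrative PF usage sketch (only fields referenced in this file are
 * shown and the PCI address is hypothetical; struct rte_acc100_conf carries
 * further members):
 *
 *   struct rte_acc100_conf conf = {0};
 *   conf.pf_mode_en = 1;
 *   conf.num_vf_bundles = 1;
 *   conf.q_ul_5g.num_qgroups = 4;
 *   conf.q_dl_5g.num_qgroups = 4;
 *   if (rte_acc100_configure("0000:b1:00.0", &conf) != 0)
 *       rte_panic("ACC100 PF configuration failed\n");
 */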
4719