/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include "dlb_hw_types.h"
#include "../../dlb_user.h"
#include "dlb_resource.h"
#include "dlb_osdep.h"
#include "dlb_osdep_bitmap.h"
#include "dlb_osdep_types.h"
#include "dlb_regs.h"
#include "../../dlb_priv.h"
#include "../../dlb_inline_fns.h"

#define DLB_DOM_LIST_HEAD(head, type) \
	DLB_LIST_HEAD((head), type, domain_list)

#define DLB_FUNC_LIST_HEAD(head, type) \
	DLB_LIST_HEAD((head), type, func_list)

#define DLB_DOM_LIST_FOR(head, ptr, iter) \
	DLB_LIST_FOR_EACH(head, ptr, domain_list, iter)

#define DLB_FUNC_LIST_FOR(head, ptr, iter) \
	DLB_LIST_FOR_EACH(head, ptr, func_list, iter)

#define DLB_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
	DLB_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)

#define DLB_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
	DLB_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)

static inline void dlb_flush_csr(struct dlb_hw *hw)
{
	DLB_CSR_RD(hw, DLB_SYS_TOTAL_VAS);
}
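
/* Note (not from the original source): dlb_flush_csr() above is presumably a
 * read-back barrier; reading any DLB CSR forces previously posted MMIO writes
 * to complete, and DLB_SYS_TOTAL_VAS appears to serve only as a harmless
 * register to read for that purpose.
 */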

static void dlb_init_fn_rsrc_lists(struct dlb_function_resources *rsrc)
{
	dlb_list_init_head(&rsrc->avail_domains);
	dlb_list_init_head(&rsrc->used_domains);
	dlb_list_init_head(&rsrc->avail_ldb_queues);
	dlb_list_init_head(&rsrc->avail_ldb_ports);
	dlb_list_init_head(&rsrc->avail_dir_pq_pairs);
	dlb_list_init_head(&rsrc->avail_ldb_credit_pools);
	dlb_list_init_head(&rsrc->avail_dir_credit_pools);
}

static void dlb_init_domain_rsrc_lists(struct dlb_domain *domain)
{
	dlb_list_init_head(&domain->used_ldb_queues);
	dlb_list_init_head(&domain->used_ldb_ports);
	dlb_list_init_head(&domain->used_dir_pq_pairs);
	dlb_list_init_head(&domain->used_ldb_credit_pools);
	dlb_list_init_head(&domain->used_dir_credit_pools);
	dlb_list_init_head(&domain->avail_ldb_queues);
	dlb_list_init_head(&domain->avail_ldb_ports);
	dlb_list_init_head(&domain->avail_dir_pq_pairs);
	dlb_list_init_head(&domain->avail_ldb_credit_pools);
	dlb_list_init_head(&domain->avail_dir_credit_pools);
}

int dlb_resource_init(struct dlb_hw *hw)
{
	struct dlb_list_entry *list;
	unsigned int i;

	/* For optimal load-balancing, ports that map to one or more QIDs in
	 * common should not be in numerical sequence. This is application
	 * dependent, but the driver interleaves port IDs as much as possible
	 * to reduce the likelihood of this. This initial allocation maximizes
	 * the average distance between an ID and its immediate neighbors (i.e.
	 * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
	 * 3, etc.).
	 */
	u32 init_ldb_port_allocation[DLB_MAX_NUM_LDB_PORTS] = {
		0,  31, 62, 29, 60, 27, 58, 25, 56, 23, 54, 21, 52, 19, 50, 17,
		48, 15, 46, 13, 44, 11, 42,  9, 40,  7, 38,  5, 36,  3, 34, 1,
		32, 63, 30, 61, 28, 59, 26, 57, 24, 55, 22, 53, 20, 51, 18, 49,
		16, 47, 14, 45, 12, 43, 10, 41,  8, 39,  6, 37,  4, 35,  2, 33
	};
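
	/* Illustrative note (not from the original source): the table above
	 * is equivalent to stepping through the 64 port IDs with a stride of
	 * 31; because gcd(31, 64) == 1, every ID is visited exactly once and
	 * numeric neighbors land far apart in the sequence:
	 *
	 *	for (i = 0; i < DLB_MAX_NUM_LDB_PORTS; i++)
	 *		init_ldb_port_allocation[i] =
	 *			(i * 31) % DLB_MAX_NUM_LDB_PORTS;
	 */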

	/* Zero-out resource tracking data structures */
	memset(&hw->rsrcs, 0, sizeof(hw->rsrcs));
	memset(&hw->pf, 0, sizeof(hw->pf));

	dlb_init_fn_rsrc_lists(&hw->pf);

	for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {
		memset(&hw->domains[i], 0, sizeof(hw->domains[i]));
		dlb_init_domain_rsrc_lists(&hw->domains[i]);
		hw->domains[i].parent_func = &hw->pf;
	}

	/* Give all resources to the PF driver */
	hw->pf.num_avail_domains = DLB_MAX_NUM_DOMAINS;
	for (i = 0; i < hw->pf.num_avail_domains; i++) {
		list = &hw->domains[i].func_list;

		dlb_list_add(&hw->pf.avail_domains, list);
	}

	hw->pf.num_avail_ldb_queues = DLB_MAX_NUM_LDB_QUEUES;
	for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
		list = &hw->rsrcs.ldb_queues[i].func_list;

		dlb_list_add(&hw->pf.avail_ldb_queues, list);
	}

	hw->pf.num_avail_ldb_ports = DLB_MAX_NUM_LDB_PORTS;
	for (i = 0; i < hw->pf.num_avail_ldb_ports; i++) {
		struct dlb_ldb_port *port;

		port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];

		dlb_list_add(&hw->pf.avail_ldb_ports, &port->func_list);
	}

	hw->pf.num_avail_dir_pq_pairs = DLB_MAX_NUM_DIR_PORTS;
	for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
		list = &hw->rsrcs.dir_pq_pairs[i].func_list;

		dlb_list_add(&hw->pf.avail_dir_pq_pairs, list);
	}

	hw->pf.num_avail_ldb_credit_pools = DLB_MAX_NUM_LDB_CREDIT_POOLS;
	for (i = 0; i < hw->pf.num_avail_ldb_credit_pools; i++) {
		list = &hw->rsrcs.ldb_credit_pools[i].func_list;

		dlb_list_add(&hw->pf.avail_ldb_credit_pools, list);
	}

	hw->pf.num_avail_dir_credit_pools = DLB_MAX_NUM_DIR_CREDIT_POOLS;
	for (i = 0; i < hw->pf.num_avail_dir_credit_pools; i++) {
		list = &hw->rsrcs.dir_credit_pools[i].func_list;

		dlb_list_add(&hw->pf.avail_dir_credit_pools, list);
	}

	/* There are 5120 history list entries, which allows us to overprovision
	 * the inflight limit (4096) by 1k.
	 */
	if (dlb_bitmap_alloc(hw,
			     &hw->pf.avail_hist_list_entries,
			     DLB_MAX_NUM_HIST_LIST_ENTRIES))
		return -1;

	if (dlb_bitmap_fill(hw->pf.avail_hist_list_entries))
		return -1;

	if (dlb_bitmap_alloc(hw,
			     &hw->pf.avail_qed_freelist_entries,
			     DLB_MAX_NUM_LDB_CREDITS))
		return -1;

	if (dlb_bitmap_fill(hw->pf.avail_qed_freelist_entries))
		return -1;

	if (dlb_bitmap_alloc(hw,
			     &hw->pf.avail_dqed_freelist_entries,
			     DLB_MAX_NUM_DIR_CREDITS))
		return -1;

	if (dlb_bitmap_fill(hw->pf.avail_dqed_freelist_entries))
		return -1;

	if (dlb_bitmap_alloc(hw,
			     &hw->pf.avail_aqed_freelist_entries,
			     DLB_MAX_NUM_AQOS_ENTRIES))
		return -1;

	if (dlb_bitmap_fill(hw->pf.avail_aqed_freelist_entries))
		return -1;

	/* Initialize the hardware resource IDs */
	for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++)
		hw->domains[i].id = i;

	for (i = 0; i < DLB_MAX_NUM_LDB_QUEUES; i++)
		hw->rsrcs.ldb_queues[i].id = i;

	for (i = 0; i < DLB_MAX_NUM_LDB_PORTS; i++)
		hw->rsrcs.ldb_ports[i].id = i;

	for (i = 0; i < DLB_MAX_NUM_DIR_PORTS; i++)
		hw->rsrcs.dir_pq_pairs[i].id = i;

	for (i = 0; i < DLB_MAX_NUM_LDB_CREDIT_POOLS; i++)
		hw->rsrcs.ldb_credit_pools[i].id = i;

	for (i = 0; i < DLB_MAX_NUM_DIR_CREDIT_POOLS; i++)
		hw->rsrcs.dir_credit_pools[i].id = i;

	for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
		hw->rsrcs.sn_groups[i].id = i;
		/* Default mode (0) is 32 sequence numbers per queue */
		hw->rsrcs.sn_groups[i].mode = 0;
		hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 32;
		hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
	}

	return 0;
}

void dlb_resource_free(struct dlb_hw *hw)
{
	dlb_bitmap_free(hw->pf.avail_hist_list_entries);

	dlb_bitmap_free(hw->pf.avail_qed_freelist_entries);

	dlb_bitmap_free(hw->pf.avail_dqed_freelist_entries);

	dlb_bitmap_free(hw->pf.avail_aqed_freelist_entries);
}
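
/* Minimal usage sketch (illustrative only, not part of the driver): the PF
 * driver is expected to pair the two routines above around the device's
 * lifetime, e.g.:
 *
 *	if (dlb_resource_init(hw))
 *		return -1;
 *	...
 *	dlb_resource_free(hw);
 */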

static struct dlb_domain *dlb_get_domain_from_id(struct dlb_hw *hw, u32 id)
{
	if (id >= DLB_MAX_NUM_DOMAINS)
		return NULL;

	return &hw->domains[id];
}

static int dlb_attach_ldb_queues(struct dlb_hw *hw,
				 struct dlb_function_resources *rsrcs,
				 struct dlb_domain *domain,
				 u32 num_queues,
				 struct dlb_cmd_response *resp)
{
	unsigned int i, j;

	if (rsrcs->num_avail_ldb_queues < num_queues) {
		resp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;
		return -1;
	}

	for (i = 0; i < num_queues; i++) {
		struct dlb_ldb_queue *queue;

		queue = DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
					   typeof(*queue));
		if (queue == NULL) {
			DLB_HW_ERR(hw,
				   "[%s()] Internal error: domain validation failed\n",
				   __func__);
			goto cleanup;
		}

		dlb_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);

		queue->domain_id = domain->id;
		queue->owned = true;

		dlb_list_add(&domain->avail_ldb_queues, &queue->domain_list);
	}

	rsrcs->num_avail_ldb_queues -= num_queues;

	return 0;

cleanup:

	/* Return the assigned queues */
	for (j = 0; j < i; j++) {
		struct dlb_ldb_queue *queue;

		queue = DLB_FUNC_LIST_HEAD(domain->avail_ldb_queues,
					   typeof(*queue));
		/* Unrecoverable internal error */
		if (queue == NULL)
			break;

		queue->owned = false;

		dlb_list_del(&domain->avail_ldb_queues, &queue->domain_list);

		dlb_list_add(&rsrcs->avail_ldb_queues, &queue->func_list);
	}

	return -EFAULT;
}

static struct dlb_ldb_port *
dlb_get_next_ldb_port(struct dlb_hw *hw,
		      struct dlb_function_resources *rsrcs,
		      u32 domain_id)
{
	struct dlb_list_entry *iter;
	RTE_SET_USED(iter);
	struct dlb_ldb_port *port;

	/* To reduce the odds of consecutive load-balanced ports mapping to the
	 * same queue(s), the driver attempts to allocate ports whose neighbors
	 * are owned by a different domain.
	 */
	DLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[next].owned ||
		    hw->rsrcs.ldb_ports[next].domain_id == domain_id)
			continue;

		if (!hw->rsrcs.ldb_ports[prev].owned ||
		    hw->rsrcs.ldb_ports[prev].domain_id == domain_id)
			continue;

		return port;
	}

	/* Failing that, the driver looks for a port with one neighbor owned by
	 * a different domain and the other unallocated.
	 */
	DLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[prev].owned &&
		    hw->rsrcs.ldb_ports[next].owned &&
		    hw->rsrcs.ldb_ports[next].domain_id != domain_id)
			return port;

		if (!hw->rsrcs.ldb_ports[next].owned &&
		    hw->rsrcs.ldb_ports[prev].owned &&
		    hw->rsrcs.ldb_ports[prev].domain_id != domain_id)
			return port;
	}

	/* Failing that, the driver looks for a port with both neighbors
	 * unallocated.
	 */
	DLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[prev].owned &&
		    !hw->rsrcs.ldb_ports[next].owned)
			return port;
	}

	/* If all else fails, the driver returns the next available port. */
	return DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports, typeof(*port));
}
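
/* Worked example of the neighbor checks above (illustrative): with 64 ports,
 * candidate port 0 has wraparound neighbors 63 and 1. If both 63 and 1 are
 * owned by other domains, port 0 is returned by the first pass; if exactly
 * one neighbor is owned by another domain and the other is unallocated, the
 * second pass returns it; if both are unallocated, the third pass does.
 */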

static int dlb_attach_ldb_ports(struct dlb_hw *hw,
				struct dlb_function_resources *rsrcs,
				struct dlb_domain *domain,
				u32 num_ports,
				struct dlb_cmd_response *resp)
{
	unsigned int i, j;

	if (rsrcs->num_avail_ldb_ports < num_ports) {
		resp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;
		return -1;
	}

	for (i = 0; i < num_ports; i++) {
		struct dlb_ldb_port *port;

		port = dlb_get_next_ldb_port(hw, rsrcs, domain->id);

		if (port == NULL) {
			DLB_HW_ERR(hw,
				   "[%s()] Internal error: domain validation failed\n",
				   __func__);
			goto cleanup;
		}

		dlb_list_del(&rsrcs->avail_ldb_ports, &port->func_list);

		port->domain_id = domain->id;
		port->owned = true;

		dlb_list_add(&domain->avail_ldb_ports, &port->domain_list);
	}

	rsrcs->num_avail_ldb_ports -= num_ports;

	return 0;

cleanup:

	/* Return the assigned ports */
	for (j = 0; j < i; j++) {
		struct dlb_ldb_port *port;

		port = DLB_FUNC_LIST_HEAD(domain->avail_ldb_ports,
					  typeof(*port));
		/* Unrecoverable internal error */
		if (port == NULL)
			break;

		port->owned = false;

		dlb_list_del(&domain->avail_ldb_ports, &port->domain_list);

		dlb_list_add(&rsrcs->avail_ldb_ports, &port->func_list);
	}

	return -EFAULT;
}

static int dlb_attach_dir_ports(struct dlb_hw *hw,
				struct dlb_function_resources *rsrcs,
				struct dlb_domain *domain,
				u32 num_ports,
				struct dlb_cmd_response *resp)
{
	unsigned int i, j;

	if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
		resp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;
		return -1;
	}

	for (i = 0; i < num_ports; i++) {
		struct dlb_dir_pq_pair *port;

		port = DLB_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
					  typeof(*port));
		if (port == NULL) {
			DLB_HW_ERR(hw,
				   "[%s()] Internal error: domain validation failed\n",
				   __func__);
			goto cleanup;
		}

		dlb_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);

		port->domain_id = domain->id;
		port->owned = true;

		dlb_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
	}

	rsrcs->num_avail_dir_pq_pairs -= num_ports;

	return 0;

cleanup:

	/* Return the assigned ports */
	for (j = 0; j < i; j++) {
		struct dlb_dir_pq_pair *port;

		port = DLB_FUNC_LIST_HEAD(domain->avail_dir_pq_pairs,
					  typeof(*port));
		/* Unrecoverable internal error */
		if (port == NULL)
			break;

		port->owned = false;

		dlb_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);

		dlb_list_add(&rsrcs->avail_dir_pq_pairs, &port->func_list);
	}

	return -EFAULT;
}

static int dlb_attach_ldb_credits(struct dlb_function_resources *rsrcs,
				  struct dlb_domain *domain,
				  u32 num_credits,
				  struct dlb_cmd_response *resp)
{
	struct dlb_bitmap *bitmap = rsrcs->avail_qed_freelist_entries;

	if (dlb_bitmap_count(bitmap) < (int)num_credits) {
		resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
		return -1;
	}

	if (num_credits) {
		int base;

		base = dlb_bitmap_find_set_bit_range(bitmap, num_credits);
		if (base < 0)
			goto error;

		domain->qed_freelist.base = base;
		domain->qed_freelist.bound = base + num_credits;
		domain->qed_freelist.offset = 0;

		dlb_bitmap_clear_range(bitmap, base, num_credits);
	}

	return 0;

error:
	resp->status = DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE;
	return -1;
}
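
/* Freelist semantics of the attach above, with hypothetical numbers: if the
 * first free contiguous run in the QED bitmap starts at entry 512 and
 * num_credits is 256, the domain is granted the half-open range [512, 768):
 *
 *	domain->qed_freelist.base   = 512;
 *	domain->qed_freelist.bound  = 768;  (base + num_credits)
 *	domain->qed_freelist.offset = 0;    (no entries handed out yet)
 *
 * dlb_attach_dir_credits() below applies the same scheme to the DQED bitmap.
 */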

static int dlb_attach_dir_credits(struct dlb_function_resources *rsrcs,
				  struct dlb_domain *domain,
				  u32 num_credits,
				  struct dlb_cmd_response *resp)
{
	struct dlb_bitmap *bitmap = rsrcs->avail_dqed_freelist_entries;

	if (dlb_bitmap_count(bitmap) < (int)num_credits) {
		resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
		return -1;
	}

	if (num_credits) {
		int base;

		base = dlb_bitmap_find_set_bit_range(bitmap, num_credits);
		if (base < 0)
			goto error;

		domain->dqed_freelist.base = base;
		domain->dqed_freelist.bound = base + num_credits;
		domain->dqed_freelist.offset = 0;

		dlb_bitmap_clear_range(bitmap, base, num_credits);
	}

	return 0;

error:
	resp->status = DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE;
	return -1;
}

static int dlb_attach_ldb_credit_pools(struct dlb_hw *hw,
				       struct dlb_function_resources *rsrcs,
				       struct dlb_domain *domain,
				       u32 num_credit_pools,
				       struct dlb_cmd_response *resp)
{
	unsigned int i, j;

	if (rsrcs->num_avail_ldb_credit_pools < num_credit_pools) {
		resp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;
		return -1;
	}

	for (i = 0; i < num_credit_pools; i++) {
		struct dlb_credit_pool *pool;

		pool = DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_credit_pools,
					  typeof(*pool));
		if (pool == NULL) {
			DLB_HW_ERR(hw,
				   "[%s()] Internal error: domain validation failed\n",
				   __func__);
			goto cleanup;
		}

		dlb_list_del(&rsrcs->avail_ldb_credit_pools,
			     &pool->func_list);

		pool->domain_id = domain->id;
		pool->owned = true;

		dlb_list_add(&domain->avail_ldb_credit_pools,
			     &pool->domain_list);
	}

	rsrcs->num_avail_ldb_credit_pools -= num_credit_pools;

	return 0;

cleanup:

	/* Return the assigned credit pools */
	for (j = 0; j < i; j++) {
		struct dlb_credit_pool *pool;

		pool = DLB_FUNC_LIST_HEAD(domain->avail_ldb_credit_pools,
					  typeof(*pool));
		/* Unrecoverable internal error */
		if (pool == NULL)
			break;

		pool->owned = false;

		dlb_list_del(&domain->avail_ldb_credit_pools,
			     &pool->domain_list);

		dlb_list_add(&rsrcs->avail_ldb_credit_pools,
			     &pool->func_list);
	}

	return -EFAULT;
}

static int dlb_attach_dir_credit_pools(struct dlb_hw *hw,
				       struct dlb_function_resources *rsrcs,
				       struct dlb_domain *domain,
				       u32 num_credit_pools,
				       struct dlb_cmd_response *resp)
{
	unsigned int i, j;

	if (rsrcs->num_avail_dir_credit_pools < num_credit_pools) {
		resp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;
		return -1;
	}

	for (i = 0; i < num_credit_pools; i++) {
		struct dlb_credit_pool *pool;

		pool = DLB_FUNC_LIST_HEAD(rsrcs->avail_dir_credit_pools,
					  typeof(*pool));
		if (pool == NULL) {
			DLB_HW_ERR(hw,
				   "[%s()] Internal error: domain validation failed\n",
				   __func__);
			goto cleanup;
		}

		dlb_list_del(&rsrcs->avail_dir_credit_pools,
			     &pool->func_list);

		pool->domain_id = domain->id;
		pool->owned = true;

		dlb_list_add(&domain->avail_dir_credit_pools,
			     &pool->domain_list);
	}

	rsrcs->num_avail_dir_credit_pools -= num_credit_pools;

	return 0;

cleanup:

	/* Return the assigned credit pools */
	for (j = 0; j < i; j++) {
		struct dlb_credit_pool *pool;

		pool = DLB_FUNC_LIST_HEAD(domain->avail_dir_credit_pools,
					  typeof(*pool));
		/* Unrecoverable internal error */
		if (pool == NULL)
			break;

		pool->owned = false;

		dlb_list_del(&domain->avail_dir_credit_pools,
			     &pool->domain_list);

		dlb_list_add(&rsrcs->avail_dir_credit_pools,
			     &pool->func_list);
	}

	return -EFAULT;
}

static int
dlb_attach_domain_hist_list_entries(struct dlb_function_resources *rsrcs,
				    struct dlb_domain *domain,
				    u32 num_hist_list_entries,
				    struct dlb_cmd_response *resp)
{
	struct dlb_bitmap *bitmap;
	int base;

	if (num_hist_list_entries) {
		bitmap = rsrcs->avail_hist_list_entries;

		base = dlb_bitmap_find_set_bit_range(bitmap,
						     num_hist_list_entries);
		if (base < 0)
			goto error;

		domain->total_hist_list_entries = num_hist_list_entries;
		domain->avail_hist_list_entries = num_hist_list_entries;
		domain->hist_list_entry_base = base;
		domain->hist_list_entry_offset = 0;

		dlb_bitmap_clear_range(bitmap, base, num_hist_list_entries);
	}
	return 0;

error:
	resp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
	return -1;
}

static int dlb_attach_atomic_inflights(struct dlb_function_resources *rsrcs,
				       struct dlb_domain *domain,
				       u32 num_atomic_inflights,
				       struct dlb_cmd_response *resp)
{
	if (num_atomic_inflights) {
		struct dlb_bitmap *bitmap =
			rsrcs->avail_aqed_freelist_entries;
		int base;

		base = dlb_bitmap_find_set_bit_range(bitmap,
						     num_atomic_inflights);
		if (base < 0)
			goto error;

		domain->aqed_freelist.base = base;
		domain->aqed_freelist.bound = base + num_atomic_inflights;
		domain->aqed_freelist.offset = 0;

		dlb_bitmap_clear_range(bitmap, base, num_atomic_inflights);
	}

	return 0;

error:
	resp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
	return -1;
}


static int
dlb_domain_attach_resources(struct dlb_hw *hw,
			    struct dlb_function_resources *rsrcs,
			    struct dlb_domain *domain,
			    struct dlb_create_sched_domain_args *args,
			    struct dlb_cmd_response *resp)
{
	int ret;

	ret = dlb_attach_ldb_queues(hw,
				    rsrcs,
				    domain,
				    args->num_ldb_queues,
				    resp);
	if (ret < 0)
		return ret;

	ret = dlb_attach_ldb_ports(hw,
				   rsrcs,
				   domain,
				   args->num_ldb_ports,
				   resp);
	if (ret < 0)
		return ret;

	ret = dlb_attach_dir_ports(hw,
				   rsrcs,
				   domain,
				   args->num_dir_ports,
				   resp);
	if (ret < 0)
		return ret;

	ret = dlb_attach_ldb_credits(rsrcs,
				     domain,
				     args->num_ldb_credits,
				     resp);
	if (ret < 0)
		return ret;

	ret = dlb_attach_dir_credits(rsrcs,
				     domain,
				     args->num_dir_credits,
				     resp);
	if (ret < 0)
		return ret;

	ret = dlb_attach_ldb_credit_pools(hw,
					  rsrcs,
					  domain,
					  args->num_ldb_credit_pools,
					  resp);
	if (ret < 0)
		return ret;

	ret = dlb_attach_dir_credit_pools(hw,
					  rsrcs,
					  domain,
					  args->num_dir_credit_pools,
					  resp);
	if (ret < 0)
		return ret;

	ret = dlb_attach_domain_hist_list_entries(rsrcs,
						  domain,
						  args->num_hist_list_entries,
						  resp);
	if (ret < 0)
		return ret;

	ret = dlb_attach_atomic_inflights(rsrcs,
					  domain,
					  args->num_atomic_inflights,
					  resp);
	if (ret < 0)
		return ret;

	domain->configured = true;

	domain->started = false;

	rsrcs->num_avail_domains--;

	return 0;
}

static void dlb_ldb_port_cq_enable(struct dlb_hw *hw,
				   struct dlb_ldb_port *port)
{
	union dlb_lsp_cq_ldb_dsbl reg;

	/* Don't re-enable the port if a removal is pending. The caller should
	 * mark this port as enabled (if it isn't already), and when the
	 * removal completes the port will be enabled.
	 */
	if (port->num_pending_removals)
		return;

	reg.field.disabled = 0;

	DLB_CSR_WR(hw, DLB_LSP_CQ_LDB_DSBL(port->id), reg.val);

	dlb_flush_csr(hw);
}

static void dlb_dir_port_cq_enable(struct dlb_hw *hw,
				   struct dlb_dir_pq_pair *port)
{
	union dlb_lsp_cq_dir_dsbl reg;

	reg.field.disabled = 0;

	DLB_CSR_WR(hw, DLB_LSP_CQ_DIR_DSBL(port->id), reg.val);

	dlb_flush_csr(hw);
}


static void dlb_ldb_port_cq_disable(struct dlb_hw *hw,
				    struct dlb_ldb_port *port)
{
	union dlb_lsp_cq_ldb_dsbl reg;

	reg.field.disabled = 1;

	DLB_CSR_WR(hw, DLB_LSP_CQ_LDB_DSBL(port->id), reg.val);

	dlb_flush_csr(hw);
}

static void dlb_dir_port_cq_disable(struct dlb_hw *hw,
				    struct dlb_dir_pq_pair *port)
{
	union dlb_lsp_cq_dir_dsbl reg;

	reg.field.disabled = 1;

	DLB_CSR_WR(hw, DLB_LSP_CQ_DIR_DSBL(port->id), reg.val);

	dlb_flush_csr(hw);
}



void dlb_disable_dp_vasr_feature(struct dlb_hw *hw)
{
	union dlb_dp_dir_csr_ctrl r0;

	r0.val = DLB_CSR_RD(hw, DLB_DP_DIR_CSR_CTRL);

	r0.field.cfg_vasr_dis = 1;

	DLB_CSR_WR(hw, DLB_DP_DIR_CSR_CTRL, r0.val);
}

void dlb_enable_excess_tokens_alarm(struct dlb_hw *hw)
{
	union dlb_chp_cfg_chp_csr_ctrl r0;

	r0.val = DLB_CSR_RD(hw, DLB_CHP_CFG_CHP_CSR_CTRL);

	r0.val |= 1 << DLB_CHP_CFG_EXCESS_TOKENS_SHIFT;

	DLB_CSR_WR(hw, DLB_CHP_CFG_CHP_CSR_CTRL, r0.val);
}

void dlb_hw_enable_sparse_ldb_cq_mode(struct dlb_hw *hw)
{
	union dlb_sys_cq_mode r0;

	r0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);

	r0.field.ldb_cq64 = 1;

	DLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);
}

void dlb_hw_enable_sparse_dir_cq_mode(struct dlb_hw *hw)
{
	union dlb_sys_cq_mode r0;

	r0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);

	r0.field.dir_cq64 = 1;

	DLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);
}

void dlb_hw_disable_pf_to_vf_isr_pend_err(struct dlb_hw *hw)
{
	union dlb_sys_sys_alarm_int_enable r0;

	r0.val = DLB_CSR_RD(hw, DLB_SYS_SYS_ALARM_INT_ENABLE);

	r0.field.pf_to_vf_isr_pend_error = 0;

	DLB_CSR_WR(hw, DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);
}

static unsigned int
dlb_get_num_ports_in_use(struct dlb_hw *hw)
{
	unsigned int i, n = 0;

	for (i = 0; i < DLB_MAX_NUM_LDB_PORTS; i++)
		if (hw->rsrcs.ldb_ports[i].owned)
			n++;

	for (i = 0; i < DLB_MAX_NUM_DIR_PORTS; i++)
		if (hw->rsrcs.dir_pq_pairs[i].owned)
			n++;

	return n;
}

static bool dlb_port_find_slot(struct dlb_ldb_port *port,
			       enum dlb_qid_map_state state,
			       int *slot)
{
	int i;

	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (port->qid_map[i].state == state)
			break;
	}

	*slot = i;

	return (i < DLB_MAX_NUM_QIDS_PER_LDB_CQ);
}

static bool dlb_port_find_slot_queue(struct dlb_ldb_port *port,
				     enum dlb_qid_map_state state,
				     struct dlb_ldb_queue *queue,
				     int *slot)
{
	int i;

	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (port->qid_map[i].state == state &&
		    port->qid_map[i].qid == queue->id)
			break;
	}

	*slot = i;

	return (i < DLB_MAX_NUM_QIDS_PER_LDB_CQ);
}

static int dlb_port_slot_state_transition(struct dlb_hw *hw,
					  struct dlb_ldb_port *port,
					  struct dlb_ldb_queue *queue,
					  int slot,
					  enum dlb_qid_map_state new_state)
{
	enum dlb_qid_map_state curr_state = port->qid_map[slot].state;
	struct dlb_domain *domain;

	domain = dlb_get_domain_from_id(hw, port->domain_id);
	if (domain == NULL) {
		DLB_HW_ERR(hw,
			   "[%s()] Internal error: unable to find domain %d\n",
			   __func__, port->domain_id);
		return -EFAULT;
	}

	switch (curr_state) {
	case DLB_QUEUE_UNMAPPED:
		switch (new_state) {
		case DLB_QUEUE_MAPPED:
			queue->num_mappings++;
			port->num_mappings++;
			break;
		case DLB_QUEUE_MAP_IN_PROGRESS:
			queue->num_pending_additions++;
			domain->num_pending_additions++;
			break;
		default:
			goto error;
		}
		break;
	case DLB_QUEUE_MAPPED:
		switch (new_state) {
		case DLB_QUEUE_UNMAPPED:
			queue->num_mappings--;
			port->num_mappings--;
			break;
		case DLB_QUEUE_UNMAP_IN_PROGRESS:
			port->num_pending_removals++;
			domain->num_pending_removals++;
			break;
		case DLB_QUEUE_MAPPED:
			/* Priority change, nothing to update */
			break;
		default:
			goto error;
		}
		break;
	case DLB_QUEUE_MAP_IN_PROGRESS:
		switch (new_state) {
		case DLB_QUEUE_UNMAPPED:
			queue->num_pending_additions--;
			domain->num_pending_additions--;
			break;
		case DLB_QUEUE_MAPPED:
			queue->num_mappings++;
			port->num_mappings++;
			queue->num_pending_additions--;
			domain->num_pending_additions--;
			break;
		default:
			goto error;
		}
		break;
	case DLB_QUEUE_UNMAP_IN_PROGRESS:
		switch (new_state) {
		case DLB_QUEUE_UNMAPPED:
			port->num_pending_removals--;
			domain->num_pending_removals--;
			queue->num_mappings--;
			port->num_mappings--;
			break;
		case DLB_QUEUE_MAPPED:
			port->num_pending_removals--;
			domain->num_pending_removals--;
			break;
		case DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP:
			/* Nothing to update */
			break;
		default:
			goto error;
		}
		break;
	case DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP:
		switch (new_state) {
		case DLB_QUEUE_UNMAP_IN_PROGRESS:
			/* Nothing to update */
			break;
		case DLB_QUEUE_UNMAPPED:
			/* An UNMAP_IN_PROGRESS_PENDING_MAP slot briefly
			 * becomes UNMAPPED before it transitions to
			 * MAP_IN_PROGRESS.
			 */
			queue->num_mappings--;
			port->num_mappings--;
			port->num_pending_removals--;
			domain->num_pending_removals--;
			break;
		default:
			goto error;
		}
		break;
	default:
		goto error;
	}

	port->qid_map[slot].state = new_state;

	DLB_HW_INFO(hw,
		    "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
		    __func__, queue->id, port->id, curr_state,
		    new_state);
	return 0;

error:
	DLB_HW_ERR(hw,
		   "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
		   __func__, queue->id, port->id, curr_state,
		   new_state);
	return -EFAULT;
}
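
/* Summary of the legal slot state transitions handled above:
 *
 *	UNMAPPED          -> MAPPED, MAP_IN_PROGRESS
 *	MAPPED            -> UNMAPPED, UNMAP_IN_PROGRESS,
 *			     MAPPED (priority change only)
 *	MAP_IN_PROGRESS   -> UNMAPPED, MAPPED
 *	UNMAP_IN_PROGRESS -> UNMAPPED, MAPPED,
 *			     UNMAP_IN_PROGRESS_PENDING_MAP
 *	UNMAP_IN_PROGRESS_PENDING_MAP -> UNMAP_IN_PROGRESS, UNMAPPED
 *
 * Any other combination is rejected with -EFAULT.
 */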

/* dlb_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as their
 * function names imply, and should only be called by the dynamic CQ mapping
 * code.
 */
static void dlb_ldb_queue_disable_mapped_cqs(struct dlb_hw *hw,
					     struct dlb_domain *domain,
					     struct dlb_ldb_queue *queue)
{
	struct dlb_list_entry *iter;
	RTE_SET_USED(iter);
	struct dlb_ldb_port *port;
	int slot;

	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
		enum dlb_qid_map_state state = DLB_QUEUE_MAPPED;

		if (!dlb_port_find_slot_queue(port, state, queue, &slot))
			continue;

		if (port->enabled)
			dlb_ldb_port_cq_disable(hw, port);
	}
}

static void dlb_ldb_queue_enable_mapped_cqs(struct dlb_hw *hw,
					    struct dlb_domain *domain,
					    struct dlb_ldb_queue *queue)
{
	struct dlb_list_entry *iter;
	RTE_SET_USED(iter);
	struct dlb_ldb_port *port;
	int slot;

	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
		enum dlb_qid_map_state state = DLB_QUEUE_MAPPED;

		if (!dlb_port_find_slot_queue(port, state, queue, &slot))
			continue;

		if (port->enabled)
			dlb_ldb_port_cq_enable(hw, port);
	}
}

static int dlb_ldb_port_map_qid_static(struct dlb_hw *hw,
				       struct dlb_ldb_port *p,
				       struct dlb_ldb_queue *q,
				       u8 priority)
{
	union dlb_lsp_cq2priov r0;
	union dlb_lsp_cq2qid r1;
	union dlb_atm_pipe_qid_ldb_qid2cqidx r2;
	union dlb_lsp_qid_ldb_qid2cqidx r3;
	union dlb_lsp_qid_ldb_qid2cqidx2 r4;
	enum dlb_qid_map_state state;
	int i;

	/* Look for a pending or already mapped slot, else an unused slot */
	if (!dlb_port_find_slot_queue(p, DLB_QUEUE_MAP_IN_PROGRESS, q, &i) &&
	    !dlb_port_find_slot_queue(p, DLB_QUEUE_MAPPED, q, &i) &&
	    !dlb_port_find_slot(p, DLB_QUEUE_UNMAPPED, &i)) {
		DLB_HW_ERR(hw,
			   "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
			   __func__, __LINE__);
		return -EFAULT;
	}

	if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB_HW_ERR(hw,
			   "[%s():%d] Internal error: port slot tracking failed\n",
			   __func__, __LINE__);
		return -EFAULT;
	}

	/* Read-modify-write the priority and valid bit register */
	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(p->id));

	r0.field.v |= 1 << i;
	r0.field.prio |= (priority & 0x7) << i * 3;

	DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(p->id), r0.val);

	/* Read-modify-write the QID map register */
	r1.val = DLB_CSR_RD(hw, DLB_LSP_CQ2QID(p->id, i / 4));

	if (i == 0 || i == 4)
		r1.field.qid_p0 = q->id;
	if (i == 1 || i == 5)
		r1.field.qid_p1 = q->id;
	if (i == 2 || i == 6)
		r1.field.qid_p2 = q->id;
	if (i == 3 || i == 7)
		r1.field.qid_p3 = q->id;

	DLB_CSR_WR(hw, DLB_LSP_CQ2QID(p->id, i / 4), r1.val);

	r2.val = DLB_CSR_RD(hw,
			    DLB_ATM_PIPE_QID_LDB_QID2CQIDX(q->id,
							   p->id / 4));

	r3.val = DLB_CSR_RD(hw,
			    DLB_LSP_QID_LDB_QID2CQIDX(q->id,
						      p->id / 4));

	r4.val = DLB_CSR_RD(hw,
			    DLB_LSP_QID_LDB_QID2CQIDX2(q->id,
						       p->id / 4));

	switch (p->id % 4) {
	case 0:
		r2.field.cq_p0 |= 1 << i;
		r3.field.cq_p0 |= 1 << i;
		r4.field.cq_p0 |= 1 << i;
		break;

	case 1:
		r2.field.cq_p1 |= 1 << i;
		r3.field.cq_p1 |= 1 << i;
		r4.field.cq_p1 |= 1 << i;
		break;

	case 2:
		r2.field.cq_p2 |= 1 << i;
		r3.field.cq_p2 |= 1 << i;
		r4.field.cq_p2 |= 1 << i;
		break;

	case 3:
		r2.field.cq_p3 |= 1 << i;
		r3.field.cq_p3 |= 1 << i;
		r4.field.cq_p3 |= 1 << i;
		break;
	}

	DLB_CSR_WR(hw,
		   DLB_ATM_PIPE_QID_LDB_QID2CQIDX(q->id,
						  p->id / 4),
		   r2.val);

	DLB_CSR_WR(hw,
		   DLB_LSP_QID_LDB_QID2CQIDX(q->id,
					     p->id / 4),
		   r3.val);

	DLB_CSR_WR(hw,
		   DLB_LSP_QID_LDB_QID2CQIDX2(q->id,
					      p->id / 4),
		   r4.val);

	dlb_flush_csr(hw);

	p->qid_map[i].qid = q->id;
	p->qid_map[i].priority = priority;

	state = DLB_QUEUE_MAPPED;

	return dlb_port_slot_state_transition(hw, p, q, i, state);
}
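
/* Register indexing in the static mapping above, by way of example: QID map
 * slot i of port p lives in CQ2QID register i / 4, field qid_p(i % 4), while
 * the per-queue QID2CQIDX* registers are selected by p->id / 4 with field
 * cq_p(p->id % 4) and bit position i. So slot 5 of port 10 writes field
 * qid_p1 of DLB_LSP_CQ2QID(10, 1) and sets bit 5 of the cq_p2 field in the
 * three QID2CQIDX* registers indexed by (q->id, 2).
 */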

static int dlb_ldb_port_set_has_work_bits(struct dlb_hw *hw,
					  struct dlb_ldb_port *port,
					  struct dlb_ldb_queue *queue,
					  int slot)
{
	union dlb_lsp_qid_aqed_active_cnt r0;
	union dlb_lsp_qid_ldb_enqueue_cnt r1;
	union dlb_lsp_ldb_sched_ctrl r2 = { {0} };

	/* Set the atomic scheduling haswork bit */
	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));

	r2.field.cq = port->id;
	r2.field.qidix = slot;
	r2.field.value = 1;
	r2.field.rlist_haswork_v = r0.field.count > 0;

	/* Set the non-atomic scheduling haswork bit */
	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);

	r1.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));

	memset(&r2, 0, sizeof(r2));

	r2.field.cq = port->id;
	r2.field.qidix = slot;
	r2.field.value = 1;
	r2.field.nalb_haswork_v = (r1.field.count > 0);

	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);

	dlb_flush_csr(hw);

	return 0;
}

static void dlb_ldb_port_clear_queue_if_status(struct dlb_hw *hw,
					       struct dlb_ldb_port *port,
					       int slot)
{
	union dlb_lsp_ldb_sched_ctrl r0 = { {0} };

	r0.field.cq = port->id;
	r0.field.qidix = slot;
	r0.field.value = 0;
	r0.field.inflight_ok_v = 1;

	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r0.val);

	dlb_flush_csr(hw);
}

static void dlb_ldb_port_set_queue_if_status(struct dlb_hw *hw,
					     struct dlb_ldb_port *port,
					     int slot)
{
	union dlb_lsp_ldb_sched_ctrl r0 = { {0} };

	r0.field.cq = port->id;
	r0.field.qidix = slot;
	r0.field.value = 1;
	r0.field.inflight_ok_v = 1;

	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r0.val);

	dlb_flush_csr(hw);
}

static void dlb_ldb_queue_set_inflight_limit(struct dlb_hw *hw,
					     struct dlb_ldb_queue *queue)
{
	union dlb_lsp_qid_ldb_infl_lim r0 = { {0} };

	r0.field.limit = queue->num_qid_inflights;

	DLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), r0.val);
}

static void dlb_ldb_queue_clear_inflight_limit(struct dlb_hw *hw,
					       struct dlb_ldb_queue *queue)
{
	DLB_CSR_WR(hw,
		   DLB_LSP_QID_LDB_INFL_LIM(queue->id),
		   DLB_LSP_QID_LDB_INFL_LIM_RST);
}

static int dlb_ldb_port_finish_map_qid_dynamic(struct dlb_hw *hw,
					       struct dlb_domain *domain,
					       struct dlb_ldb_port *port,
					       struct dlb_ldb_queue *queue)
{
	struct dlb_list_entry *iter;
	RTE_SET_USED(iter);
	union dlb_lsp_qid_ldb_infl_cnt r0;
	enum dlb_qid_map_state state;
	int slot, ret;
	u8 prio;

	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));

	if (r0.field.count) {
		DLB_HW_ERR(hw,
			   "[%s()] Internal error: non-zero QID inflight count\n",
			   __func__);
		return -EFAULT;
	}

	/* For each port with a pending mapping to this queue, perform the
	 * static mapping and set the corresponding has_work bits.
	 */
	state = DLB_QUEUE_MAP_IN_PROGRESS;
	if (!dlb_port_find_slot_queue(port, state, queue, &slot))
		return -EINVAL;

	if (slot >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB_HW_ERR(hw,
			   "[%s():%d] Internal error: port slot tracking failed\n",
			   __func__, __LINE__);
		return -EFAULT;
	}

	prio = port->qid_map[slot].priority;

	/* Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
	 * the port's qid_map state.
	 */
	ret = dlb_ldb_port_map_qid_static(hw, port, queue, prio);
	if (ret)
		return ret;

	ret = dlb_ldb_port_set_has_work_bits(hw, port, queue, slot);
	if (ret)
		return ret;

	/* Ensure IF_status(cq,qid) is 0 before enabling the port to
	 * prevent spurious schedules to cause the queue's inflight
	 * count to increase.
	 */
	dlb_ldb_port_clear_queue_if_status(hw, port, slot);

	/* Reset the queue's inflight status */
	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
		state = DLB_QUEUE_MAPPED;
		if (!dlb_port_find_slot_queue(port, state, queue, &slot))
			continue;

		dlb_ldb_port_set_queue_if_status(hw, port, slot);
	}

	dlb_ldb_queue_set_inflight_limit(hw, queue);

	/* Re-enable CQs mapped to this queue */
	dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);

	/* If this queue has other mappings pending, clear its inflight limit */
	if (queue->num_pending_additions > 0)
		dlb_ldb_queue_clear_inflight_limit(hw, queue);

	return 0;
}

/**
 * dlb_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
 * @hw: dlb_hw handle for a particular device.
 * @port: load-balanced port
 * @queue: load-balanced queue
 * @priority: queue servicing priority
 *
 * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
 * at a later point, and <0 if an error occurred.
 */
static int dlb_ldb_port_map_qid_dynamic(struct dlb_hw *hw,
					struct dlb_ldb_port *port,
					struct dlb_ldb_queue *queue,
					u8 priority)
{
	union dlb_lsp_qid_ldb_infl_cnt r0 = { {0} };
	enum dlb_qid_map_state state;
	struct dlb_domain *domain;
	int slot, ret;

	domain = dlb_get_domain_from_id(hw, port->domain_id);
	if (domain == NULL) {
		DLB_HW_ERR(hw,
			   "[%s()] Internal error: unable to find domain %d\n",
			   __func__, port->domain_id);
		return -EFAULT;
	}

	/* Set the QID inflight limit to 0 to prevent further scheduling of the
	 * queue.
	 */
	DLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), 0);

	if (!dlb_port_find_slot(port, DLB_QUEUE_UNMAPPED, &slot)) {
		DLB_HW_ERR(hw,
			   "Internal error: No available unmapped slots\n");
		return -EFAULT;
	}

	if (slot >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB_HW_ERR(hw,
			   "[%s():%d] Internal error: port slot tracking failed\n",
			   __func__, __LINE__);
		return -EFAULT;
	}

	port->qid_map[slot].qid = queue->id;
	port->qid_map[slot].priority = priority;

	state = DLB_QUEUE_MAP_IN_PROGRESS;
	ret = dlb_port_slot_state_transition(hw, port, queue, slot, state);
	if (ret)
		return ret;

	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));

	if (r0.field.count) {
		/* The queue is owed completions so it's not safe to map it
		 * yet. Schedule a kernel thread to complete the mapping later,
		 * once software has completed all the queue's inflight events.
		 */
		if (!os_worker_active(hw))
			os_schedule_work(hw);

		return 1;
	}

	/* Disable the affected CQ, and the CQs already mapped to the QID,
	 * before reading the QID's inflight count a second time. There is an
	 * unlikely race in which the QID may schedule one more QE after we
	 * read an inflight count of 0, and disabling the CQs guarantees that
	 * the race will not occur after a re-read of the inflight count
	 * register.
	 */
	if (port->enabled)
		dlb_ldb_port_cq_disable(hw, port);

	dlb_ldb_queue_disable_mapped_cqs(hw, domain, queue);

	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));

	if (r0.field.count) {
		if (port->enabled)
			dlb_ldb_port_cq_enable(hw, port);

		dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);

		/* The queue is owed completions so it's not safe to map it
		 * yet. Schedule a kernel thread to complete the mapping later,
		 * once software has completed all the queue's inflight events.
		 */
		if (!os_worker_active(hw))
			os_schedule_work(hw);

		return 1;
	}

	return dlb_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
}


static int dlb_ldb_port_map_qid(struct dlb_hw *hw,
				struct dlb_domain *domain,
				struct dlb_ldb_port *port,
				struct dlb_ldb_queue *queue,
				u8 prio)
{
	if (domain->started)
		return dlb_ldb_port_map_qid_dynamic(hw, port, queue, prio);
	else
		return dlb_ldb_port_map_qid_static(hw, port, queue, prio);
}

static int dlb_ldb_port_unmap_qid(struct dlb_hw *hw,
				  struct dlb_ldb_port *port,
				  struct dlb_ldb_queue *queue)
{
	enum dlb_qid_map_state mapped, in_progress, pending_map, unmapped;
	union dlb_lsp_cq2priov r0;
	union dlb_atm_pipe_qid_ldb_qid2cqidx r1;
	union dlb_lsp_qid_ldb_qid2cqidx r2;
	union dlb_lsp_qid_ldb_qid2cqidx2 r3;
	u32 queue_id;
	u32 port_id;
	int i;

	/* Find the queue's slot */
	mapped = DLB_QUEUE_MAPPED;
	in_progress = DLB_QUEUE_UNMAP_IN_PROGRESS;
	pending_map = DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP;

	if (!dlb_port_find_slot_queue(port, mapped, queue, &i) &&
	    !dlb_port_find_slot_queue(port, in_progress, queue, &i) &&
	    !dlb_port_find_slot_queue(port, pending_map, queue, &i)) {
		DLB_HW_ERR(hw,
			   "[%s():%d] Internal error: QID %d isn't mapped\n",
			   __func__, __LINE__, queue->id);
		return -EFAULT;
	}

	if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB_HW_ERR(hw,
			   "[%s():%d] Internal error: port slot tracking failed\n",
			   __func__, __LINE__);
		return -EFAULT;
	}

	port_id = port->id;
	queue_id = queue->id;

	/* Read-modify-write the priority and valid bit register */
	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(port_id));

	r0.field.v &= ~(1 << i);

	DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port_id), r0.val);

	r1.val = DLB_CSR_RD(hw,
			    DLB_ATM_PIPE_QID_LDB_QID2CQIDX(queue_id,
							   port_id / 4));

	r2.val = DLB_CSR_RD(hw,
			    DLB_LSP_QID_LDB_QID2CQIDX(queue_id,
						      port_id / 4));

	r3.val = DLB_CSR_RD(hw,
			    DLB_LSP_QID_LDB_QID2CQIDX2(queue_id,
						       port_id / 4));

	switch (port_id % 4) {
	case 0:
		r1.field.cq_p0 &= ~(1 << i);
		r2.field.cq_p0 &= ~(1 << i);
		r3.field.cq_p0 &= ~(1 << i);
		break;

	case 1:
		r1.field.cq_p1 &= ~(1 << i);
		r2.field.cq_p1 &= ~(1 << i);
		r3.field.cq_p1 &= ~(1 << i);
		break;

	case 2:
		r1.field.cq_p2 &= ~(1 << i);
		r2.field.cq_p2 &= ~(1 << i);
		r3.field.cq_p2 &= ~(1 << i);
		break;

	case 3:
		r1.field.cq_p3 &= ~(1 << i);
		r2.field.cq_p3 &= ~(1 << i);
		r3.field.cq_p3 &= ~(1 << i);
		break;
	}

	DLB_CSR_WR(hw,
		   DLB_ATM_PIPE_QID_LDB_QID2CQIDX(queue_id, port_id / 4),
		   r1.val);

	DLB_CSR_WR(hw,
		   DLB_LSP_QID_LDB_QID2CQIDX(queue_id, port_id / 4),
		   r2.val);

	DLB_CSR_WR(hw,
		   DLB_LSP_QID_LDB_QID2CQIDX2(queue_id, port_id / 4),
		   r3.val);

	dlb_flush_csr(hw);

	unmapped = DLB_QUEUE_UNMAPPED;

	return dlb_port_slot_state_transition(hw, port, queue, i, unmapped);
}

static int
dlb_verify_create_sched_domain_args(struct dlb_hw *hw,
				    struct dlb_function_resources *rsrcs,
				    struct dlb_create_sched_domain_args *args,
				    struct dlb_cmd_response *resp)
{
	struct dlb_list_entry *iter;
	RTE_SET_USED(iter);
	struct dlb_bitmap *ldb_credit_freelist;
	struct dlb_bitmap *dir_credit_freelist;
	unsigned int ldb_credit_freelist_count;
	unsigned int dir_credit_freelist_count;
	unsigned int max_contig_aqed_entries;
	unsigned int max_contig_dqed_entries;
	unsigned int max_contig_qed_entries;
	unsigned int max_contig_hl_entries;
	struct dlb_bitmap *aqed_freelist;
	enum dlb_dev_revision revision;

	ldb_credit_freelist = rsrcs->avail_qed_freelist_entries;
	dir_credit_freelist = rsrcs->avail_dqed_freelist_entries;
	aqed_freelist = rsrcs->avail_aqed_freelist_entries;

	ldb_credit_freelist_count = dlb_bitmap_count(ldb_credit_freelist);
	dir_credit_freelist_count = dlb_bitmap_count(dir_credit_freelist);

	max_contig_hl_entries =
		dlb_bitmap_longest_set_range(rsrcs->avail_hist_list_entries);
	max_contig_aqed_entries =
		dlb_bitmap_longest_set_range(aqed_freelist);
	max_contig_qed_entries =
		dlb_bitmap_longest_set_range(ldb_credit_freelist);
	max_contig_dqed_entries =
		dlb_bitmap_longest_set_range(dir_credit_freelist);

	if (rsrcs->num_avail_domains < 1)
		resp->status = DLB_ST_DOMAIN_UNAVAILABLE;
	else if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues)
		resp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;
	else if (rsrcs->num_avail_ldb_ports < args->num_ldb_ports)
		resp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;
	else if (args->num_ldb_queues > 0 && args->num_ldb_ports == 0)
		resp->status = DLB_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
	else if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports)
		resp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;
	else if (ldb_credit_freelist_count < args->num_ldb_credits)
		resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
	else if (dir_credit_freelist_count < args->num_dir_credits)
		resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
	else if (rsrcs->num_avail_ldb_credit_pools < args->num_ldb_credit_pools)
		resp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;
	else if (rsrcs->num_avail_dir_credit_pools < args->num_dir_credit_pools)
		resp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;
	else if (max_contig_hl_entries < args->num_hist_list_entries)
		resp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
	else if (max_contig_aqed_entries < args->num_atomic_inflights)
		resp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
	else if (max_contig_qed_entries < args->num_ldb_credits)
		resp->status = DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE;
	else if (max_contig_dqed_entries < args->num_dir_credits)
		resp->status = DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE;

	/* DLB A-stepping workaround for hardware write buffer lock up issue:
	 * limit the maximum configured ports to less than 128 and disable CQ
	 * occupancy interrupts.
	 */
	revision = os_get_dev_revision(hw);

	if (revision < DLB_B0) {
		u32 n = dlb_get_num_ports_in_use(hw);

		n += args->num_ldb_ports + args->num_dir_ports;

		if (n >= DLB_A_STEP_MAX_PORTS)
			resp->status = args->num_ldb_ports ?
				DLB_ST_LDB_PORTS_UNAVAILABLE :
				DLB_ST_DIR_PORTS_UNAVAILABLE;
	}

	if (resp->status)
		return -1;

	return 0;
}


static void
dlb_log_create_sched_domain_args(struct dlb_hw *hw,
				 struct dlb_create_sched_domain_args *args)
{
	DLB_HW_INFO(hw, "DLB create sched domain arguments:\n");
	DLB_HW_INFO(hw, "\tNumber of LDB queues:        %d\n",
		    args->num_ldb_queues);
	DLB_HW_INFO(hw, "\tNumber of LDB ports:         %d\n",
		    args->num_ldb_ports);
	DLB_HW_INFO(hw, "\tNumber of DIR ports:         %d\n",
		    args->num_dir_ports);
	DLB_HW_INFO(hw, "\tNumber of ATM inflights:     %d\n",
		    args->num_atomic_inflights);
	DLB_HW_INFO(hw, "\tNumber of hist list entries: %d\n",
		    args->num_hist_list_entries);
	DLB_HW_INFO(hw, "\tNumber of LDB credits:       %d\n",
		    args->num_ldb_credits);
	DLB_HW_INFO(hw, "\tNumber of DIR credits:       %d\n",
		    args->num_dir_credits);
	DLB_HW_INFO(hw, "\tNumber of LDB credit pools:  %d\n",
		    args->num_ldb_credit_pools);
	DLB_HW_INFO(hw, "\tNumber of DIR credit pools:  %d\n",
		    args->num_dir_credit_pools);
}

/**
 * dlb_hw_create_sched_domain() - Allocate and initialize a DLB scheduling
 *	domain and its resources.
 * @hw:	  Contains the current state of the DLB hardware.
 * @args: User-provided arguments.
 * @resp: Response to user.
 *
 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
 * satisfy a request, resp->status will be set accordingly.
 */
int dlb_hw_create_sched_domain(struct dlb_hw *hw,
			       struct dlb_create_sched_domain_args *args,
			       struct dlb_cmd_response *resp)
{
	struct dlb_domain *domain;
	struct dlb_function_resources *rsrcs;
	int ret;

	rsrcs = &hw->pf;

	dlb_log_create_sched_domain_args(hw, args);

	/* Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	if (dlb_verify_create_sched_domain_args(hw, rsrcs, args, resp))
		return -EINVAL;

	domain = DLB_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));

	/* Verification should catch this. */
	if (domain == NULL) {
		DLB_HW_ERR(hw,
			   "[%s():%d] Internal error: no available domains\n",
			   __func__, __LINE__);
		return -EFAULT;
	}

	if (domain->configured) {
		DLB_HW_ERR(hw,
			   "[%s()] Internal error: avail_domains contains configured domains.\n",
			   __func__);
		return -EFAULT;
	}

	dlb_init_domain_rsrc_lists(domain);

	/* Verification should catch this too. */
	ret = dlb_domain_attach_resources(hw, rsrcs, domain, args, resp);
	if (ret < 0) {
		DLB_HW_ERR(hw,
			   "[%s()] Internal error: failed to verify args.\n",
			   __func__);

		return -EFAULT;
	}

	dlb_list_del(&rsrcs->avail_domains, &domain->func_list);

	dlb_list_add(&rsrcs->used_domains, &domain->func_list);

	resp->id = domain->id;
	resp->status = 0;

	return 0;
}
1826 
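/* Example usage (an illustrative sketch, not part of the driver; the
 * resource counts and the surrounding domain_id variable are hypothetical):
 *
 *	struct dlb_create_sched_domain_args args = { 0 };
 *	struct dlb_cmd_response resp = { 0 };
 *	int domain_id;
 *
 *	args.num_ldb_queues = 2;
 *	args.num_ldb_ports = 4;
 *	args.num_ldb_credits = 1024;
 *	args.num_ldb_credit_pools = 1;
 *
 *	if (dlb_hw_create_sched_domain(hw, &args, &resp))
 *		return -1;	(resp.status holds the DLB_ST_* reason)
 *
 *	domain_id = resp.id;
 */
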
1827 static void
1828 dlb_configure_ldb_credit_pool(struct dlb_hw *hw,
1829 			      struct dlb_domain *domain,
1830 			      struct dlb_create_ldb_pool_args *args,
1831 			      struct dlb_credit_pool *pool)
1832 {
1833 	union dlb_sys_ldb_pool_enbld r0 = { {0} };
1834 	union dlb_chp_ldb_pool_crd_lim r1 = { {0} };
1835 	union dlb_chp_ldb_pool_crd_cnt r2 = { {0} };
1836 	union dlb_chp_qed_fl_base  r3 = { {0} };
1837 	union dlb_chp_qed_fl_lim r4 = { {0} };
1838 	union dlb_chp_qed_fl_push_ptr r5 = { {0} };
1839 	union dlb_chp_qed_fl_pop_ptr  r6 = { {0} };
1840 
1841 	r1.field.limit = args->num_ldb_credits;
1842 
1843 	DLB_CSR_WR(hw, DLB_CHP_LDB_POOL_CRD_LIM(pool->id), r1.val);
1844 
1845 	r2.field.count = args->num_ldb_credits;
1846 
1847 	DLB_CSR_WR(hw, DLB_CHP_LDB_POOL_CRD_CNT(pool->id), r2.val);
1848 
1849 	r3.field.base = domain->qed_freelist.base + domain->qed_freelist.offset;
1850 
1851 	DLB_CSR_WR(hw, DLB_CHP_QED_FL_BASE(pool->id), r3.val);
1852 
1853 	r4.field.freelist_disable = 0;
1854 	r4.field.limit = r3.field.base + args->num_ldb_credits - 1;
1855 
1856 	DLB_CSR_WR(hw, DLB_CHP_QED_FL_LIM(pool->id), r4.val);
1857 
1858 	r5.field.push_ptr = r3.field.base;
1859 	r5.field.generation = 1;
1860 
1861 	DLB_CSR_WR(hw, DLB_CHP_QED_FL_PUSH_PTR(pool->id), r5.val);
1862 
1863 	r6.field.pop_ptr = r3.field.base;
1864 	r6.field.generation = 0;
1865 
1866 	DLB_CSR_WR(hw, DLB_CHP_QED_FL_POP_PTR(pool->id), r6.val);
1867 
1868 	r0.field.pool_enabled = 1;
1869 
1870 	DLB_CSR_WR(hw, DLB_SYS_LDB_POOL_ENBLD(pool->id), r0.val);
1871 
1872 	pool->avail_credits = args->num_ldb_credits;
1873 	pool->total_credits = args->num_ldb_credits;
1874 	domain->qed_freelist.offset += args->num_ldb_credits;
1875 
1876 	pool->configured = true;
1877 }
1878 
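/* Worked example of the freelist carving done above (hypothetical sizes):
 * with qed_freelist.base = 0, creating two pools of 512 credits gives the
 * first pool QED entries [0, 511] (limit = 511) and advances
 * qed_freelist.offset to 512, so the second pool receives [512, 1023].
 */
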
1879 static int
1880 dlb_verify_create_ldb_pool_args(struct dlb_hw *hw,
1881 				u32 domain_id,
1882 				struct dlb_create_ldb_pool_args *args,
1883 				struct dlb_cmd_response *resp)
1884 {
1885 	struct dlb_freelist *qed_freelist;
1886 	struct dlb_domain *domain;
1887 
1888 	domain = dlb_get_domain_from_id(hw, domain_id);
1889 
1890 	if (domain == NULL) {
1891 		resp->status = DLB_ST_INVALID_DOMAIN_ID;
1892 		return -1;
1893 	}
1894 
1895 	if (!domain->configured) {
1896 		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
1897 		return -1;
1898 	}
1899 
1900 	qed_freelist = &domain->qed_freelist;
1901 
1902 	if (dlb_freelist_count(qed_freelist) < args->num_ldb_credits) {
1903 		resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
1904 		return -1;
1905 	}
1906 
1907 	if (dlb_list_empty(&domain->avail_ldb_credit_pools)) {
1908 		resp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;
1909 		return -1;
1910 	}
1911 
1912 	if (domain->started) {
1913 		resp->status = DLB_ST_DOMAIN_STARTED;
1914 		return -1;
1915 	}
1916 
1917 	return 0;
1918 }
1919 
1920 static void
1921 dlb_log_create_ldb_pool_args(struct dlb_hw *hw,
1922 			     u32 domain_id,
1923 			     struct dlb_create_ldb_pool_args *args)
1924 {
1925 	DLB_HW_INFO(hw, "DLB create load-balanced credit pool arguments:\n");
1926 	DLB_HW_INFO(hw, "\tDomain ID:             %d\n", domain_id);
1927 	DLB_HW_INFO(hw, "\tNumber of LDB credits: %d\n",
1928 		    args->num_ldb_credits);
1929 }
1930 
1931 /**
1932  * dlb_hw_create_ldb_pool() - Allocate and initialize a DLB load-balanced credit pool.
1933  * @hw:	  Contains the current state of the DLB hardware.
 * @domain_id: Domain ID of the scheduling domain that owns the pool.
1934  * @args: User-provided arguments.
1935  * @resp: Response to user.
1936  *
1937  * Return: 0 on success, < 0 on error. If the driver is unable to satisfy a
1938  * request, resp->status will be set accordingly.
1939  */
1940 int dlb_hw_create_ldb_pool(struct dlb_hw *hw,
1941 			   u32 domain_id,
1942 			   struct dlb_create_ldb_pool_args *args,
1943 			   struct dlb_cmd_response *resp)
1944 {
1945 	struct dlb_credit_pool *pool;
1946 	struct dlb_domain *domain;
1947 
1948 	dlb_log_create_ldb_pool_args(hw, domain_id, args);
1949 
1950 	/* Verify that hardware resources are available before attempting to
1951 	 * satisfy the request. This simplifies the error unwinding code.
1952 	 */
1953 	if (dlb_verify_create_ldb_pool_args(hw, domain_id, args, resp))
1954 		return -EINVAL;
1955 
1956 	domain = dlb_get_domain_from_id(hw, domain_id);
1957 	if (domain == NULL) {
1958 		DLB_HW_ERR(hw,
1959 			   "[%s():%d] Internal error: domain not found\n",
1960 			   __func__, __LINE__);
1961 		return -EFAULT;
1962 	}
1963 
1964 	pool = DLB_DOM_LIST_HEAD(domain->avail_ldb_credit_pools, typeof(*pool));
1965 
1966 	/* Verification should catch this. */
1967 	if (pool == NULL) {
1968 		DLB_HW_ERR(hw,
1969 			   "[%s():%d] Internal error: no available ldb credit pools\n",
1970 			   __func__, __LINE__);
1971 		return -EFAULT;
1972 	}
1973 
1974 	dlb_configure_ldb_credit_pool(hw, domain, args, pool);
1975 
1976 	/* Configuration succeeded, so move the resource from the 'avail' to
1977 	 * the 'used' list.
1978 	 */
1979 	dlb_list_del(&domain->avail_ldb_credit_pools, &pool->domain_list);
1980 
1981 	dlb_list_add(&domain->used_ldb_credit_pools, &pool->domain_list);
1982 
1983 	resp->status = 0;
1984 	resp->id = pool->id;
1985 
1986 	return 0;
1987 }
1988 
1989 static void
1990 dlb_configure_dir_credit_pool(struct dlb_hw *hw,
1991 			      struct dlb_domain *domain,
1992 			      struct dlb_create_dir_pool_args *args,
1993 			      struct dlb_credit_pool *pool)
1994 {
1995 	union dlb_sys_dir_pool_enbld r0 = { {0} };
1996 	union dlb_chp_dir_pool_crd_lim r1 = { {0} };
1997 	union dlb_chp_dir_pool_crd_cnt r2 = { {0} };
1998 	union dlb_chp_dqed_fl_base  r3 = { {0} };
1999 	union dlb_chp_dqed_fl_lim r4 = { {0} };
2000 	union dlb_chp_dqed_fl_push_ptr r5 = { {0} };
2001 	union dlb_chp_dqed_fl_pop_ptr  r6 = { {0} };
2002 
2003 	r1.field.limit = args->num_dir_credits;
2004 
2005 	DLB_CSR_WR(hw, DLB_CHP_DIR_POOL_CRD_LIM(pool->id), r1.val);
2006 
2007 	r2.field.count = args->num_dir_credits;
2008 
2009 	DLB_CSR_WR(hw, DLB_CHP_DIR_POOL_CRD_CNT(pool->id), r2.val);
2010 
2011 	r3.field.base = domain->dqed_freelist.base +
2012 			domain->dqed_freelist.offset;
2013 
2014 	DLB_CSR_WR(hw, DLB_CHP_DQED_FL_BASE(pool->id), r3.val);
2015 
2016 	r4.field.freelist_disable = 0;
2017 	r4.field.limit = r3.field.base + args->num_dir_credits - 1;
2018 
2019 	DLB_CSR_WR(hw, DLB_CHP_DQED_FL_LIM(pool->id), r4.val);
2020 
2021 	r5.field.push_ptr = r3.field.base;
2022 	r5.field.generation = 1;
2023 
2024 	DLB_CSR_WR(hw, DLB_CHP_DQED_FL_PUSH_PTR(pool->id), r5.val);
2025 
2026 	r6.field.pop_ptr = r3.field.base;
2027 	r6.field.generation = 0;
2028 
2029 	DLB_CSR_WR(hw, DLB_CHP_DQED_FL_POP_PTR(pool->id), r6.val);
2030 
2031 	r0.field.pool_enabled = 1;
2032 
2033 	DLB_CSR_WR(hw, DLB_SYS_DIR_POOL_ENBLD(pool->id), r0.val);
2034 
2035 	pool->avail_credits = args->num_dir_credits;
2036 	pool->total_credits = args->num_dir_credits;
2037 	domain->dqed_freelist.offset += args->num_dir_credits;
2038 
2039 	pool->configured = true;
2040 }
2041 
2042 static int
2043 dlb_verify_create_dir_pool_args(struct dlb_hw *hw,
2044 				u32 domain_id,
2045 				struct dlb_create_dir_pool_args *args,
2046 				struct dlb_cmd_response *resp)
2047 {
2048 	struct dlb_freelist *dqed_freelist;
2049 	struct dlb_domain *domain;
2050 
2051 	domain = dlb_get_domain_from_id(hw, domain_id);
2052 
2053 	if (domain == NULL) {
2054 		resp->status = DLB_ST_INVALID_DOMAIN_ID;
2055 		return -1;
2056 	}
2057 
2058 	if (!domain->configured) {
2059 		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
2060 		return -1;
2061 	}
2062 
2063 	dqed_freelist = &domain->dqed_freelist;
2064 
2065 	if (dlb_freelist_count(dqed_freelist) < args->num_dir_credits) {
2066 		resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
2067 		return -1;
2068 	}
2069 
2070 	if (dlb_list_empty(&domain->avail_dir_credit_pools)) {
2071 		resp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;
2072 		return -1;
2073 	}
2074 
2075 	if (domain->started) {
2076 		resp->status = DLB_ST_DOMAIN_STARTED;
2077 		return -1;
2078 	}
2079 
2080 	return 0;
2081 }
2082 
2083 static void
2084 dlb_log_create_dir_pool_args(struct dlb_hw *hw,
2085 			     u32 domain_id,
2086 			     struct dlb_create_dir_pool_args *args)
2087 {
2088 	DLB_HW_INFO(hw, "DLB create directed credit pool arguments:\n");
2089 	DLB_HW_INFO(hw, "\tDomain ID:             %d\n", domain_id);
2090 	DLB_HW_INFO(hw, "\tNumber of DIR credits: %d\n",
2091 		    args->num_dir_credits);
2092 }
2093 
2094 /**
2095  * dlb_hw_create_dir_pool() - Allocate and initialize a DLB directed credit pool.
2096  * @hw:	  Contains the current state of the DLB hardware.
 * @domain_id: Domain ID of the scheduling domain that owns the pool.
2097  * @args: User-provided arguments.
2098  * @resp: Response to user.
2099  *
2100  * Return: 0 on success, < 0 on error. If the driver is unable to satisfy a
2101  * request, resp->status will be set accordingly.
2102  */
2103 int dlb_hw_create_dir_pool(struct dlb_hw *hw,
2104 			   u32 domain_id,
2105 			   struct dlb_create_dir_pool_args *args,
2106 			   struct dlb_cmd_response *resp)
2107 {
2108 	struct dlb_credit_pool *pool;
2109 	struct dlb_domain *domain;
2110 
2111 	dlb_log_create_dir_pool_args(hw, domain_id, args);
2112 
2113 	/* Verify that hardware resources, including at least one available
2114 	 * credit pool, exist before attempting to satisfy the request. This
2115 	 * simplifies the error unwinding code.
2116 	 */
2117 	if (dlb_verify_create_dir_pool_args(hw, domain_id, args, resp))
2118 		return -EINVAL;
2119 
2120 	domain = dlb_get_domain_from_id(hw, domain_id);
2121 	if (domain == NULL) {
2122 		DLB_HW_ERR(hw,
2123 			   "[%s():%d] Internal error: domain not found\n",
2124 			   __func__, __LINE__);
2125 		return -EFAULT;
2126 	}
2127 
2128 	pool = DLB_DOM_LIST_HEAD(domain->avail_dir_credit_pools, typeof(*pool));
2129 
2130 	/* Verification should catch this. */
2131 	if (pool == NULL) {
2132 		DLB_HW_ERR(hw,
2133 			   "[%s():%d] Internal error: no available dir credit pools\n",
2134 			   __func__, __LINE__);
2135 		return -EFAULT;
2136 	}
2137 
2138 	dlb_configure_dir_credit_pool(hw, domain, args, pool);
2139 
2140 	/* Configuration succeeded, so move the resource from the 'avail' to
2141 	 * the 'used' list.
2142 	 */
2143 	dlb_list_del(&domain->avail_dir_credit_pools, &pool->domain_list);
2144 
2145 	dlb_list_add(&domain->used_dir_credit_pools, &pool->domain_list);
2146 
2147 	resp->status = 0;
2148 	resp->id = pool->id;
2149 
2150 	return 0;
2151 }
2152 
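/* Example usage (an illustrative sketch; domain_id, the credit counts, and
 * the *_pool_id variables are hypothetical):
 *
 *	struct dlb_create_ldb_pool_args ldb_args = { .num_ldb_credits = 512 };
 *	struct dlb_create_dir_pool_args dir_args = { .num_dir_credits = 256 };
 *	struct dlb_cmd_response resp = { 0 };
 *
 *	if (dlb_hw_create_ldb_pool(hw, domain_id, &ldb_args, &resp) == 0)
 *		ldb_pool_id = resp.id;
 *	if (dlb_hw_create_dir_pool(hw, domain_id, &dir_args, &resp) == 0)
 *		dir_pool_id = resp.id;
 */
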
2153 static u32 dlb_ldb_cq_inflight_count(struct dlb_hw *hw,
2154 				     struct dlb_ldb_port *port)
2155 {
2156 	union dlb_lsp_cq_ldb_infl_cnt r0;
2157 
2158 	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_INFL_CNT(port->id));
2159 
2160 	return r0.field.count;
2161 }
2162 
2163 static u32 dlb_ldb_cq_token_count(struct dlb_hw *hw,
2164 				  struct dlb_ldb_port *port)
2165 {
2166 	union dlb_lsp_cq_ldb_tkn_cnt r0;
2167 
2168 	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_TKN_CNT(port->id));
2169 
2170 	return r0.field.token_count;
2171 }
2172 
2173 static int dlb_drain_ldb_cq(struct dlb_hw *hw, struct dlb_ldb_port *port)
2174 {
2175 	u32 infl_cnt, tkn_cnt;
2176 	unsigned int i;
2177 
2178 	infl_cnt = dlb_ldb_cq_inflight_count(hw, port);
2179 
2180 	/* Account for the initial token count, which is used to support CQs
2181 	 * with depth less than 8.
2182 	 */
2183 	tkn_cnt = dlb_ldb_cq_token_count(hw, port) - port->init_tkn_cnt;
2184 
2185 	if (infl_cnt || tkn_cnt) {
2186 		struct dlb_hcw hcw_mem[8], *hcw;
2187 		void  *pp_addr;
2188 
2189 		pp_addr = os_map_producer_port(hw, port->id, true);
2190 
2191 		/* Point hcw to a 64B-aligned location */
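		/* (hcw_mem holds 8 HCWs; assuming each HCW is 16 bytes,
		 * &hcw_mem[4] is 64B past the start of the array, so masking
		 * off the low six bits stays within the array and leaves at
		 * least four in-bounds HCWs, i.e. one 64B movdir64b store.)
		 */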
2192 		hcw = (struct dlb_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
2193 
2194 		/* Program the first HCW for a completion and token return and
2195 		 * the other HCWs as NOOPs.
2196 		 */
2197 
2198 		memset(hcw, 0, 4 * sizeof(*hcw));
2199 		hcw->qe_comp = (infl_cnt > 0);
2200 		hcw->cq_token = (tkn_cnt > 0);
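		/* When cq_token is set, lock_id appears to carry the number
		 * of tokens to return minus one, i.e. this single HCW
		 * returns all tkn_cnt tokens as a batch (cf. the batch
		 * token return in dlb_drain_dir_cq()).
		 */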
2201 		hcw->lock_id = tkn_cnt - 1;
2202 
2203 		/* Return tokens in the first HCW */
2204 		dlb_movdir64b(pp_addr, hcw);
2205 
2206 		hcw->cq_token = 0;
2207 
2208 		/* Issue remaining completions (if any) */
2209 		for (i = 1; i < infl_cnt; i++)
2210 			dlb_movdir64b(pp_addr, hcw);
2211 
2212 		os_fence_hcw(hw, pp_addr);
2213 
2214 		os_unmap_producer_port(hw, pp_addr);
2215 	}
2216 
2217 	return 0;
2218 }
2219 
2220 static int dlb_domain_drain_ldb_cqs(struct dlb_hw *hw,
2221 				    struct dlb_domain *domain,
2222 				    bool toggle_port)
2223 {
2224 	struct dlb_list_entry *iter;
2225 	RTE_SET_USED(iter);
2226 	struct dlb_ldb_port *port;
2227 	int ret;
2228 
2229 	/* If the domain hasn't been started, there's no traffic to drain */
2230 	if (!domain->started)
2231 		return 0;
2232 
2233 	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
2234 		if (toggle_port)
2235 			dlb_ldb_port_cq_disable(hw, port);
2236 
2237 		ret = dlb_drain_ldb_cq(hw, port);
2238 		if (ret < 0)
2239 			return ret;
2240 
2241 		if (toggle_port)
2242 			dlb_ldb_port_cq_enable(hw, port);
2243 	}
2244 
2245 	return 0;
2246 }
2247 
2248 static void dlb_domain_disable_ldb_queue_write_perms(struct dlb_hw *hw,
2249 						     struct dlb_domain *domain)
2250 {
2251 	int domain_offset = domain->id * DLB_MAX_NUM_LDB_QUEUES;
2252 	struct dlb_list_entry *iter;
2253 	RTE_SET_USED(iter);
2254 	union dlb_sys_ldb_vasqid_v r0;
2255 	struct dlb_ldb_queue *queue;
2256 
2257 	r0.field.vasqid_v = 0;
2258 
2259 	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2260 		int idx = domain_offset + queue->id;
2261 
2262 		DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(idx), r0.val);
2263 	}
2264 }
2265 
2266 static void dlb_domain_disable_ldb_seq_checks(struct dlb_hw *hw,
2267 					      struct dlb_domain *domain)
2268 {
2269 	struct dlb_list_entry *iter;
2270 	RTE_SET_USED(iter);
2271 	union dlb_chp_sn_chk_enbl r1;
2272 	struct dlb_ldb_port *port;
2273 
2274 	r1.field.en = 0;
2275 
2276 	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
2277 		DLB_CSR_WR(hw,
2278 			   DLB_CHP_SN_CHK_ENBL(port->id),
2279 			   r1.val);
2280 }
2281 
2282 static void dlb_domain_disable_ldb_port_crd_updates(struct dlb_hw *hw,
2283 						    struct dlb_domain *domain)
2284 {
2285 	struct dlb_list_entry *iter;
2286 	RTE_SET_USED(iter);
2287 	union dlb_chp_ldb_pp_crd_req_state r0;
2288 	struct dlb_ldb_port *port;
2289 
2290 	r0.field.no_pp_credit_update = 1;
2291 
2292 	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
2293 		DLB_CSR_WR(hw,
2294 			   DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id),
2295 			   r0.val);
2296 }
2297 
2298 static void dlb_domain_disable_ldb_port_interrupts(struct dlb_hw *hw,
2299 						   struct dlb_domain *domain)
2300 {
2301 	struct dlb_list_entry *iter;
2302 	RTE_SET_USED(iter);
2303 	union dlb_chp_ldb_cq_int_enb r0 = { {0} };
2304 	union dlb_chp_ldb_cq_wd_enb r1 = { {0} };
2305 	struct dlb_ldb_port *port;
2306 
2307 	r0.field.en_tim = 0;
2308 	r0.field.en_depth = 0;
2309 
2310 	r1.field.wd_enable = 0;
2311 
2312 	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
2313 		DLB_CSR_WR(hw,
2314 			   DLB_CHP_LDB_CQ_INT_ENB(port->id),
2315 			   r0.val);
2316 
2317 		DLB_CSR_WR(hw,
2318 			   DLB_CHP_LDB_CQ_WD_ENB(port->id),
2319 			   r1.val);
2320 	}
2321 }
2322 
2323 static void dlb_domain_disable_dir_queue_write_perms(struct dlb_hw *hw,
2324 						     struct dlb_domain *domain)
2325 {
2326 	int domain_offset = domain->id * DLB_MAX_NUM_DIR_PORTS;
2327 	struct dlb_list_entry *iter;
2328 	RTE_SET_USED(iter);
2329 	union dlb_sys_dir_vasqid_v r0;
2330 	struct dlb_dir_pq_pair *port;
2331 
2332 	r0.field.vasqid_v = 0;
2333 
2334 	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2335 		int idx = domain_offset + port->id;
2336 
2337 		DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(idx), r0.val);
2338 	}
2339 }
2340 
2341 static void dlb_domain_disable_dir_port_interrupts(struct dlb_hw *hw,
2342 						   struct dlb_domain *domain)
2343 {
2344 	struct dlb_list_entry *iter;
2345 	RTE_SET_USED(iter);
2346 	union dlb_chp_dir_cq_int_enb r0 = { {0} };
2347 	union dlb_chp_dir_cq_wd_enb r1 = { {0} };
2348 	struct dlb_dir_pq_pair *port;
2349 
2350 	r0.field.en_tim = 0;
2351 	r0.field.en_depth = 0;
2352 
2353 	r1.field.wd_enable = 0;
2354 
2355 	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2356 		DLB_CSR_WR(hw,
2357 			   DLB_CHP_DIR_CQ_INT_ENB(port->id),
2358 			   r0.val);
2359 
2360 		DLB_CSR_WR(hw,
2361 			   DLB_CHP_DIR_CQ_WD_ENB(port->id),
2362 			   r1.val);
2363 	}
2364 }
2365 
2366 static void dlb_domain_disable_dir_port_crd_updates(struct dlb_hw *hw,
2367 						    struct dlb_domain *domain)
2368 {
2369 	struct dlb_list_entry *iter;
2370 	RTE_SET_USED(iter);
2371 	union dlb_chp_dir_pp_crd_req_state r0;
2372 	struct dlb_dir_pq_pair *port;
2373 
2374 	r0.field.no_pp_credit_update = 1;
2375 
2376 	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
2377 		DLB_CSR_WR(hw,
2378 			   DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),
2379 			   r0.val);
2380 }
2381 
2382 static void dlb_domain_disable_dir_cqs(struct dlb_hw *hw,
2383 				       struct dlb_domain *domain)
2384 {
2385 	struct dlb_list_entry *iter;
2386 	RTE_SET_USED(iter);
2387 	struct dlb_dir_pq_pair *port;
2388 
2389 	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2390 		port->enabled = false;
2391 
2392 		dlb_dir_port_cq_disable(hw, port);
2393 	}
2394 }
2395 
2396 static void dlb_domain_disable_ldb_cqs(struct dlb_hw *hw,
2397 				       struct dlb_domain *domain)
2398 {
2399 	struct dlb_list_entry *iter;
2400 	RTE_SET_USED(iter);
2401 	struct dlb_ldb_port *port;
2402 
2403 	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
2404 		port->enabled = false;
2405 
2406 		dlb_ldb_port_cq_disable(hw, port);
2407 	}
2408 }
2409 
2410 static void dlb_domain_enable_ldb_cqs(struct dlb_hw *hw,
2411 				      struct dlb_domain *domain)
2412 {
2413 	struct dlb_list_entry *iter;
2414 	RTE_SET_USED(iter);
2415 	struct dlb_ldb_port *port;
2416 
2417 	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
2418 		port->enabled = true;
2419 
2420 		dlb_ldb_port_cq_enable(hw, port);
2421 	}
2422 }
2423 
2424 static struct dlb_ldb_queue *dlb_get_ldb_queue_from_id(struct dlb_hw *hw,
2425 						       u32 id)
2426 {
2427 	if (id >= DLB_MAX_NUM_LDB_QUEUES)
2428 		return NULL;
2429 
2430 	return &hw->rsrcs.ldb_queues[id];
2431 }
2432 
2433 static void dlb_ldb_port_clear_has_work_bits(struct dlb_hw *hw,
2434 					     struct dlb_ldb_port *port,
2435 					     u8 slot)
2436 {
2437 	union dlb_lsp_ldb_sched_ctrl r2 = { {0} };
2438 
2439 	r2.field.cq = port->id;
2440 	r2.field.qidix = slot;
2441 	r2.field.value = 0;
2442 	r2.field.rlist_haswork_v = 1;
2443 
2444 	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);
2445 
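	/* The sched_ctrl register appears to apply only the fields whose
	 * *_v (valid) bit is set in the written value, so clearing both the
	 * replay-list and non-atomic has-work bits takes two writes.
	 */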
2446 	memset(&r2, 0, sizeof(r2));
2447 
2448 	r2.field.cq = port->id;
2449 	r2.field.qidix = slot;
2450 	r2.field.value = 0;
2451 	r2.field.nalb_haswork_v = 1;
2452 
2453 	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);
2454 
2455 	dlb_flush_csr(hw);
2456 }
2457 
2458 static void dlb_domain_finish_map_port(struct dlb_hw *hw,
2459 				       struct dlb_domain *domain,
2460 				       struct dlb_ldb_port *port)
2461 {
2462 	int i;
2463 
2464 	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2465 		union dlb_lsp_qid_ldb_infl_cnt r0;
2466 		struct dlb_ldb_queue *queue;
2467 		int qid;
2468 
2469 		if (port->qid_map[i].state != DLB_QUEUE_MAP_IN_PROGRESS)
2470 			continue;
2471 
2472 		qid = port->qid_map[i].qid;
2473 
2474 		queue = dlb_get_ldb_queue_from_id(hw, qid);
2475 
2476 		if (queue == NULL) {
2477 			DLB_HW_ERR(hw,
2478 				   "[%s()] Internal error: unable to find queue %d\n",
2479 				   __func__, qid);
2480 			continue;
2481 		}
2482 
2483 		r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(qid));
2484 
2485 		if (r0.field.count)
2486 			continue;
2487 
2488 		/* Disable the affected CQ, and the CQs already mapped to the
2489 		 * QID, before reading the QID's inflight count a second time.
2490 		 * There is an unlikely race in which the QID may schedule one
2491 		 * more QE after we read an inflight count of 0, and disabling
2492 		 * the CQs guarantees that the race will not occur after a
2493 		 * re-read of the inflight count register.
2494 		 */
2495 		if (port->enabled)
2496 			dlb_ldb_port_cq_disable(hw, port);
2497 
2498 		dlb_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2499 
2500 		r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(qid));
2501 
2502 		if (r0.field.count) {
2503 			if (port->enabled)
2504 				dlb_ldb_port_cq_enable(hw, port);
2505 
2506 			dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2507 
2508 			continue;
2509 		}
2510 
2511 		dlb_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2512 	}
2513 }
2514 
2515 static unsigned int
2516 dlb_domain_finish_map_qid_procedures(struct dlb_hw *hw,
2517 				     struct dlb_domain *domain)
2518 {
2519 	struct dlb_list_entry *iter;
2520 	RTE_SET_USED(iter);
2521 	struct dlb_ldb_port *port;
2522 
2523 	if (!domain->configured || domain->num_pending_additions == 0)
2524 		return 0;
2525 
2526 	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
2527 		dlb_domain_finish_map_port(hw, domain, port);
2528 
2529 	return domain->num_pending_additions;
2530 }
2531 
2532 unsigned int dlb_finish_map_qid_procedures(struct dlb_hw *hw)
2533 {
2534 	int i, num = 0;
2535 
2536 	/* Finish queue map jobs for any domain that needs it */
2537 	for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {
2538 		struct dlb_domain *domain = &hw->domains[i];
2539 
2540 		num += dlb_domain_finish_map_qid_procedures(hw, domain);
2541 	}
2542 
2543 	return num;
2544 }
2545 
2547 static int dlb_domain_wait_for_ldb_cqs_to_empty(struct dlb_hw *hw,
2548 						struct dlb_domain *domain)
2549 {
2550 	struct dlb_list_entry *iter;
2551 	RTE_SET_USED(iter);
2552 	struct dlb_ldb_port *port;
2553 
2554 	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
2555 		int i;
2556 
2557 		for (i = 0; i < DLB_MAX_CQ_COMP_CHECK_LOOPS; i++) {
2558 			if (dlb_ldb_cq_inflight_count(hw, port) == 0)
2559 				break;
2560 		}
2561 
2562 		if (i == DLB_MAX_CQ_COMP_CHECK_LOOPS) {
2563 			DLB_HW_ERR(hw,
2564 				   "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
2565 				   __func__, port->id);
2566 			return -EFAULT;
2567 		}
2568 	}
2569 
2570 	return 0;
2571 }
2573 
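/* Complete an in-progress unmap of the QID in 'slot': remove the mapping,
 * clear the slot's has-work state, restore the slot's queue-interface
 * status, re-enable the CQ if the user had left it enabled, and, if a map
 * request was queued behind this unmap (pending_qid/pending_priority),
 * start that map now.
 */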
2574 static void dlb_domain_finish_unmap_port_slot(struct dlb_hw *hw,
2575 					      struct dlb_domain *domain,
2576 					      struct dlb_ldb_port *port,
2577 					      int slot)
2578 {
2579 	enum dlb_qid_map_state state;
2580 	struct dlb_ldb_queue *queue;
2581 
2582 	queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
2583 
2584 	state = port->qid_map[slot].state;
2585 
2586 	/* Update the QID2CQIDX and CQ2QID vectors */
2587 	dlb_ldb_port_unmap_qid(hw, port, queue);
2588 
2589 	/* Ensure the QID will not be serviced by this {CQ, slot} by clearing
2590 	 * the has_work bits
2591 	 */
2592 	dlb_ldb_port_clear_has_work_bits(hw, port, slot);
2593 
2594 	/* Reset the {CQ, slot} to its default state */
2595 	dlb_ldb_port_set_queue_if_status(hw, port, slot);
2596 
2597 	/* Re-enable the CQ if it was not manually disabled by the user */
2598 	if (port->enabled)
2599 		dlb_ldb_port_cq_enable(hw, port);
2600 
2601 	/* If there is a mapping that is pending this slot's removal, perform
2602 	 * the mapping now.
2603 	 */
2604 	if (state == DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP) {
2605 		struct dlb_ldb_port_qid_map *map;
2606 		struct dlb_ldb_queue *map_queue;
2607 		u8 prio;
2608 
2609 		map = &port->qid_map[slot];
2610 
2611 		map->qid = map->pending_qid;
2612 		map->priority = map->pending_priority;
2613 
2614 		map_queue = &hw->rsrcs.ldb_queues[map->qid];
2615 		prio = map->priority;
2616 
2617 		dlb_ldb_port_map_qid(hw, domain, port, map_queue, prio);
2618 	}
2619 }
2620 
2621 static bool dlb_domain_finish_unmap_port(struct dlb_hw *hw,
2622 					 struct dlb_domain *domain,
2623 					 struct dlb_ldb_port *port)
2624 {
2625 	union dlb_lsp_cq_ldb_infl_cnt r0;
2626 	int i;
2627 
2628 	if (port->num_pending_removals == 0)
2629 		return false;
2630 
2631 	/* The unmap requires all the CQ's outstanding inflights to be
2632 	 * completed.
2633 	 */
2634 	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_INFL_CNT(port->id));
2635 	if (r0.field.count > 0)
2636 		return false;
2637 
2638 	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2639 		struct dlb_ldb_port_qid_map *map;
2640 
2641 		map = &port->qid_map[i];
2642 
2643 		if (map->state != DLB_QUEUE_UNMAP_IN_PROGRESS &&
2644 		    map->state != DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP)
2645 			continue;
2646 
2647 		dlb_domain_finish_unmap_port_slot(hw, domain, port, i);
2648 	}
2649 
2650 	return true;
2651 }
2652 
2653 static unsigned int
2654 dlb_domain_finish_unmap_qid_procedures(struct dlb_hw *hw,
2655 				       struct dlb_domain *domain)
2656 {
2657 	struct dlb_list_entry *iter;
2658 	RTE_SET_USED(iter);
2659 	struct dlb_ldb_port *port;
2660 
2661 	if (!domain->configured || domain->num_pending_removals == 0)
2662 		return 0;
2663 
2664 	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
2665 		dlb_domain_finish_unmap_port(hw, domain, port);
2666 
2667 	return domain->num_pending_removals;
2668 }
2669 
2670 unsigned int dlb_finish_unmap_qid_procedures(struct dlb_hw *hw)
2671 {
2672 	int i, num = 0;
2673 
2674 	/* Finish queue unmap jobs for any domain that needs it */
2675 	for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {
2676 		struct dlb_domain *domain = &hw->domains[i];
2677 
2678 		num += dlb_domain_finish_unmap_qid_procedures(hw, domain);
2679 	}
2680 
2681 	return num;
2682 }
2683 
2684 /* Returns whether the queue is empty, counting its enqueued, inflight,
2685  * replayed, and atomic-buffered (AQED) QEs.
2686  */
2687 static bool dlb_ldb_queue_is_empty(struct dlb_hw *hw,
2688 				   struct dlb_ldb_queue *queue)
2689 {
2690 	union dlb_lsp_qid_ldb_replay_cnt r0;
2691 	union dlb_lsp_qid_aqed_active_cnt r1;
2692 	union dlb_lsp_qid_atq_enqueue_cnt r2;
2693 	union dlb_lsp_qid_ldb_enqueue_cnt r3;
2694 	union dlb_lsp_qid_ldb_infl_cnt r4;
2695 
2696 	r0.val = DLB_CSR_RD(hw,
2697 			    DLB_LSP_QID_LDB_REPLAY_CNT(queue->id));
2698 	if (r0.val)
2699 		return false;
2700 
2701 	r1.val = DLB_CSR_RD(hw,
2702 			    DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));
2703 	if (r1.val)
2704 		return false;
2705 
2706 	r2.val = DLB_CSR_RD(hw,
2707 			    DLB_LSP_QID_ATQ_ENQUEUE_CNT(queue->id));
2708 	if (r2.val)
2709 		return false;
2710 
2711 	r3.val = DLB_CSR_RD(hw,
2712 			    DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));
2713 	if (r3.val)
2714 		return false;
2715 
2716 	r4.val = DLB_CSR_RD(hw,
2717 			    DLB_LSP_QID_LDB_INFL_CNT(queue->id));
2718 	if (r4.val)
2719 		return false;
2720 
2721 	return true;
2722 }
2723 
2724 static bool dlb_domain_mapped_queues_empty(struct dlb_hw *hw,
2725 					   struct dlb_domain *domain)
2726 {
2727 	struct dlb_list_entry *iter;
2728 	RTE_SET_USED(iter);
2729 	struct dlb_ldb_queue *queue;
2730 
2731 	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2732 		if (queue->num_mappings == 0)
2733 			continue;
2734 
2735 		if (!dlb_ldb_queue_is_empty(hw, queue))
2736 			return false;
2737 	}
2738 
2739 	return true;
2740 }
2741 
2742 static int dlb_domain_drain_mapped_queues(struct dlb_hw *hw,
2743 					  struct dlb_domain *domain)
2744 {
2745 	int i, ret;
2746 
2747 	/* If the domain hasn't been started, there's no traffic to drain */
2748 	if (!domain->started)
2749 		return 0;
2750 
2751 	if (domain->num_pending_removals > 0) {
2752 		DLB_HW_ERR(hw,
2753 			   "[%s()] Internal error: failed to unmap domain queues\n",
2754 			   __func__);
2755 		return -EFAULT;
2756 	}
2757 
2758 	for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
2759 		ret = dlb_domain_drain_ldb_cqs(hw, domain, true);
2760 		if (ret < 0)
2761 			return ret;
2762 
2763 		if (dlb_domain_mapped_queues_empty(hw, domain))
2764 			break;
2765 	}
2766 
2767 	if (i == DLB_MAX_QID_EMPTY_CHECK_LOOPS) {
2768 		DLB_HW_ERR(hw,
2769 			   "[%s()] Internal error: failed to empty queues\n",
2770 			   __func__);
2771 		return -EFAULT;
2772 	}
2773 
2774 	/* Drain the CQs one more time. For the queues to have gone empty, they
2775 	 * must have scheduled one or more QEs into the CQs; drain those too.
2776 	 */
2777 	ret = dlb_domain_drain_ldb_cqs(hw, domain, true);
2778 	if (ret < 0)
2779 		return ret;
2780 
2781 	return 0;
2782 }
2783 
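/* Drain a queue that has no CQ mappings by temporarily mapping it (at
 * priority 0) to a load-balanced port, then draining the mapped queues as
 * usual. If the chosen port already has a full complement of QID mappings,
 * one is unmapped first to free a slot.
 */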
2784 static int dlb_domain_drain_unmapped_queue(struct dlb_hw *hw,
2785 					   struct dlb_domain *domain,
2786 					   struct dlb_ldb_queue *queue)
2787 {
2788 	struct dlb_ldb_port *port;
2789 	int ret;
2790 
2791 	/* If a domain has LDB queues, it must have LDB ports */
2792 	if (dlb_list_empty(&domain->used_ldb_ports)) {
2793 		DLB_HW_ERR(hw,
2794 			   "[%s()] Internal error: No configured LDB ports\n",
2795 			   __func__);
2796 		return -EFAULT;
2797 	}
2798 
2799 	port = DLB_DOM_LIST_HEAD(domain->used_ldb_ports, typeof(*port));
2800 
2801 	/* If necessary, free up a QID slot in this CQ */
2802 	if (port->num_mappings == DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
2803 		struct dlb_ldb_queue *mapped_queue;
2804 
2805 		mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
2806 
2807 		ret = dlb_ldb_port_unmap_qid(hw, port, mapped_queue);
2808 		if (ret)
2809 			return ret;
2810 	}
2811 
2812 	ret = dlb_ldb_port_map_qid_dynamic(hw, port, queue, 0);
2813 	if (ret)
2814 		return ret;
2815 
2816 	return dlb_domain_drain_mapped_queues(hw, domain);
2817 }
2818 
2819 static int dlb_domain_drain_unmapped_queues(struct dlb_hw *hw,
2820 					    struct dlb_domain *domain)
2821 {
2822 	struct dlb_list_entry *iter;
2823 	RTE_SET_USED(iter);
2824 	struct dlb_ldb_queue *queue;
2825 	int ret;
2826 
2827 	/* If the domain hasn't been started, there's no traffic to drain */
2828 	if (!domain->started)
2829 		return 0;
2830 
2831 	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2832 		if (queue->num_mappings != 0 ||
2833 		    dlb_ldb_queue_is_empty(hw, queue))
2834 			continue;
2835 
2836 		ret = dlb_domain_drain_unmapped_queue(hw, domain, queue);
2837 		if (ret)
2838 			return ret;
2839 	}
2840 
2841 	return 0;
2842 }
2843 
2844 static int dlb_domain_wait_for_ldb_pool_refill(struct dlb_hw *hw,
2845 					       struct dlb_domain *domain)
2846 {
2847 	struct dlb_list_entry *iter;
2848 	RTE_SET_USED(iter);
2849 	struct dlb_credit_pool *pool;
2850 
2851 	/* Confirm that all credits are returned to the domain's credit pools */
2852 	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {
2853 		union dlb_chp_qed_fl_push_ptr r0;
2854 		union dlb_chp_qed_fl_pop_ptr r1;
2855 		unsigned long pop_offs, push_offs;
2856 		int i;
2857 
2858 		push_offs = DLB_CHP_QED_FL_PUSH_PTR(pool->id);
2859 		pop_offs = DLB_CHP_QED_FL_POP_PTR(pool->id);
2860 
2861 		for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
2862 			r0.val = DLB_CSR_RD(hw, push_offs);
2863 
2864 			r1.val = DLB_CSR_RD(hw, pop_offs);
2865 
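			/* The push and pop pointers chase each other through
			 * the same ring and the generation bit flips on each
			 * wrap, so equal pointers with differing generation
			 * bits indicate a full freelist (equal bits would
			 * mean an empty one).
			 */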
2866 			/* Break early if the freelist is replenished */
2867 			if (r1.field.pop_ptr == r0.field.push_ptr &&
2868 			    r1.field.generation != r0.field.generation) {
2869 				break;
2870 			}
2871 		}
2872 
2873 		/* Error if the freelist is not full */
2874 		if (r1.field.pop_ptr != r0.field.push_ptr ||
2875 		    r1.field.generation == r0.field.generation) {
2876 			return -EFAULT;
2877 		}
2878 	}
2879 
2880 	return 0;
2881 }
2882 
2883 static int dlb_domain_wait_for_dir_pool_refill(struct dlb_hw *hw,
2884 					       struct dlb_domain *domain)
2885 {
2886 	struct dlb_list_entry *iter;
2887 	RTE_SET_USED(iter);
2888 	struct dlb_credit_pool *pool;
2889 
2890 	/* Confirm that all credits are returned to the domain's credit pools */
2891 	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
2892 		union dlb_chp_dqed_fl_push_ptr r0;
2893 		union dlb_chp_dqed_fl_pop_ptr r1;
2894 		unsigned long pop_offs, push_offs;
2895 		int i;
2896 
2897 		push_offs = DLB_CHP_DQED_FL_PUSH_PTR(pool->id);
2898 		pop_offs = DLB_CHP_DQED_FL_POP_PTR(pool->id);
2899 
2900 		for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
2901 			r0.val = DLB_CSR_RD(hw, push_offs);
2902 
2903 			r1.val = DLB_CSR_RD(hw, pop_offs);
2904 
2905 			/* Break early if the freelist is replenished */
2906 			if (r1.field.pop_ptr == r0.field.push_ptr &&
2907 			    r1.field.generation != r0.field.generation) {
2908 				break;
2909 			}
2910 		}
2911 
2912 		/* Error if the freelist is not full */
2913 		if (r1.field.pop_ptr != r0.field.push_ptr ||
2914 		    r1.field.generation == r0.field.generation) {
2915 			return -EFAULT;
2916 		}
2917 	}
2918 
2919 	return 0;
2920 }
2921 
2922 static u32 dlb_dir_queue_depth(struct dlb_hw *hw,
2923 			       struct dlb_dir_pq_pair *queue)
2924 {
2925 	union dlb_lsp_qid_dir_enqueue_cnt r0;
2926 
2927 	r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_DIR_ENQUEUE_CNT(queue->id));
2928 
2929 	return r0.field.count;
2930 }
2931 
2932 static bool dlb_dir_queue_is_empty(struct dlb_hw *hw,
2933 				   struct dlb_dir_pq_pair *queue)
2934 {
2935 	return dlb_dir_queue_depth(hw, queue) == 0;
2936 }
2937 
2938 static bool dlb_domain_dir_queues_empty(struct dlb_hw *hw,
2939 					struct dlb_domain *domain)
2940 {
2941 	struct dlb_list_entry *iter;
2942 	RTE_SET_USED(iter);
2943 	struct dlb_dir_pq_pair *queue;
2944 
2945 	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
2946 		if (!dlb_dir_queue_is_empty(hw, queue))
2947 			return false;
2948 	}
2949 
2950 	return true;
2951 }
2952 
2953 static u32 dlb_dir_cq_token_count(struct dlb_hw *hw,
2954 				  struct dlb_dir_pq_pair *port)
2955 {
2956 	union dlb_lsp_cq_dir_tkn_cnt r0;
2957 
2958 	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_DIR_TKN_CNT(port->id));
2959 
2960 	return r0.field.count;
2961 }
2962 
2963 static void dlb_drain_dir_cq(struct dlb_hw *hw, struct dlb_dir_pq_pair *port)
2964 {
2965 	unsigned int port_id = port->id;
2966 	u32 cnt;
2967 
2968 	/* Return any outstanding tokens */
2969 	cnt = dlb_dir_cq_token_count(hw, port);
2970 
2971 	if (cnt != 0) {
2972 		struct dlb_hcw hcw_mem[8], *hcw;
2973 		void  *pp_addr;
2974 
2975 		pp_addr = os_map_producer_port(hw, port_id, false);
2976 
2977 		/* Point hcw to a 64B-aligned location */
2978 		hcw = (struct dlb_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
2979 
2980 		/* Program the first HCW for a batch token return and
2981 		 * the rest as NOOPs.
2982 		 */
2983 		memset(hcw, 0, 4 * sizeof(*hcw));
2984 		hcw->cq_token = 1;
2985 		hcw->lock_id = cnt - 1;
2986 
2987 		dlb_movdir64b(pp_addr, hcw);
2988 
2989 		os_fence_hcw(hw, pp_addr);
2990 
2991 		os_unmap_producer_port(hw, pp_addr);
2992 	}
2993 }
2994 
2995 static int dlb_domain_drain_dir_cqs(struct dlb_hw *hw,
2996 				    struct dlb_domain *domain,
2997 				    bool toggle_port)
2998 {
2999 	struct dlb_list_entry *iter;
3000 	RTE_SET_USED(iter);
3001 	struct dlb_dir_pq_pair *port;
3002 
3003 	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
3004 		/* Can't drain a port if it's not configured, and there's
3005 		 * nothing to drain if its queue is unconfigured.
3006 		 */
3007 		if (!port->port_configured || !port->queue_configured)
3008 			continue;
3009 
3010 		if (toggle_port)
3011 			dlb_dir_port_cq_disable(hw, port);
3012 
3013 		dlb_drain_dir_cq(hw, port);
3014 
3015 		if (toggle_port)
3016 			dlb_dir_port_cq_enable(hw, port);
3017 	}
3018 
3019 	return 0;
3020 }
3021 
3022 static int dlb_domain_drain_dir_queues(struct dlb_hw *hw,
3023 				       struct dlb_domain *domain)
3024 {
3025 	int i;
3026 
3027 	/* If the domain hasn't been started, there's no traffic to drain */
3028 	if (!domain->started)
3029 		return 0;
3030 
3031 	for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
3032 		dlb_domain_drain_dir_cqs(hw, domain, true);
3033 
3034 		if (dlb_domain_dir_queues_empty(hw, domain))
3035 			break;
3036 	}
3037 
3038 	if (i == DLB_MAX_QID_EMPTY_CHECK_LOOPS) {
3039 		DLB_HW_ERR(hw,
3040 			   "[%s()] Internal error: failed to empty queues\n",
3041 			   __func__);
3042 		return -EFAULT;
3043 	}
3044 
3045 	/* Drain the CQs one more time. For the queues to have gone empty, they
3046 	 * must have scheduled one or more QEs into the CQs; drain those too.
3047 	 */
3048 	dlb_domain_drain_dir_cqs(hw, domain, true);
3049 
3050 	return 0;
3051 }
3052 
3053 static void dlb_domain_disable_dir_producer_ports(struct dlb_hw *hw,
3054 						  struct dlb_domain *domain)
3055 {
3056 	struct dlb_list_entry *iter;
3057 	RTE_SET_USED(iter);
3058 	struct dlb_dir_pq_pair *port;
3059 	union dlb_sys_dir_pp_v r1;
3060 
3061 	r1.field.pp_v = 0;
3062 
3063 	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
3064 		DLB_CSR_WR(hw,
3065 			   DLB_SYS_DIR_PP_V(port->id),
3066 			   r1.val);
3067 }
3068 
3069 static void dlb_domain_disable_ldb_producer_ports(struct dlb_hw *hw,
3070 						  struct dlb_domain *domain)
3071 {
3072 	struct dlb_list_entry *iter;
3073 	RTE_SET_USED(iter);
3074 	union dlb_sys_ldb_pp_v r1;
3075 	struct dlb_ldb_port *port;
3076 
3077 	r1.field.pp_v = 0;
3078 
3079 	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
3080 		DLB_CSR_WR(hw,
3081 			   DLB_SYS_LDB_PP_V(port->id),
3082 			   r1.val);
3083 
3084 		hw->pf.num_enabled_ldb_ports--;
3085 	}
3086 }
3087 
3088 static void dlb_domain_disable_dir_pools(struct dlb_hw *hw,
3089 					 struct dlb_domain *domain)
3090 {
3091 	struct dlb_list_entry *iter;
3092 	RTE_SET_USED(iter);
3093 	union dlb_sys_dir_pool_enbld r0 = { {0} };
3094 	struct dlb_credit_pool *pool;
3095 
3096 	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
3097 		DLB_CSR_WR(hw,
3098 			   DLB_SYS_DIR_POOL_ENBLD(pool->id),
3099 			   r0.val);
3100 }
3101 
3102 static void dlb_domain_disable_ldb_pools(struct dlb_hw *hw,
3103 					 struct dlb_domain *domain)
3104 {
3105 	struct dlb_list_entry *iter;
3106 	RTE_SET_USED(iter);
3107 	union dlb_sys_ldb_pool_enbld r0 = { {0} };
3108 	struct dlb_credit_pool *pool;
3109 
3110 	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
3111 		DLB_CSR_WR(hw,
3112 			   DLB_SYS_LDB_POOL_ENBLD(pool->id),
3113 			   r0.val);
3114 }
3115 
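/* Trigger a hardware reset of a single resource (credit pool, queue, or CQ)
 * via the master broadcast reset register, then poll the diagnostic status
 * register until every sub-unit (CHP, ROP, LSP, NALB, AP, DP, QED, DQED,
 * AQED) reports its reset done.
 */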
3116 static int dlb_reset_hw_resource(struct dlb_hw *hw, int type, int id)
3117 {
3118 	union dlb_cfg_mstr_diag_reset_sts r0 = { {0} };
3119 	union dlb_cfg_mstr_bcast_reset_vf_start r1 = { {0} };
3120 	int i;
3121 
3122 	r1.field.vf_reset_start = 1;
3123 
3124 	r1.field.vf_reset_type = type;
3125 	r1.field.vf_reset_id = id;
3126 
3127 	DLB_CSR_WR(hw, DLB_CFG_MSTR_BCAST_RESET_VF_START, r1.val);
3128 
3129 	/* Wait for hardware to complete. This is a finite-time operation, but
3130 	 * set a loop bound just in case (about one second at 1 us per loop).
3131 	 */
3132 	for (i = 0; i < 1024 * 1024; i++) {
3133 		r0.val = DLB_CSR_RD(hw, DLB_CFG_MSTR_DIAG_RESET_STS);
3134 
3135 		if (r0.field.chp_vf_reset_done &&
3136 		    r0.field.rop_vf_reset_done &&
3137 		    r0.field.lsp_vf_reset_done &&
3138 		    r0.field.nalb_vf_reset_done &&
3139 		    r0.field.ap_vf_reset_done &&
3140 		    r0.field.dp_vf_reset_done &&
3141 		    r0.field.qed_vf_reset_done &&
3142 		    r0.field.dqed_vf_reset_done &&
3143 		    r0.field.aqed_vf_reset_done)
3144 			return 0;
3145 
3146 		os_udelay(1);
3147 	}
3148 
3149 	return -ETIMEDOUT;
3150 }
3151 
3152 static int dlb_domain_reset_hw_resources(struct dlb_hw *hw,
3153 					 struct dlb_domain *domain)
3154 {
3155 	struct dlb_list_entry *iter;
3156 	RTE_SET_USED(iter);
3157 	struct dlb_dir_pq_pair *dir_port;
3158 	struct dlb_ldb_queue *ldb_queue;
3159 	struct dlb_ldb_port *ldb_port;
3160 	struct dlb_credit_pool *pool;
3161 	int ret;
3162 
3163 	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {
3164 		ret = dlb_reset_hw_resource(hw,
3165 					    VF_RST_TYPE_POOL_LDB,
3166 					    pool->id);
3167 		if (ret)
3168 			return ret;
3169 	}
3170 
3171 	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
3172 		ret = dlb_reset_hw_resource(hw,
3173 					    VF_RST_TYPE_POOL_DIR,
3174 					    pool->id);
3175 		if (ret)
3176 			return ret;
3177 	}
3178 
3179 	DLB_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
3180 		ret = dlb_reset_hw_resource(hw,
3181 					    VF_RST_TYPE_QID_LDB,
3182 					    ldb_queue->id);
3183 		if (ret)
3184 			return ret;
3185 	}
3186 
3187 	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
3188 		ret = dlb_reset_hw_resource(hw,
3189 					    VF_RST_TYPE_QID_DIR,
3190 					    dir_port->id);
3191 		if (ret)
3192 			return ret;
3193 	}
3194 
3195 	DLB_DOM_LIST_FOR(domain->used_ldb_ports, ldb_port, iter) {
3196 		ret = dlb_reset_hw_resource(hw,
3197 					    VF_RST_TYPE_CQ_LDB,
3198 					    ldb_port->id);
3199 		if (ret)
3200 			return ret;
3201 	}
3202 
3203 	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
3204 		ret = dlb_reset_hw_resource(hw,
3205 					    VF_RST_TYPE_CQ_DIR,
3206 					    dir_port->id);
3207 		if (ret)
3208 			return ret;
3209 	}
3210 
3211 	return 0;
3212 }
3213 
3214 static int dlb_domain_verify_reset_success(struct dlb_hw *hw,
3215 					   struct dlb_domain *domain)
3216 {
3217 	struct dlb_list_entry *iter;
3218 	RTE_SET_USED(iter);
3219 	struct dlb_dir_pq_pair *dir_port;
3220 	struct dlb_ldb_port *ldb_port;
3221 	struct dlb_credit_pool *pool;
3222 	struct dlb_ldb_queue *queue;
3223 
3224 	/* Confirm that all credits are returned to the domain's credit pools */
3225 	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
3226 		union dlb_chp_dqed_fl_pop_ptr r0;
3227 		union dlb_chp_dqed_fl_push_ptr r1;
3228 
3229 		r0.val = DLB_CSR_RD(hw,
3230 				    DLB_CHP_DQED_FL_POP_PTR(pool->id));
3231 
3232 		r1.val = DLB_CSR_RD(hw,
3233 				    DLB_CHP_DQED_FL_PUSH_PTR(pool->id));
3234 
3235 		if (r0.field.pop_ptr != r1.field.push_ptr ||
3236 		    r0.field.generation == r1.field.generation) {
3237 			DLB_HW_ERR(hw,
3238 				   "[%s()] Internal error: failed to refill directed pool %d's credits.\n",
3239 				   __func__, pool->id);
3240 			return -EFAULT;
3241 		}
3242 	}
3243 
3244 	/* Confirm that all the domain's queues' inflight counts and AQED
3245 	 * active counts are 0.
3246 	 */
3247 	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3248 		if (!dlb_ldb_queue_is_empty(hw, queue)) {
3249 			DLB_HW_ERR(hw,
3250 				   "[%s()] Internal error: failed to empty ldb queue %d\n",
3251 				   __func__, queue->id);
3252 			return -EFAULT;
3253 		}
3254 	}
3255 
3256 	/* Confirm that all the domain's CQs' inflight and token counts are 0. */
3257 	DLB_DOM_LIST_FOR(domain->used_ldb_ports, ldb_port, iter) {
3258 		if (dlb_ldb_cq_inflight_count(hw, ldb_port) ||
3259 		    dlb_ldb_cq_token_count(hw, ldb_port)) {
3260 			DLB_HW_ERR(hw,
3261 				   "[%s()] Internal error: failed to empty ldb port %d\n",
3262 				   __func__, ldb_port->id);
3263 			return -EFAULT;
3264 		}
3265 	}
3266 
3267 	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
3268 		if (!dlb_dir_queue_is_empty(hw, dir_port)) {
3269 			DLB_HW_ERR(hw,
3270 				   "[%s()] Internal error: failed to empty dir queue %d\n",
3271 				   __func__, dir_port->id);
3272 			return -EFAULT;
3273 		}
3274 
3275 		if (dlb_dir_cq_token_count(hw, dir_port)) {
3276 			DLB_HW_ERR(hw,
3277 				   "[%s()] Internal error: failed to empty dir port %d\n",
3278 				   __func__, dir_port->id);
3279 			return -EFAULT;
3280 		}
3281 	}
3282 
3283 	return 0;
3284 }
3285 
3286 static void __dlb_domain_reset_ldb_port_registers(struct dlb_hw *hw,
3287 						  struct dlb_ldb_port *port)
3288 {
3289 	union dlb_chp_ldb_pp_state_reset r0 = { {0} };
3290 
3291 	DLB_CSR_WR(hw,
3292 		   DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id),
3293 		   DLB_CHP_LDB_PP_CRD_REQ_STATE_RST);
3294 
3295 	/* Reset the port's load-balanced and directed credit state */
3296 	r0.field.dir_type = 0;
3297 	r0.field.reset_pp_state = 1;
3298 
3299 	DLB_CSR_WR(hw,
3300 		   DLB_CHP_LDB_PP_STATE_RESET(port->id),
3301 		   r0.val);
3302 
3303 	r0.field.dir_type = 1;
3304 	r0.field.reset_pp_state = 1;
3305 
3306 	DLB_CSR_WR(hw,
3307 		   DLB_CHP_LDB_PP_STATE_RESET(port->id),
3308 		   r0.val);
3309 
3310 	DLB_CSR_WR(hw,
3311 		   DLB_CHP_LDB_PP_DIR_PUSH_PTR(port->id),
3312 		   DLB_CHP_LDB_PP_DIR_PUSH_PTR_RST);
3313 
3314 	DLB_CSR_WR(hw,
3315 		   DLB_CHP_LDB_PP_LDB_PUSH_PTR(port->id),
3316 		   DLB_CHP_LDB_PP_LDB_PUSH_PTR_RST);
3317 
3318 	DLB_CSR_WR(hw,
3319 		   DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(port->id),
3320 		   DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT_RST);
3321 
3322 	DLB_CSR_WR(hw,
3323 		   DLB_CHP_LDB_PP_LDB_CRD_LWM(port->id),
3324 		   DLB_CHP_LDB_PP_LDB_CRD_LWM_RST);
3325 
3326 	DLB_CSR_WR(hw,
3327 		   DLB_CHP_LDB_PP_LDB_CRD_HWM(port->id),
3328 		   DLB_CHP_LDB_PP_LDB_CRD_HWM_RST);
3329 
3330 	DLB_CSR_WR(hw,
3331 		   DLB_CHP_LDB_LDB_PP2POOL(port->id),
3332 		   DLB_CHP_LDB_LDB_PP2POOL_RST);
3333 
3334 	DLB_CSR_WR(hw,
3335 		   DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(port->id),
3336 		   DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT_RST);
3337 
3338 	DLB_CSR_WR(hw,
3339 		   DLB_CHP_LDB_PP_DIR_CRD_LWM(port->id),
3340 		   DLB_CHP_LDB_PP_DIR_CRD_LWM_RST);
3341 
3342 	DLB_CSR_WR(hw,
3343 		   DLB_CHP_LDB_PP_DIR_CRD_HWM(port->id),
3344 		   DLB_CHP_LDB_PP_DIR_CRD_HWM_RST);
3345 
3346 	DLB_CSR_WR(hw,
3347 		   DLB_CHP_LDB_DIR_PP2POOL(port->id),
3348 		   DLB_CHP_LDB_DIR_PP2POOL_RST);
3349 
3350 	DLB_CSR_WR(hw,
3351 		   DLB_SYS_LDB_PP2LDBPOOL(port->id),
3352 		   DLB_SYS_LDB_PP2LDBPOOL_RST);
3353 
3354 	DLB_CSR_WR(hw,
3355 		   DLB_SYS_LDB_PP2DIRPOOL(port->id),
3356 		   DLB_SYS_LDB_PP2DIRPOOL_RST);
3357 
3358 	DLB_CSR_WR(hw,
3359 		   DLB_CHP_HIST_LIST_LIM(port->id),
3360 		   DLB_CHP_HIST_LIST_LIM_RST);
3361 
3362 	DLB_CSR_WR(hw,
3363 		   DLB_CHP_HIST_LIST_BASE(port->id),
3364 		   DLB_CHP_HIST_LIST_BASE_RST);
3365 
3366 	DLB_CSR_WR(hw,
3367 		   DLB_CHP_HIST_LIST_POP_PTR(port->id),
3368 		   DLB_CHP_HIST_LIST_POP_PTR_RST);
3369 
3370 	DLB_CSR_WR(hw,
3371 		   DLB_CHP_HIST_LIST_PUSH_PTR(port->id),
3372 		   DLB_CHP_HIST_LIST_PUSH_PTR_RST);
3373 
3374 	DLB_CSR_WR(hw,
3375 		   DLB_CHP_LDB_CQ_WPTR(port->id),
3376 		   DLB_CHP_LDB_CQ_WPTR_RST);
3377 
3378 	DLB_CSR_WR(hw,
3379 		   DLB_CHP_LDB_CQ_INT_DEPTH_THRSH(port->id),
3380 		   DLB_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
3381 
3382 	DLB_CSR_WR(hw,
3383 		   DLB_CHP_LDB_CQ_TMR_THRESHOLD(port->id),
3384 		   DLB_CHP_LDB_CQ_TMR_THRESHOLD_RST);
3385 
3386 	DLB_CSR_WR(hw,
3387 		   DLB_CHP_LDB_CQ_INT_ENB(port->id),
3388 		   DLB_CHP_LDB_CQ_INT_ENB_RST);
3389 
3390 	DLB_CSR_WR(hw,
3391 		   DLB_LSP_CQ_LDB_INFL_LIM(port->id),
3392 		   DLB_LSP_CQ_LDB_INFL_LIM_RST);
3393 
3394 	DLB_CSR_WR(hw,
3395 		   DLB_LSP_CQ2PRIOV(port->id),
3396 		   DLB_LSP_CQ2PRIOV_RST);
3397 
3398 	DLB_CSR_WR(hw,
3399 		   DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL(port->id),
3400 		   DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL_RST);
3401 
3402 	DLB_CSR_WR(hw,
3403 		   DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id),
3404 		   DLB_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
3405 
3406 	DLB_CSR_WR(hw,
3407 		   DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id),
3408 		   DLB_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
3409 
3410 	DLB_CSR_WR(hw,
3411 		   DLB_LSP_CQ_LDB_DSBL(port->id),
3412 		   DLB_LSP_CQ_LDB_DSBL_RST);
3413 
3414 	DLB_CSR_WR(hw,
3415 		   DLB_SYS_LDB_CQ2VF_PF(port->id),
3416 		   DLB_SYS_LDB_CQ2VF_PF_RST);
3417 
3418 	DLB_CSR_WR(hw,
3419 		   DLB_SYS_LDB_PP2VF_PF(port->id),
3420 		   DLB_SYS_LDB_PP2VF_PF_RST);
3421 
3422 	DLB_CSR_WR(hw,
3423 		   DLB_SYS_LDB_CQ_ADDR_L(port->id),
3424 		   DLB_SYS_LDB_CQ_ADDR_L_RST);
3425 
3426 	DLB_CSR_WR(hw,
3427 		   DLB_SYS_LDB_CQ_ADDR_U(port->id),
3428 		   DLB_SYS_LDB_CQ_ADDR_U_RST);
3429 
3430 	DLB_CSR_WR(hw,
3431 		   DLB_SYS_LDB_PP_ADDR_L(port->id),
3432 		   DLB_SYS_LDB_PP_ADDR_L_RST);
3433 
3434 	DLB_CSR_WR(hw,
3435 		   DLB_SYS_LDB_PP_ADDR_U(port->id),
3436 		   DLB_SYS_LDB_PP_ADDR_U_RST);
3437 
3438 	DLB_CSR_WR(hw,
3439 		   DLB_SYS_LDB_PP_V(port->id),
3440 		   DLB_SYS_LDB_PP_V_RST);
3441 
3442 	DLB_CSR_WR(hw,
3443 		   DLB_SYS_LDB_PP2VAS(port->id),
3444 		   DLB_SYS_LDB_PP2VAS_RST);
3445 
3446 	DLB_CSR_WR(hw,
3447 		   DLB_SYS_LDB_CQ_ISR(port->id),
3448 		   DLB_SYS_LDB_CQ_ISR_RST);
3449 
3450 	DLB_CSR_WR(hw,
3451 		   DLB_SYS_WBUF_LDB_FLAGS(port->id),
3452 		   DLB_SYS_WBUF_LDB_FLAGS_RST);
3453 }
3454 
3455 static void __dlb_domain_reset_dir_port_registers(struct dlb_hw *hw,
3456 						  struct dlb_dir_pq_pair *port)
3457 {
3458 	union dlb_chp_dir_pp_state_reset r0 = { {0} };
3459 
3460 	DLB_CSR_WR(hw,
3461 		   DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),
3462 		   DLB_CHP_DIR_PP_CRD_REQ_STATE_RST);
3463 
3464 	/* Reset the port's load-balanced and directed credit state */
3465 	r0.field.dir_type = 0;
3466 	r0.field.reset_pp_state = 1;
3467 
3468 	DLB_CSR_WR(hw,
3469 		   DLB_CHP_DIR_PP_STATE_RESET(port->id),
3470 		   r0.val);
3471 
3472 	r0.field.dir_type = 1;
3473 	r0.field.reset_pp_state = 1;
3474 
3475 	DLB_CSR_WR(hw,
3476 		   DLB_CHP_DIR_PP_STATE_RESET(port->id),
3477 		   r0.val);
3478 
3479 	DLB_CSR_WR(hw,
3480 		   DLB_CHP_DIR_PP_DIR_PUSH_PTR(port->id),
3481 		   DLB_CHP_DIR_PP_DIR_PUSH_PTR_RST);
3482 
3483 	DLB_CSR_WR(hw,
3484 		   DLB_CHP_DIR_PP_LDB_PUSH_PTR(port->id),
3485 		   DLB_CHP_DIR_PP_LDB_PUSH_PTR_RST);
3486 
3487 	DLB_CSR_WR(hw,
3488 		   DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(port->id),
3489 		   DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT_RST);
3490 
3491 	DLB_CSR_WR(hw,
3492 		   DLB_CHP_DIR_PP_LDB_CRD_LWM(port->id),
3493 		   DLB_CHP_DIR_PP_LDB_CRD_LWM_RST);
3494 
3495 	DLB_CSR_WR(hw,
3496 		   DLB_CHP_DIR_PP_LDB_CRD_HWM(port->id),
3497 		   DLB_CHP_DIR_PP_LDB_CRD_HWM_RST);
3498 
3499 	DLB_CSR_WR(hw,
3500 		   DLB_CHP_DIR_LDB_PP2POOL(port->id),
3501 		   DLB_CHP_DIR_LDB_PP2POOL_RST);
3502 
3503 	DLB_CSR_WR(hw,
3504 		   DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(port->id),
3505 		   DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT_RST);
3506 
3507 	DLB_CSR_WR(hw,
3508 		   DLB_CHP_DIR_PP_DIR_CRD_LWM(port->id),
3509 		   DLB_CHP_DIR_PP_DIR_CRD_LWM_RST);
3510 
3511 	DLB_CSR_WR(hw,
3512 		   DLB_CHP_DIR_PP_DIR_CRD_HWM(port->id),
3513 		   DLB_CHP_DIR_PP_DIR_CRD_HWM_RST);
3514 
3515 	DLB_CSR_WR(hw,
3516 		   DLB_CHP_DIR_DIR_PP2POOL(port->id),
3517 		   DLB_CHP_DIR_DIR_PP2POOL_RST);
3518 
3519 	DLB_CSR_WR(hw,
3520 		   DLB_SYS_DIR_PP2LDBPOOL(port->id),
3521 		   DLB_SYS_DIR_PP2LDBPOOL_RST);
3522 
3523 	DLB_CSR_WR(hw,
3524 		   DLB_SYS_DIR_PP2DIRPOOL(port->id),
3525 		   DLB_SYS_DIR_PP2DIRPOOL_RST);
3526 
3527 	DLB_CSR_WR(hw,
3528 		   DLB_CHP_DIR_CQ_WPTR(port->id),
3529 		   DLB_CHP_DIR_CQ_WPTR_RST);
3530 
3531 	DLB_CSR_WR(hw,
3532 		   DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id),
3533 		   DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
3534 
3535 	DLB_CSR_WR(hw,
3536 		   DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id),
3537 		   DLB_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
3538 
3539 	DLB_CSR_WR(hw,
3540 		   DLB_LSP_CQ_DIR_DSBL(port->id),
3541 		   DLB_LSP_CQ_DIR_DSBL_RST);
3542 
3547 	DLB_CSR_WR(hw,
3548 		   DLB_CHP_DIR_CQ_INT_DEPTH_THRSH(port->id),
3549 		   DLB_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
3550 
3551 	DLB_CSR_WR(hw,
3552 		   DLB_CHP_DIR_CQ_TMR_THRESHOLD(port->id),
3553 		   DLB_CHP_DIR_CQ_TMR_THRESHOLD_RST);
3554 
3555 	DLB_CSR_WR(hw,
3556 		   DLB_CHP_DIR_CQ_INT_ENB(port->id),
3557 		   DLB_CHP_DIR_CQ_INT_ENB_RST);
3558 
3559 	DLB_CSR_WR(hw,
3560 		   DLB_SYS_DIR_CQ2VF_PF(port->id),
3561 		   DLB_SYS_DIR_CQ2VF_PF_RST);
3562 
3563 	DLB_CSR_WR(hw,
3564 		   DLB_SYS_DIR_PP2VF_PF(port->id),
3565 		   DLB_SYS_DIR_PP2VF_PF_RST);
3566 
3567 	DLB_CSR_WR(hw,
3568 		   DLB_SYS_DIR_CQ_ADDR_L(port->id),
3569 		   DLB_SYS_DIR_CQ_ADDR_L_RST);
3570 
3571 	DLB_CSR_WR(hw,
3572 		   DLB_SYS_DIR_CQ_ADDR_U(port->id),
3573 		   DLB_SYS_DIR_CQ_ADDR_U_RST);
3574 
3575 	DLB_CSR_WR(hw,
3576 		   DLB_SYS_DIR_PP_ADDR_L(port->id),
3577 		   DLB_SYS_DIR_PP_ADDR_L_RST);
3578 
3579 	DLB_CSR_WR(hw,
3580 		   DLB_SYS_DIR_PP_ADDR_U(port->id),
3581 		   DLB_SYS_DIR_PP_ADDR_U_RST);
3582 
3583 	DLB_CSR_WR(hw,
3584 		   DLB_SYS_DIR_PP_V(port->id),
3585 		   DLB_SYS_DIR_PP_V_RST);
3586 
3587 	DLB_CSR_WR(hw,
3588 		   DLB_SYS_DIR_PP2VAS(port->id),
3589 		   DLB_SYS_DIR_PP2VAS_RST);
3590 
3591 	DLB_CSR_WR(hw,
3592 		   DLB_SYS_DIR_CQ_ISR(port->id),
3593 		   DLB_SYS_DIR_CQ_ISR_RST);
3594 
3595 	DLB_CSR_WR(hw,
3596 		   DLB_SYS_WBUF_DIR_FLAGS(port->id),
3597 		   DLB_SYS_WBUF_DIR_FLAGS_RST);
3598 }
3599 
3600 static void dlb_domain_reset_dir_port_registers(struct dlb_hw *hw,
3601 						struct dlb_domain *domain)
3602 {
3603 	struct dlb_list_entry *iter;
3604 	RTE_SET_USED(iter);
3605 	struct dlb_dir_pq_pair *port;
3606 
3607 	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
3608 		__dlb_domain_reset_dir_port_registers(hw, port);
3609 }
3610 
3611 static void dlb_domain_reset_ldb_queue_registers(struct dlb_hw *hw,
3612 						 struct dlb_domain *domain)
3613 {
3614 	struct dlb_list_entry *iter;
3615 	RTE_SET_USED(iter);
3616 	struct dlb_ldb_queue *queue;
3617 
3618 	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3619 		DLB_CSR_WR(hw,
3620 			   DLB_AQED_PIPE_FL_LIM(queue->id),
3621 			   DLB_AQED_PIPE_FL_LIM_RST);
3622 
3623 		DLB_CSR_WR(hw,
3624 			   DLB_AQED_PIPE_FL_BASE(queue->id),
3625 			   DLB_AQED_PIPE_FL_BASE_RST);
3626 
3627 		DLB_CSR_WR(hw,
3628 			   DLB_AQED_PIPE_FL_POP_PTR(queue->id),
3629 			   DLB_AQED_PIPE_FL_POP_PTR_RST);
3630 
3631 		DLB_CSR_WR(hw,
3632 			   DLB_AQED_PIPE_FL_PUSH_PTR(queue->id),
3633 			   DLB_AQED_PIPE_FL_PUSH_PTR_RST);
3634 
3635 		DLB_CSR_WR(hw,
3636 			   DLB_AQED_PIPE_QID_FID_LIM(queue->id),
3637 			   DLB_AQED_PIPE_QID_FID_LIM_RST);
3638 
3639 		DLB_CSR_WR(hw,
3640 			   DLB_LSP_QID_AQED_ACTIVE_LIM(queue->id),
3641 			   DLB_LSP_QID_AQED_ACTIVE_LIM_RST);
3642 
3643 		DLB_CSR_WR(hw,
3644 			   DLB_LSP_QID_LDB_INFL_LIM(queue->id),
3645 			   DLB_LSP_QID_LDB_INFL_LIM_RST);
3646 
3647 		DLB_CSR_WR(hw,
3648 			   DLB_SYS_LDB_QID_V(queue->id),
3649 			   DLB_SYS_LDB_QID_V_RST);
3650 
3655 		DLB_CSR_WR(hw,
3656 			   DLB_CHP_ORD_QID_SN(queue->id),
3657 			   DLB_CHP_ORD_QID_SN_RST);
3658 
3659 		DLB_CSR_WR(hw,
3660 			   DLB_CHP_ORD_QID_SN_MAP(queue->id),
3661 			   DLB_CHP_ORD_QID_SN_MAP_RST);
3662 
3663 		DLB_CSR_WR(hw,
3664 			   DLB_RO_PIPE_QID2GRPSLT(queue->id),
3665 			   DLB_RO_PIPE_QID2GRPSLT_RST);
3666 	}
3667 }
3668 
3669 static void dlb_domain_reset_dir_queue_registers(struct dlb_hw *hw,
3670 						 struct dlb_domain *domain)
3671 {
3672 	struct dlb_list_entry *iter;
3673 	RTE_SET_USED(iter);
3674 	struct dlb_dir_pq_pair *queue;
3675 
3676 	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
3677 		DLB_CSR_WR(hw,
3678 			   DLB_SYS_DIR_QID_V(queue->id),
3679 			   DLB_SYS_DIR_QID_V_RST);
3680 	}
3681 }
3682 
3683 static void dlb_domain_reset_ldb_pool_registers(struct dlb_hw *hw,
3684 						struct dlb_domain *domain)
3685 {
3686 	struct dlb_list_entry *iter;
3687 	RTE_SET_USED(iter);
3688 	struct dlb_credit_pool *pool;
3689 
3690 	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {
3691 		DLB_CSR_WR(hw,
3692 			   DLB_CHP_LDB_POOL_CRD_LIM(pool->id),
3693 			   DLB_CHP_LDB_POOL_CRD_LIM_RST);
3694 
3695 		DLB_CSR_WR(hw,
3696 			   DLB_CHP_LDB_POOL_CRD_CNT(pool->id),
3697 			   DLB_CHP_LDB_POOL_CRD_CNT_RST);
3698 
3699 		DLB_CSR_WR(hw,
3700 			   DLB_CHP_QED_FL_BASE(pool->id),
3701 			   DLB_CHP_QED_FL_BASE_RST);
3702 
3703 		DLB_CSR_WR(hw,
3704 			   DLB_CHP_QED_FL_LIM(pool->id),
3705 			   DLB_CHP_QED_FL_LIM_RST);
3706 
3707 		DLB_CSR_WR(hw,
3708 			   DLB_CHP_QED_FL_PUSH_PTR(pool->id),
3709 			   DLB_CHP_QED_FL_PUSH_PTR_RST);
3710 
3711 		DLB_CSR_WR(hw,
3712 			   DLB_CHP_QED_FL_POP_PTR(pool->id),
3713 			   DLB_CHP_QED_FL_POP_PTR_RST);
3714 	}
3715 }
3716 
3717 static void dlb_domain_reset_dir_pool_registers(struct dlb_hw *hw,
3718 						struct dlb_domain *domain)
3719 {
3720 	struct dlb_list_entry *iter;
3721 	RTE_SET_USED(iter);
3722 	struct dlb_credit_pool *pool;
3723 
3724 	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
3725 		DLB_CSR_WR(hw,
3726 			   DLB_CHP_DIR_POOL_CRD_LIM(pool->id),
3727 			   DLB_CHP_DIR_POOL_CRD_LIM_RST);
3728 
3729 		DLB_CSR_WR(hw,
3730 			   DLB_CHP_DIR_POOL_CRD_CNT(pool->id),
3731 			   DLB_CHP_DIR_POOL_CRD_CNT_RST);
3732 
3733 		DLB_CSR_WR(hw,
3734 			   DLB_CHP_DQED_FL_BASE(pool->id),
3735 			   DLB_CHP_DQED_FL_BASE_RST);
3736 
3737 		DLB_CSR_WR(hw,
3738 			   DLB_CHP_DQED_FL_LIM(pool->id),
3739 			   DLB_CHP_DQED_FL_LIM_RST);
3740 
3741 		DLB_CSR_WR(hw,
3742 			   DLB_CHP_DQED_FL_PUSH_PTR(pool->id),
3743 			   DLB_CHP_DQED_FL_PUSH_PTR_RST);
3744 
3745 		DLB_CSR_WR(hw,
3746 			   DLB_CHP_DQED_FL_POP_PTR(pool->id),
3747 			   DLB_CHP_DQED_FL_POP_PTR_RST);
3748 	}
3749 }
3750 
3751 static void dlb_domain_reset_ldb_port_registers(struct dlb_hw *hw,
3752 						struct dlb_domain *domain)
3753 {
3754 	struct dlb_list_entry *iter;
3755 	RTE_SET_USED(iter);
3756 	struct dlb_ldb_port *port;
3757 
3758 	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
3759 		__dlb_domain_reset_ldb_port_registers(hw, port);
3760 }
3761 
3762 static void dlb_domain_reset_registers(struct dlb_hw *hw,
3763 				       struct dlb_domain *domain)
3764 {
3765 	dlb_domain_reset_ldb_port_registers(hw, domain);
3766 
3767 	dlb_domain_reset_dir_port_registers(hw, domain);
3768 
3769 	dlb_domain_reset_ldb_queue_registers(hw, domain);
3770 
3771 	dlb_domain_reset_dir_queue_registers(hw, domain);
3772 
3773 	dlb_domain_reset_ldb_pool_registers(hw, domain);
3774 
3775 	dlb_domain_reset_dir_pool_registers(hw, domain);
3776 }
3777 
3778 static int dlb_domain_reset_software_state(struct dlb_hw *hw,
3779 					   struct dlb_domain *domain)
3780 {
3781 	struct dlb_ldb_queue *tmp_ldb_queue;
3782 	RTE_SET_USED(tmp_ldb_queue);
3783 	struct dlb_dir_pq_pair *tmp_dir_port;
3784 	RTE_SET_USED(tmp_dir_port);
3785 	struct dlb_ldb_port *tmp_ldb_port;
3786 	RTE_SET_USED(tmp_ldb_port);
3787 	struct dlb_credit_pool *tmp_pool;
3788 	RTE_SET_USED(tmp_pool);
3789 	struct dlb_list_entry *iter1;
3790 	RTE_SET_USED(iter1);
3791 	struct dlb_list_entry *iter2;
3792 	RTE_SET_USED(iter2);
3793 	struct dlb_ldb_queue *ldb_queue;
3794 	struct dlb_dir_pq_pair *dir_port;
3795 	struct dlb_ldb_port *ldb_port;
3796 	struct dlb_credit_pool *pool;
3797 
3798 	struct dlb_function_resources *rsrcs;
3799 	struct dlb_list_head *list;
3800 	int ret;
3801 
3802 	rsrcs = domain->parent_func;
3803 
3804 	/* Move the domain's ldb queues to the function's avail list */
3805 	list = &domain->used_ldb_queues;
3806 	DLB_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3807 		if (ldb_queue->sn_cfg_valid) {
3808 			struct dlb_sn_group *grp;
3809 
3810 			grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
3811 
3812 			dlb_sn_group_free_slot(grp, ldb_queue->sn_slot);
3813 			ldb_queue->sn_cfg_valid = false;
3814 		}
3815 
3816 		ldb_queue->owned = false;
3817 		ldb_queue->num_mappings = 0;
3818 		ldb_queue->num_pending_additions = 0;
3819 
3820 		dlb_list_del(&domain->used_ldb_queues, &ldb_queue->domain_list);
3821 		dlb_list_add(&rsrcs->avail_ldb_queues, &ldb_queue->func_list);
3822 		rsrcs->num_avail_ldb_queues++;
3823 	}
3824 
3825 	list = &domain->avail_ldb_queues;
3826 	DLB_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3827 		ldb_queue->owned = false;
3828 
3829 		dlb_list_del(&domain->avail_ldb_queues,
3830 			     &ldb_queue->domain_list);
3831 		dlb_list_add(&rsrcs->avail_ldb_queues,
3832 			     &ldb_queue->func_list);
3833 		rsrcs->num_avail_ldb_queues++;
3834 	}
3835 
3836 	/* Move the domain's ldb ports to the function's avail list */
3837 	list = &domain->used_ldb_ports;
3838 	DLB_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port, iter1, iter2) {
3839 		int i;
3840 
3841 		ldb_port->owned = false;
3842 		ldb_port->configured = false;
3843 		ldb_port->num_pending_removals = 0;
3844 		ldb_port->num_mappings = 0;
3845 		for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++)
3846 			ldb_port->qid_map[i].state = DLB_QUEUE_UNMAPPED;
3847 
3848 		dlb_list_del(&domain->used_ldb_ports, &ldb_port->domain_list);
3849 		dlb_list_add(&rsrcs->avail_ldb_ports, &ldb_port->func_list);
3850 		rsrcs->num_avail_ldb_ports++;
3851 	}
3852 
3853 	list = &domain->avail_ldb_ports;
3854 	DLB_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port, iter1, iter2) {
3855 		ldb_port->owned = false;
3856 
3857 		dlb_list_del(&domain->avail_ldb_ports, &ldb_port->domain_list);
3858 		dlb_list_add(&rsrcs->avail_ldb_ports, &ldb_port->func_list);
3859 		rsrcs->num_avail_ldb_ports++;
3860 	}
3861 
3862 	/* Move the domain's dir ports to the function's avail list */
3863 	list = &domain->used_dir_pq_pairs;
3864 	DLB_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3865 		dir_port->owned = false;
3866 		dir_port->port_configured = false;
3867 
3868 		dlb_list_del(&domain->used_dir_pq_pairs,
3869 			     &dir_port->domain_list);
3870 
3871 		dlb_list_add(&rsrcs->avail_dir_pq_pairs,
3872 			     &dir_port->func_list);
3873 		rsrcs->num_avail_dir_pq_pairs++;
3874 	}
3875 
3876 	list = &domain->avail_dir_pq_pairs;
3877 	DLB_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3878 		dir_port->owned = false;
3879 
3880 		dlb_list_del(&domain->avail_dir_pq_pairs,
3881 			     &dir_port->domain_list);
3882 
3883 		dlb_list_add(&rsrcs->avail_dir_pq_pairs,
3884 			     &dir_port->func_list);
3885 		rsrcs->num_avail_dir_pq_pairs++;
3886 	}
3887 
3888 	/* Return hist list entries to the function */
3889 	ret = dlb_bitmap_set_range(rsrcs->avail_hist_list_entries,
3890 				   domain->hist_list_entry_base,
3891 				   domain->total_hist_list_entries);
3892 	if (ret) {
3893 		DLB_HW_ERR(hw,
3894 			   "[%s()] Internal error: domain hist list base does not match the function's bitmap.\n",
3895 			   __func__);
3896 		return -EFAULT;
3897 	}
3898 
3899 	domain->total_hist_list_entries = 0;
3900 	domain->avail_hist_list_entries = 0;
3901 	domain->hist_list_entry_base = 0;
3902 	domain->hist_list_entry_offset = 0;
3903 
3904 	/* Return QED entries to the function */
3905 	ret = dlb_bitmap_set_range(rsrcs->avail_qed_freelist_entries,
3906 				   domain->qed_freelist.base,
3907 				   (domain->qed_freelist.bound -
3908 					domain->qed_freelist.base));
3909 	if (ret) {
3910 		DLB_HW_ERR(hw,
3911 			   "[%s()] Internal error: domain QED base does not match the function's bitmap.\n",
3912 			   __func__);
3913 		return -EFAULT;
3914 	}
3915 
3916 	domain->qed_freelist.base = 0;
3917 	domain->qed_freelist.bound = 0;
3918 	domain->qed_freelist.offset = 0;
3919 
3920 	/* Return DQED entries back to the function */
3921 	ret = dlb_bitmap_set_range(rsrcs->avail_dqed_freelist_entries,
3922 				   domain->dqed_freelist.base,
3923 				   (domain->dqed_freelist.bound -
3924 					domain->dqed_freelist.base));
3925 	if (ret) {
3926 		DLB_HW_ERR(hw,
3927 			   "[%s()] Internal error: domain DQED base does not match the function's bitmap.\n",
3928 			   __func__);
3929 		return -EFAULT;
3930 	}
3931 
3932 	domain->dqed_freelist.base = 0;
3933 	domain->dqed_freelist.bound = 0;
3934 	domain->dqed_freelist.offset = 0;
3935 
3936 	/* Return AQED entries back to the function */
3937 	ret = dlb_bitmap_set_range(rsrcs->avail_aqed_freelist_entries,
3938 				   domain->aqed_freelist.base,
3939 				   (domain->aqed_freelist.bound -
3940 					domain->aqed_freelist.base));
3941 	if (ret) {
3942 		DLB_HW_ERR(hw,
3943 			   "[%s()] Internal error: domain AQED base does not match the function's bitmap.\n",
3944 			   __func__);
3945 		return -EFAULT;
3946 	}
3947 
3948 	domain->aqed_freelist.base = 0;
3949 	domain->aqed_freelist.bound = 0;
3950 	domain->aqed_freelist.offset = 0;
3951 
3952 	/* Return ldb credit pools back to the function's avail list */
3953 	list = &domain->used_ldb_credit_pools;
3954 	DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
3955 		pool->owned = false;
3956 		pool->configured = false;
3957 
3958 		dlb_list_del(&domain->used_ldb_credit_pools,
3959 			     &pool->domain_list);
3960 		dlb_list_add(&rsrcs->avail_ldb_credit_pools,
3961 			     &pool->func_list);
3962 		rsrcs->num_avail_ldb_credit_pools++;
3963 	}
3964 
3965 	list = &domain->avail_ldb_credit_pools;
3966 	DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
3967 		pool->owned = false;
3968 
3969 		dlb_list_del(&domain->avail_ldb_credit_pools,
3970 			     &pool->domain_list);
3971 		dlb_list_add(&rsrcs->avail_ldb_credit_pools,
3972 			     &pool->func_list);
3973 		rsrcs->num_avail_ldb_credit_pools++;
3974 	}
3975 
3976 	/* Move dir credit pools back to the function */
3977 	list = &domain->used_dir_credit_pools;
3978 	DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
3979 		pool->owned = false;
3980 		pool->configured = false;
3981 
3982 		dlb_list_del(&domain->used_dir_credit_pools,
3983 			     &pool->domain_list);
3984 		dlb_list_add(&rsrcs->avail_dir_credit_pools,
3985 			     &pool->func_list);
3986 		rsrcs->num_avail_dir_credit_pools++;
3987 	}
3988 
3989 	list = &domain->avail_dir_credit_pools;
3990 	DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
3991 		pool->owned = false;
3992 
3993 		dlb_list_del(&domain->avail_dir_credit_pools,
3994 			     &pool->domain_list);
3995 		dlb_list_add(&rsrcs->avail_dir_credit_pools,
3996 			     &pool->func_list);
3997 		rsrcs->num_avail_dir_credit_pools++;
3998 	}
3999 
4000 	domain->num_pending_removals = 0;
4001 	domain->num_pending_additions = 0;
4002 	domain->configured = false;
4003 	domain->started = false;
4004 
4005 	/* Move the domain out of the used_domains list and back to the
4006 	 * function's avail_domains list.
4007 	 */
4008 	dlb_list_del(&rsrcs->used_domains, &domain->func_list);
4009 	dlb_list_add(&rsrcs->avail_domains, &domain->func_list);
4010 	rsrcs->num_avail_domains++;
4011 
4012 	return 0;
4013 }
4014 
4015 static void dlb_log_reset_domain(struct dlb_hw *hw, u32 domain_id)
4016 {
4017 	DLB_HW_INFO(hw, "DLB reset domain:\n");
4018 	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
4019 }
4020 
4021 /**
4022  * dlb_reset_domain() - Reset a DLB scheduling domain and its associated
4023  *	hardware resources.
4024  * @hw:	       Contains the current state of the DLB hardware.
4025  * @domain_id: ID of the scheduling domain to reset.
4027  *
4028  * Note: User software *must* stop sending to this domain's producer ports
4029  * before invoking this function, otherwise undefined behavior will result.
4030  *
4031  * Return: returns < 0 on error, 0 otherwise.
4032  */
4033 int dlb_reset_domain(struct dlb_hw *hw, u32 domain_id)
4034 {
4035 	struct dlb_domain *domain;
4036 	int ret;
4037 
4038 	dlb_log_reset_domain(hw, domain_id);
4039 
4040 	domain = dlb_get_domain_from_id(hw, domain_id);
4041 
4042 	if (domain == NULL || !domain->configured)
4043 		return -EINVAL;
4044 
4045 	/* For each queue owned by this domain, disable its write permissions so
4046 	 * that any traffic sent to it is dropped. Well-behaved software should
4047 	 * not be sending QEs at this point.
4048 	 */
4049 	dlb_domain_disable_dir_queue_write_perms(hw, domain);
4050 
4051 	dlb_domain_disable_ldb_queue_write_perms(hw, domain);
4052 
4053 	/* Disable credit updates and turn off completion tracking on all the
4054 	 * domain's PPs.
4055 	 */
4056 	dlb_domain_disable_dir_port_crd_updates(hw, domain);
4057 
4058 	dlb_domain_disable_ldb_port_crd_updates(hw, domain);
4059 
4060 	dlb_domain_disable_dir_port_interrupts(hw, domain);
4061 
4062 	dlb_domain_disable_ldb_port_interrupts(hw, domain);
4063 
4064 	dlb_domain_disable_ldb_seq_checks(hw, domain);
4065 
4066 	/* Disable the LDB CQs and drain them in order to complete the map and
4067 	 * unmap procedures, which require zero CQ inflights and zero QID
4068 	 * inflights respectively.
4069 	 */
4070 	dlb_domain_disable_ldb_cqs(hw, domain);
4071 
4072 	ret = dlb_domain_drain_ldb_cqs(hw, domain, false);
4073 	if (ret < 0)
4074 		return ret;
4075 
4076 	ret = dlb_domain_wait_for_ldb_cqs_to_empty(hw, domain);
4077 	if (ret < 0)
4078 		return ret;
4079 
4080 	ret = dlb_domain_finish_unmap_qid_procedures(hw, domain);
4081 	if (ret < 0)
4082 		return ret;
4083 
4084 	ret = dlb_domain_finish_map_qid_procedures(hw, domain);
4085 	if (ret < 0)
4086 		return ret;
4087 
4088 	/* Re-enable the CQs in order to drain the mapped queues. */
4089 	dlb_domain_enable_ldb_cqs(hw, domain);
4090 
4091 	ret = dlb_domain_drain_mapped_queues(hw, domain);
4092 	if (ret < 0)
4093 		return ret;
4094 
4095 	ret = dlb_domain_drain_unmapped_queues(hw, domain);
4096 	if (ret < 0)
4097 		return ret;
4098 
4099 	ret = dlb_domain_wait_for_ldb_pool_refill(hw, domain);
4100 	if (ret) {
4101 		DLB_HW_ERR(hw,
4102 			   "[%s()] Internal error: LDB credits failed to refill\n",
4103 			   __func__);
4104 		return ret;
4105 	}
4106 
4107 	/* Done draining LDB QEs, so disable the CQs. */
4108 	dlb_domain_disable_ldb_cqs(hw, domain);
4109 
4110 	/* Directed queues are reset in dlb_domain_reset_hw_resources(), but
4111 	 * that process does not decrement the directed queue size counters used
4112 	 * by SMON for its average DQED depth measurement. So, we manually drain
4113 	 * the directed queues here.
4114 	 */
4115 	dlb_domain_drain_dir_queues(hw, domain);
4116 
4117 	ret = dlb_domain_wait_for_dir_pool_refill(hw, domain);
4118 	if (ret) {
4119 		DLB_HW_ERR(hw,
4120 			   "[%s()] Internal error: DIR credits failed to refill\n",
4121 			   __func__);
4122 		return ret;
4123 	}
4124 
4125 	/* Done draining DIR QEs, so disable the CQs. */
4126 	dlb_domain_disable_dir_cqs(hw, domain);
4127 
4128 	dlb_domain_disable_dir_producer_ports(hw, domain);
4129 
4130 	dlb_domain_disable_ldb_producer_ports(hw, domain);
4131 
4132 	dlb_domain_disable_dir_pools(hw, domain);
4133 
4134 	dlb_domain_disable_ldb_pools(hw, domain);
4135 
4136 	/* Reset the QID, credit pool, and CQ hardware.
4137 	 *
4138 	 * Note: DLB 1.0 A0 h/w does not disarm CQ interrupts during sched
4139 	 * domain reset.
4140 	 * A spurious interrupt can occur on subsequent use of a reset CQ.
4141 	 */
4142 	ret = dlb_domain_reset_hw_resources(hw, domain);
4143 	if (ret)
4144 		return ret;
4145 
4146 	ret = dlb_domain_verify_reset_success(hw, domain);
4147 	if (ret)
4148 		return ret;
4149 
4150 	dlb_domain_reset_registers(hw, domain);
4151 
4152 	/* Hardware reset complete. Reset the domain's software state */
4153 	ret = dlb_domain_reset_software_state(hw, domain);
4154 	if (ret)
4155 		return ret;
4156 
4157 	return 0;
4158 }
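
/* Illustrative usage sketch (not part of the driver): a PF-level caller
 * resetting a domain after quiescing software. The "hw" pointer and
 * "domain_id" value are assumptions for the example.
 *
 *	// All application threads have stopped enqueueing to the domain's
 *	// producer ports at this point (see the note above).
 *	if (dlb_reset_domain(hw, domain_id))
 *		DLB_HW_ERR(hw, "failed to reset domain %u\n", domain_id);
 */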
4159 
4160 void dlb_hw_get_num_resources(struct dlb_hw *hw,
4161 			      struct dlb_get_num_resources_args *arg)
4162 {
4163 	struct dlb_function_resources *rsrcs;
4164 	struct dlb_bitmap *map;
4165 
4166 	rsrcs = &hw->pf;
4167 
4168 	arg->num_sched_domains = rsrcs->num_avail_domains;
4169 
4170 	arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;
4171 
4172 	arg->num_ldb_ports = rsrcs->num_avail_ldb_ports;
4173 
4174 	arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;
4175 
4176 	map = rsrcs->avail_aqed_freelist_entries;
4177 
4178 	arg->num_atomic_inflights = dlb_bitmap_count(map);
4179 
4180 	arg->max_contiguous_atomic_inflights =
4181 		dlb_bitmap_longest_set_range(map);
4182 
4183 	map = rsrcs->avail_hist_list_entries;
4184 
4185 	arg->num_hist_list_entries = dlb_bitmap_count(map);
4186 
4187 	arg->max_contiguous_hist_list_entries =
4188 		dlb_bitmap_longest_set_range(map);
4189 
4190 	map = rsrcs->avail_qed_freelist_entries;
4191 
4192 	arg->num_ldb_credits = dlb_bitmap_count(map);
4193 
4194 	arg->max_contiguous_ldb_credits = dlb_bitmap_longest_set_range(map);
4195 
4196 	map = rsrcs->avail_dqed_freelist_entries;
4197 
4198 	arg->num_dir_credits = dlb_bitmap_count(map);
4199 
4200 	arg->max_contiguous_dir_credits = dlb_bitmap_longest_set_range(map);
4201 
4202 	arg->num_ldb_credit_pools = rsrcs->num_avail_ldb_credit_pools;
4203 
4204 	arg->num_dir_credit_pools = rsrcs->num_avail_dir_credit_pools;
4205 }
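
/* Illustrative usage sketch (not part of the driver): querying the PF's
 * remaining resources before attempting to carve out a scheduling domain.
 * "needed_ldb_queues" is a hypothetical caller-side value.
 *
 *	struct dlb_get_num_resources_args rsrcs;
 *
 *	dlb_hw_get_num_resources(hw, &rsrcs);
 *	if (rsrcs.num_sched_domains == 0 ||
 *	    rsrcs.num_ldb_queues < needed_ldb_queues)
 *		return -ENOSPC;
 */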
4206 
4207 void dlb_hw_disable_vf_to_pf_isr_pend_err(struct dlb_hw *hw)
4208 {
4209 	union dlb_sys_sys_alarm_int_enable r0;
4210 
4211 	r0.val = DLB_CSR_RD(hw, DLB_SYS_SYS_ALARM_INT_ENABLE);
4212 
4213 	r0.field.vf_to_pf_isr_pend_error = 0;
4214 
4215 	DLB_CSR_WR(hw, DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);
4216 }
4217 
4218 static void dlb_configure_ldb_queue(struct dlb_hw *hw,
4219 				    struct dlb_domain *domain,
4220 				    struct dlb_ldb_queue *queue,
4221 				    struct dlb_create_ldb_queue_args *args)
4222 {
4223 	union dlb_sys_ldb_vasqid_v r0 = { {0} };
4224 	union dlb_lsp_qid_ldb_infl_lim r1 = { {0} };
4225 	union dlb_lsp_qid_aqed_active_lim r2 = { {0} };
4226 	union dlb_aqed_pipe_fl_lim r3 = { {0} };
4227 	union dlb_aqed_pipe_fl_base r4 = { {0} };
4228 	union dlb_chp_ord_qid_sn_map r7 = { {0} };
4229 	union dlb_sys_ldb_qid_cfg_v r10 = { {0} };
4230 	union dlb_sys_ldb_qid_v r11 = { {0} };
4231 	union dlb_aqed_pipe_fl_push_ptr r5 = { {0} };
4232 	union dlb_aqed_pipe_fl_pop_ptr r6 = { {0} };
4233 	union dlb_aqed_pipe_qid_fid_lim r8 = { {0} };
4234 	union dlb_ro_pipe_qid2grpslt r9 = { {0} };
4235 	struct dlb_sn_group *sn_group;
4236 	unsigned int offs;
4237 
4238 	/* QID write permissions are turned on when the domain is started */
4239 	r0.field.vasqid_v = 0;
4240 
4241 	offs = domain->id * DLB_MAX_NUM_LDB_QUEUES + queue->id;
4242 
4243 	DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(offs), r0.val);
4244 
4245 	/*
4246 	 * Unordered QIDs get 4K inflights, ordered get as many as the number
4247 	 * of sequence numbers.
4248 	 */
4249 	r1.field.limit = args->num_qid_inflights;
4250 
4251 	DLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), r1.val);
4252 
4253 	r2.field.limit = queue->aqed_freelist.bound -
4254 			 queue->aqed_freelist.base;
4255 
4256 	if (r2.field.limit > DLB_MAX_NUM_AQOS_ENTRIES)
4257 		r2.field.limit = DLB_MAX_NUM_AQOS_ENTRIES;
4258 
4259 	/* AQOS */
4260 	DLB_CSR_WR(hw, DLB_LSP_QID_AQED_ACTIVE_LIM(queue->id), r2.val);
4261 
4262 	r3.field.freelist_disable = 0;
4263 	r3.field.limit = queue->aqed_freelist.bound - 1;
4264 
4265 	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_LIM(queue->id), r3.val);
4266 
4267 	r4.field.base = queue->aqed_freelist.base;
4268 
4269 	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_BASE(queue->id), r4.val);
4270 
4271 	r5.field.push_ptr = r4.field.base;
4272 	r5.field.generation = 1;
4273 
4274 	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_PUSH_PTR(queue->id), r5.val);
4275 
4276 	r6.field.pop_ptr = r4.field.base;
4277 	r6.field.generation = 0;
4278 
4279 	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_POP_PTR(queue->id), r6.val);
4280 
4281 	/* Configure SNs */
4282 	sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
4283 	r7.field.mode = sn_group->mode;
4284 	r7.field.slot = queue->sn_slot;
4285 	r7.field.grp  = sn_group->id;
4286 
4287 	DLB_CSR_WR(hw, DLB_CHP_ORD_QID_SN_MAP(queue->id), r7.val);
4288 
4289 	/*
4290 	 * This register limits the number of inflight flows a queue can have
4291 	 * at one time.  It has an upper bound of 2048, but can be
4292 	 * over-subscribed. 512 is chosen so that a single queue doesn't use
4293 	 * the entire atomic storage, but can use a substantial portion if
4294 	 * needed.
4295 	 */
4296 	r8.field.qid_fid_limit = 512;
4297 
4298 	DLB_CSR_WR(hw, DLB_AQED_PIPE_QID_FID_LIM(queue->id), r8.val);
4299 
4300 	r9.field.group = sn_group->id;
4301 	r9.field.slot = queue->sn_slot;
4302 
4303 	DLB_CSR_WR(hw, DLB_RO_PIPE_QID2GRPSLT(queue->id), r9.val);
4304 
4305 	r10.field.sn_cfg_v = (args->num_sequence_numbers != 0);
4306 	r10.field.fid_cfg_v = (args->num_atomic_inflights != 0);
4307 
4308 	DLB_CSR_WR(hw, DLB_SYS_LDB_QID_CFG_V(queue->id), r10.val);
4309 
4310 	r11.field.qid_v = 1;
4311 
4312 	DLB_CSR_WR(hw, DLB_SYS_LDB_QID_V(queue->id), r11.val);
4313 }
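
/* Worked example of the inflight sizing above (illustrative values): an
 * unordered queue is created with num_qid_inflights = 4096 (the 4K cap
 * enforced at creation), while an ordered queue in an SN group providing
 * 512 sequence numbers per queue must use num_qid_inflights <= 512, since
 * each in-flight ordered QE holds a sequence number until it completes.
 */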
4314 
4315 int dlb_get_group_sequence_numbers(struct dlb_hw *hw, unsigned int group_id)
4316 {
4317 	if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
4318 		return -EINVAL;
4319 
4320 	return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
4321 }
4322 
4323 int dlb_get_group_sequence_number_occupancy(struct dlb_hw *hw,
4324 					    unsigned int group_id)
4325 {
4326 	if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
4327 		return -EINVAL;
4328 
4329 	return dlb_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
4330 }
4331 
4332 static void dlb_log_set_group_sequence_numbers(struct dlb_hw *hw,
4333 					       unsigned int group_id,
4334 					       unsigned long val)
4335 {
4336 	DLB_HW_INFO(hw, "DLB set group sequence numbers:\n");
4337 	DLB_HW_INFO(hw, "\tGroup ID: %u\n", group_id);
4338 	DLB_HW_INFO(hw, "\tValue:    %lu\n", val);
4339 }
4340 
4341 int dlb_set_group_sequence_numbers(struct dlb_hw *hw,
4342 				   unsigned int group_id,
4343 				   unsigned long val)
4344 {
4345 	u32 valid_allocations[6] = {32, 64, 128, 256, 512, 1024};
4346 	union dlb_ro_pipe_grp_sn_mode r0 = { {0} };
4347 	struct dlb_sn_group *group;
4348 	int mode;
4349 
4350 	if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
4351 		return -EINVAL;
4352 
4353 	group = &hw->rsrcs.sn_groups[group_id];
4354 
4355 	/* Once the first load-balanced queue using an SN group is configured,
4356 	 * the group cannot be changed.
4357 	 */
4358 	if (group->slot_use_bitmap != 0)
4359 		return -EPERM;
4360 
4361 	for (mode = 0; mode < DLB_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
4362 		if (val == valid_allocations[mode])
4363 			break;
4364 
4365 	if (mode == DLB_MAX_NUM_SEQUENCE_NUMBER_MODES)
4366 		return -EINVAL;
4367 
4368 	group->mode = mode;
4369 	group->sequence_numbers_per_queue = val;
4370 
4371 	r0.field.sn_mode_0 = hw->rsrcs.sn_groups[0].mode;
4372 	r0.field.sn_mode_1 = hw->rsrcs.sn_groups[1].mode;
4373 	r0.field.sn_mode_2 = hw->rsrcs.sn_groups[2].mode;
4374 	r0.field.sn_mode_3 = hw->rsrcs.sn_groups[3].mode;
4375 
4376 	DLB_CSR_WR(hw, DLB_RO_PIPE_GRP_SN_MODE, r0.val);
4377 
4378 	dlb_log_set_group_sequence_numbers(hw, group_id, val);
4379 
4380 	return 0;
4381 }
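
/* Illustrative usage sketch (not part of the driver): partitioning the
 * sequence number groups before any ordered queue is configured. The group
 * IDs and values here are assumptions for the example; only 32, 64, 128,
 * 256, 512, and 1024 are accepted.
 *
 *	if (dlb_set_group_sequence_numbers(hw, 0, 1024) ||
 *	    dlb_set_group_sequence_numbers(hw, 1, 64))
 *		return -EINVAL;
 */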
4382 
4383 static int
4384 dlb_ldb_queue_attach_to_sn_group(struct dlb_hw *hw,
4385 				 struct dlb_ldb_queue *queue,
4386 				 struct dlb_create_ldb_queue_args *args)
4387 {
4388 	int slot = -1;
4389 	int i;
4390 
4391 	queue->sn_cfg_valid = false;
4392 
4393 	if (args->num_sequence_numbers == 0)
4394 		return 0;
4395 
4396 	for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
4397 		struct dlb_sn_group *group = &hw->rsrcs.sn_groups[i];
4398 
4399 		if (group->sequence_numbers_per_queue ==
4400 		    args->num_sequence_numbers &&
4401 		    !dlb_sn_group_full(group)) {
4402 			slot = dlb_sn_group_alloc_slot(group);
4403 			if (slot >= 0)
4404 				break;
4405 		}
4406 	}
4407 
4408 	if (slot == -1) {
4409 		DLB_HW_ERR(hw,
4410 			   "[%s():%d] Internal error: no sequence number slots available\n",
4411 			   __func__, __LINE__);
4412 		return -EFAULT;
4413 	}
4414 
4415 	queue->sn_cfg_valid = true;
4416 	queue->sn_group = i;
4417 	queue->sn_slot = slot;
4418 	return 0;
4419 }
4420 
4421 static int
4422 dlb_ldb_queue_attach_resources(struct dlb_hw *hw,
4423 			       struct dlb_domain *domain,
4424 			       struct dlb_ldb_queue *queue,
4425 			       struct dlb_create_ldb_queue_args *args)
4426 {
4427 	int ret;
4428 
4429 	ret = dlb_ldb_queue_attach_to_sn_group(hw, queue, args);
4430 	if (ret)
4431 		return ret;
4432 
4433 	/* Attach QID inflights */
4434 	queue->num_qid_inflights = args->num_qid_inflights;
4435 
4436 	/* Attach atomic inflights */
4437 	queue->aqed_freelist.base = domain->aqed_freelist.base +
4438 				    domain->aqed_freelist.offset;
4439 	queue->aqed_freelist.bound = queue->aqed_freelist.base +
4440 				     args->num_atomic_inflights;
4441 	domain->aqed_freelist.offset += args->num_atomic_inflights;
4442 
4443 	return 0;
4444 }
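
/* Worked example of the atomic-inflight carve above (illustrative numbers):
 * if the domain's AQED freelist spans [1024, 2048) and its offset is
 * currently 256, a queue requesting 64 atomic inflights gets base = 1280
 * and bound = 1344, and the domain's offset advances to 320.
 */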
4445 
4446 static int
4447 dlb_verify_create_ldb_queue_args(struct dlb_hw *hw,
4448 				 u32 domain_id,
4449 				 struct dlb_create_ldb_queue_args *args,
4450 				 struct dlb_cmd_response *resp)
4451 {
4452 	struct dlb_freelist *aqed_freelist;
4453 	struct dlb_domain *domain;
4454 	int i;
4455 
4456 	domain = dlb_get_domain_from_id(hw, domain_id);
4457 
4458 	if (domain == NULL) {
4459 		resp->status = DLB_ST_INVALID_DOMAIN_ID;
4460 		return -1;
4461 	}
4462 
4463 	if (!domain->configured) {
4464 		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
4465 		return -1;
4466 	}
4467 
4468 	if (domain->started) {
4469 		resp->status = DLB_ST_DOMAIN_STARTED;
4470 		return -1;
4471 	}
4472 
4473 	if (dlb_list_empty(&domain->avail_ldb_queues)) {
4474 		resp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;
4475 		return -1;
4476 	}
4477 
4478 	if (args->num_sequence_numbers) {
4479 		for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
4480 			struct dlb_sn_group *group = &hw->rsrcs.sn_groups[i];
4481 
4482 			if (group->sequence_numbers_per_queue ==
4483 			    args->num_sequence_numbers &&
4484 			    !dlb_sn_group_full(group))
4485 				break;
4486 		}
4487 
4488 		if (i == DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
4489 			resp->status = DLB_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
4490 			return -1;
4491 		}
4492 	}
4493 
4494 	if (args->num_qid_inflights > 4096) {
4495 		resp->status = DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION;
4496 		return -1;
4497 	}
4498 
4499 	/* Inflights must be <= number of sequence numbers if ordered */
4500 	if (args->num_sequence_numbers != 0 &&
4501 	    args->num_qid_inflights > args->num_sequence_numbers) {
4502 		resp->status = DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION;
4503 		return -1;
4504 	}
4505 
4506 	aqed_freelist = &domain->aqed_freelist;
4507 
4508 	if (dlb_freelist_count(aqed_freelist) < args->num_atomic_inflights) {
4509 		resp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
4510 		return -1;
4511 	}
4512 
4513 	return 0;
4514 }
4515 
4516 static void
4517 dlb_log_create_ldb_queue_args(struct dlb_hw *hw,
4518 			      u32 domain_id,
4519 			      struct dlb_create_ldb_queue_args *args)
4520 {
4521 	DLB_HW_INFO(hw, "DLB create load-balanced queue arguments:\n");
4522 	DLB_HW_INFO(hw, "\tDomain ID:                  %d\n",
4523 		    domain_id);
4524 	DLB_HW_INFO(hw, "\tNumber of sequence numbers: %d\n",
4525 		    args->num_sequence_numbers);
4526 	DLB_HW_INFO(hw, "\tNumber of QID inflights:    %d\n",
4527 		    args->num_qid_inflights);
4528 	DLB_HW_INFO(hw, "\tNumber of ATM inflights:    %d\n",
4529 		    args->num_atomic_inflights);
4530 }
4531 
4532 /**
4533  * dlb_hw_create_ldb_queue() - Allocate and initialize a DLB LDB queue.
4534  * @hw:	  Contains the current state of the DLB hardware.
 * @domain_id: Domain ID of the domain in which to create the queue.
4535  * @args: User-provided arguments.
4536  * @resp: Response to user.
4537  *
4538  * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
4539  * satisfy a request, resp->status will be set accordingly.
4540  */
4541 int dlb_hw_create_ldb_queue(struct dlb_hw *hw,
4542 			    u32 domain_id,
4543 			    struct dlb_create_ldb_queue_args *args,
4544 			    struct dlb_cmd_response *resp)
4545 {
4546 	struct dlb_ldb_queue *queue;
4547 	struct dlb_domain *domain;
4548 	int ret;
4549 
4550 	dlb_log_create_ldb_queue_args(hw, domain_id, args);
4551 
4552 	/* Verify that hardware resources are available before attempting to
4553 	 * satisfy the request. This simplifies the error unwinding code.
4554 	 */
4555 	/* At least one available queue */
4556 	if (dlb_verify_create_ldb_queue_args(hw, domain_id, args, resp))
4557 		return -EINVAL;
4558 
4559 	domain = dlb_get_domain_from_id(hw, domain_id);
4560 	if (domain == NULL) {
4561 		DLB_HW_ERR(hw,
4562 			   "[%s():%d] Internal error: domain not found\n",
4563 			   __func__, __LINE__);
4564 		return -EFAULT;
4565 	}
4566 
4567 	queue = DLB_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
4568 
4569 	/* Verification should catch this. */
4570 	if (queue == NULL) {
4571 		DLB_HW_ERR(hw,
4572 			   "[%s():%d] Internal error: no available ldb queues\n",
4573 			   __func__, __LINE__);
4574 		return -EFAULT;
4575 	}
4576 
4577 	ret = dlb_ldb_queue_attach_resources(hw, domain, queue, args);
4578 	if (ret < 0) {
4579 		DLB_HW_ERR(hw,
4580 			   "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
4581 			   __func__, __LINE__);
4582 		return ret;
4583 	}
4584 
4585 	dlb_configure_ldb_queue(hw, domain, queue, args);
4586 
4587 	queue->num_mappings = 0;
4588 
4589 	queue->configured = true;
4590 
4591 	/* Configuration succeeded, so move the resource from the 'avail' to
4592 	 * the 'used' list.
4593 	 */
4594 	dlb_list_del(&domain->avail_ldb_queues, &queue->domain_list);
4595 
4596 	dlb_list_add(&domain->used_ldb_queues, &queue->domain_list);
4597 
4598 	resp->status = 0;
4599 	resp->id = queue->id;
4600 
4601 	return 0;
4602 }
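
/* Illustrative usage sketch (not part of the driver): creating an ordered
 * load-balanced queue in a configured but not-yet-started domain. The
 * argument values are assumptions for the example.
 *
 *	struct dlb_create_ldb_queue_args qargs = {0};
 *	struct dlb_cmd_response resp = {0};
 *
 *	qargs.num_sequence_numbers = 64;
 *	qargs.num_qid_inflights = 64;	// must not exceed the SN count
 *	qargs.num_atomic_inflights = 64;
 *
 *	if (dlb_hw_create_ldb_queue(hw, domain_id, &qargs, &resp))
 *		return resp.status; // e.g. DLB_ST_SEQUENCE_NUMBERS_UNAVAILABLE
 *	// resp.id now identifies the new queue
 */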
4603 
4605 static void
4606 dlb_log_create_dir_queue_args(struct dlb_hw *hw,
4607 			      u32 domain_id,
4608 			      struct dlb_create_dir_queue_args *args)
4609 {
4610 	DLB_HW_INFO(hw, "DLB create directed queue arguments:\n");
4611 	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
4612 	DLB_HW_INFO(hw, "\tPort ID:   %d\n", args->port_id);
4613 }
4614 
4615 static struct dlb_dir_pq_pair *
4616 dlb_get_domain_used_dir_pq(u32 id, struct dlb_domain *domain)
4617 {
4618 	struct dlb_list_entry *iter;
4619 	struct dlb_dir_pq_pair *port;
4620 	RTE_SET_USED(iter);
4621 
4622 	if (id >= DLB_MAX_NUM_DIR_PORTS)
4623 		return NULL;
4624 
4625 	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
4626 		if (port->id == id)
4627 			return port;
4628 
4629 	return NULL;
4630 }
4631 
4632 static int
4633 dlb_verify_create_dir_queue_args(struct dlb_hw *hw,
4634 				 u32 domain_id,
4635 				 struct dlb_create_dir_queue_args *args,
4636 				 struct dlb_cmd_response *resp)
4637 {
4638 	struct dlb_domain *domain;
4639 
4640 	domain = dlb_get_domain_from_id(hw, domain_id);
4641 
4642 	if (domain == NULL) {
4643 		resp->status = DLB_ST_INVALID_DOMAIN_ID;
4644 		return -1;
4645 	}
4646 
4647 	if (!domain->configured) {
4648 		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
4649 		return -1;
4650 	}
4651 
4652 	if (domain->started) {
4653 		resp->status = DLB_ST_DOMAIN_STARTED;
4654 		return -1;
4655 	}
4656 
4657 	/* If the user claims the port is already configured, validate the port
4658 	 * ID, its domain, and whether the port is configured.
4659 	 */
4660 	if (args->port_id != -1) {
4661 		struct dlb_dir_pq_pair *port;
4662 
4663 		port = dlb_get_domain_used_dir_pq(args->port_id, domain);
4664 
4665 		if (port == NULL || port->domain_id != domain->id ||
4666 		    !port->port_configured) {
4667 			resp->status = DLB_ST_INVALID_PORT_ID;
4668 			return -1;
4669 		}
4670 	}
4671 
4672 	/* If the queue's port is not configured, validate that a free
4673 	 * port-queue pair is available.
4674 	 */
4675 	if (args->port_id == -1 &&
4676 	    dlb_list_empty(&domain->avail_dir_pq_pairs)) {
4677 		resp->status = DLB_ST_DIR_QUEUES_UNAVAILABLE;
4678 		return -1;
4679 	}
4680 
4681 	return 0;
4682 }
4683 
4684 static void dlb_configure_dir_queue(struct dlb_hw *hw,
4685 				    struct dlb_domain *domain,
4686 				    struct dlb_dir_pq_pair *queue)
4687 {
4688 	union dlb_sys_dir_vasqid_v r0 = { {0} };
4689 	union dlb_sys_dir_qid_v r1 = { {0} };
4690 	unsigned int offs;
4691 
4692 	/* QID write permissions are turned on when the domain is started */
4693 	r0.field.vasqid_v = 0;
4694 
4695 	offs = (domain->id * DLB_MAX_NUM_DIR_PORTS) + queue->id;
4696 
4697 	DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(offs), r0.val);
4698 
4699 	r1.field.qid_v = 1;
4700 
4701 	DLB_CSR_WR(hw, DLB_SYS_DIR_QID_V(queue->id), r1.val);
4702 
4703 	queue->queue_configured = true;
4704 }
4705 
4706 /**
4707  * dlb_hw_create_dir_queue() - Allocate and initialize a DLB DIR queue.
4708  * @hw:	  Contains the current state of the DLB hardware.
 * @domain_id: Domain ID of the domain in which to create the queue.
4709  * @args: User-provided arguments.
4710  * @resp: Response to user.
4711  *
4712  * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
4713  * satisfy a request, resp->status will be set accordingly.
4714  */
4715 int dlb_hw_create_dir_queue(struct dlb_hw *hw,
4716 			    u32 domain_id,
4717 			    struct dlb_create_dir_queue_args *args,
4718 			    struct dlb_cmd_response *resp)
4719 {
4720 	struct dlb_dir_pq_pair *queue;
4721 	struct dlb_domain *domain;
4722 
4723 	dlb_log_create_dir_queue_args(hw, domain_id, args);
4724 
4725 	/* Verify that hardware resources are available before attempting to
4726 	 * satisfy the request. This simplifies the error unwinding code.
4727 	 */
4728 	if (dlb_verify_create_dir_queue_args(hw, domain_id, args, resp))
4729 		return -EINVAL;
4730 
4731 	domain = dlb_get_domain_from_id(hw, domain_id);
4732 	if (domain == NULL) {
4733 		DLB_HW_ERR(hw,
4734 			   "[%s():%d] Internal error: domain not found\n",
4735 			   __func__, __LINE__);
4736 		return -EFAULT;
4737 	}
4738 
4739 	if (args->port_id != -1)
4740 		queue = dlb_get_domain_used_dir_pq(args->port_id, domain);
4741 	else
4742 		queue = DLB_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
4743 					  typeof(*queue));
4744 
4745 	/* Verification should catch this. */
4746 	if (queue == NULL) {
4747 		DLB_HW_ERR(hw,
4748 			   "[%s():%d] Internal error: no available dir queues\n",
4749 			   __func__, __LINE__);
4750 		return -EFAULT;
4751 	}
4752 
4753 	dlb_configure_dir_queue(hw, domain, queue);
4754 
4755 	/* Configuration succeeded, so move the resource from the 'avail' to
4756 	 * the 'used' list (if it's not already there).
4757 	 */
4758 	if (args->port_id == -1) {
4759 		dlb_list_del(&domain->avail_dir_pq_pairs, &queue->domain_list);
4760 
4761 		dlb_list_add(&domain->used_dir_pq_pairs, &queue->domain_list);
4762 	}
4763 
4764 	resp->status = 0;
4765 
4766 	resp->id = queue->id;
4767 
4768 	return 0;
4769 }
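
/* Illustrative usage sketch (not part of the driver): creating a directed
 * queue. Passing port_id == -1 asks the driver to pick a free port-queue
 * pair; passing an already-configured port's ID binds the queue to it.
 *
 *	struct dlb_create_dir_queue_args qargs = { .port_id = -1 };
 *	struct dlb_cmd_response resp = {0};
 *
 *	if (dlb_hw_create_dir_queue(hw, domain_id, &qargs, &resp))
 *		return resp.status;
 *	// resp.id names the allocated directed port-queue pair
 */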
4770 
4771 static void dlb_log_create_ldb_port_args(struct dlb_hw *hw,
4772 					 u32 domain_id,
4773 					 u64 pop_count_dma_base,
4774 					 u64 cq_dma_base,
4775 					 struct dlb_create_ldb_port_args *args)
4776 {
4777 	DLB_HW_INFO(hw, "DLB create load-balanced port arguments:\n");
4778 	DLB_HW_INFO(hw, "\tDomain ID:                 %d\n",
4779 		    domain_id);
4780 	DLB_HW_INFO(hw, "\tLDB credit pool ID:        %d\n",
4781 		    args->ldb_credit_pool_id);
4782 	DLB_HW_INFO(hw, "\tLDB credit high watermark: %d\n",
4783 		    args->ldb_credit_high_watermark);
4784 	DLB_HW_INFO(hw, "\tLDB credit low watermark:  %d\n",
4785 		    args->ldb_credit_low_watermark);
4786 	DLB_HW_INFO(hw, "\tLDB credit quantum:        %d\n",
4787 		    args->ldb_credit_quantum);
4788 	DLB_HW_INFO(hw, "\tDIR credit pool ID:        %d\n",
4789 		    args->dir_credit_pool_id);
4790 	DLB_HW_INFO(hw, "\tDIR credit high watermark: %d\n",
4791 		    args->dir_credit_high_watermark);
4792 	DLB_HW_INFO(hw, "\tDIR credit low watermark:  %d\n",
4793 		    args->dir_credit_low_watermark);
4794 	DLB_HW_INFO(hw, "\tDIR credit quantum:        %d\n",
4795 		    args->dir_credit_quantum);
4796 	DLB_HW_INFO(hw, "\tpop_count_address:         0x%"PRIx64"\n",
4797 		    pop_count_dma_base);
4798 	DLB_HW_INFO(hw, "\tCQ depth:                  %d\n",
4799 		    args->cq_depth);
4800 	DLB_HW_INFO(hw, "\tCQ hist list size:         %d\n",
4801 		    args->cq_history_list_size);
4802 	DLB_HW_INFO(hw, "\tCQ base address:           0x%"PRIx64"\n",
4803 		    cq_dma_base);
4804 }
4805 
4806 static struct dlb_credit_pool *
4807 dlb_get_domain_ldb_pool(u32 id, struct dlb_domain *domain)
4808 {
4809 	struct dlb_list_entry *iter;
4810 	struct dlb_credit_pool *pool;
4811 	RTE_SET_USED(iter);
4812 
4813 	if (id >= DLB_MAX_NUM_LDB_CREDIT_POOLS)
4814 		return NULL;
4815 
4816 	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
4817 		if (pool->id == id)
4818 			return pool;
4819 
4820 	return NULL;
4821 }
4822 
4823 static struct dlb_credit_pool *
4824 dlb_get_domain_dir_pool(u32 id, struct dlb_domain *domain)
4825 {
4826 	struct dlb_list_entry *iter;
4827 	struct dlb_credit_pool *pool;
4828 	RTE_SET_USED(iter);
4829 
4830 	if (id >= DLB_MAX_NUM_DIR_CREDIT_POOLS)
4831 		return NULL;
4832 
4833 	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
4834 		if (pool->id == id)
4835 			return pool;
4836 
4837 	return NULL;
4838 }
4839 
4840 static int
4841 dlb_verify_create_ldb_port_args(struct dlb_hw *hw,
4842 				u32 domain_id,
4843 				u64 pop_count_dma_base,
4844 				u64 cq_dma_base,
4845 				struct dlb_create_ldb_port_args *args,
4846 				struct dlb_cmd_response *resp)
4847 {
4848 	struct dlb_domain *domain;
4849 	struct dlb_credit_pool *pool;
4850 
4851 	domain = dlb_get_domain_from_id(hw, domain_id);
4852 
4853 	if (domain == NULL) {
4854 		resp->status = DLB_ST_INVALID_DOMAIN_ID;
4855 		return -1;
4856 	}
4857 
4858 	if (!domain->configured) {
4859 		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
4860 		return -1;
4861 	}
4862 
4863 	if (domain->started) {
4864 		resp->status = DLB_ST_DOMAIN_STARTED;
4865 		return -1;
4866 	}
4867 
4868 	if (dlb_list_empty(&domain->avail_ldb_ports)) {
4869 		resp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;
4870 		return -1;
4871 	}
4872 
4873 	/* If the scheduling domain has no LDB queues, we configure the
4874 	 * hardware to not supply the port with any LDB credits. In that
4875 	 * case, ignore the LDB credit arguments.
4876 	 */
4877 	if (!dlb_list_empty(&domain->used_ldb_queues) ||
4878 	    !dlb_list_empty(&domain->avail_ldb_queues)) {
4879 		pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
4880 					       domain);
4881 
4882 		if (pool == NULL || !pool->configured ||
4883 		    pool->domain_id != domain->id) {
4884 			resp->status = DLB_ST_INVALID_LDB_CREDIT_POOL_ID;
4885 			return -1;
4886 		}
4887 
4888 		if (args->ldb_credit_high_watermark > pool->avail_credits) {
4889 			resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
4890 			return -1;
4891 		}
4892 
4893 		if (args->ldb_credit_low_watermark >=
4894 		    args->ldb_credit_high_watermark) {
4895 			resp->status = DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK;
4896 			return -1;
4897 		}
4898 
4899 		if (args->ldb_credit_quantum >=
4900 		    args->ldb_credit_high_watermark) {
4901 			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
4902 			return -1;
4903 		}
4904 
4905 		if (args->ldb_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
4906 			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
4907 			return -1;
4908 		}
4909 	}
4910 
4911 	/* Likewise, if the scheduling domain has no DIR queues, we configure
4912 	 * the hardware to not supply the port with any DIR credits. In that
4913 	 * case, ignore the DIR credit arguments.
4914 	 */
4915 	if (!dlb_list_empty(&domain->used_dir_pq_pairs) ||
4916 	    !dlb_list_empty(&domain->avail_dir_pq_pairs)) {
4917 		pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
4918 					       domain);
4919 
4920 		if (pool == NULL || !pool->configured ||
4921 		    pool->domain_id != domain->id) {
4922 			resp->status = DLB_ST_INVALID_DIR_CREDIT_POOL_ID;
4923 			return -1;
4924 		}
4925 
4926 		if (args->dir_credit_high_watermark > pool->avail_credits) {
4927 			resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
4928 			return -1;
4929 		}
4930 
4931 		if (args->dir_credit_low_watermark >=
4932 		    args->dir_credit_high_watermark) {
4933 			resp->status = DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK;
4934 			return -1;
4935 		}
4936 
4937 		if (args->dir_credit_quantum >=
4938 		    args->dir_credit_high_watermark) {
4939 			resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
4940 			return -1;
4941 		}
4942 
4943 		if (args->dir_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
4944 			resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
4945 			return -1;
4946 		}
4947 	}
4948 
4949 	/* Check cache-line alignment */
4950 	if ((pop_count_dma_base & 0x3F) != 0) {
4951 		resp->status = DLB_ST_INVALID_POP_COUNT_VIRT_ADDR;
4952 		return -1;
4953 	}
4954 
4955 	if ((cq_dma_base & 0x3F) != 0) {
4956 		resp->status = DLB_ST_INVALID_CQ_VIRT_ADDR;
4957 		return -1;
4958 	}
4959 
4960 	if (args->cq_depth != 1 &&
4961 	    args->cq_depth != 2 &&
4962 	    args->cq_depth != 4 &&
4963 	    args->cq_depth != 8 &&
4964 	    args->cq_depth != 16 &&
4965 	    args->cq_depth != 32 &&
4966 	    args->cq_depth != 64 &&
4967 	    args->cq_depth != 128 &&
4968 	    args->cq_depth != 256 &&
4969 	    args->cq_depth != 512 &&
4970 	    args->cq_depth != 1024) {
4971 		resp->status = DLB_ST_INVALID_CQ_DEPTH;
4972 		return -1;
4973 	}
4974 
4975 	/* The history list size must be >= 1 */
4976 	if (!args->cq_history_list_size) {
4977 		resp->status = DLB_ST_INVALID_HIST_LIST_DEPTH;
4978 		return -1;
4979 	}
4980 
4981 	if (args->cq_history_list_size > domain->avail_hist_list_entries) {
4982 		resp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
4983 		return -1;
4984 	}
4985 
4986 	return 0;
4987 }
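
/* Worked example of the credit checks above (illustrative numbers): with
 * 1024 credits available in the LDB pool, ldb_credit_high_watermark = 512,
 * ldb_credit_low_watermark = 256, and ldb_credit_quantum = 64 pass: the
 * high watermark fits in the pool, the low watermark is below the high
 * watermark, and the quantum is below the high watermark and does not
 * exceed DLB_MAX_PORT_CREDIT_QUANTUM.
 */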
4988 
4989 static void dlb_ldb_pool_update_credit_count(struct dlb_hw *hw,
4990 					     u32 pool_id,
4991 					     u32 count)
4992 {
4993 	hw->rsrcs.ldb_credit_pools[pool_id].avail_credits -= count;
4994 }
4995 
4996 static void dlb_dir_pool_update_credit_count(struct dlb_hw *hw,
4997 					     u32 pool_id,
4998 					     u32 count)
4999 {
5000 	hw->rsrcs.dir_credit_pools[pool_id].avail_credits -= count;
5001 }
5002 
5003 static int dlb_ldb_port_configure_pp(struct dlb_hw *hw,
5004 				     struct dlb_domain *domain,
5005 				     struct dlb_ldb_port *port,
5006 				     struct dlb_create_ldb_port_args *args)
5007 {
5008 	union dlb_sys_ldb_pp2ldbpool r0 = { {0} };
5009 	union dlb_sys_ldb_pp2dirpool r1 = { {0} };
5010 	union dlb_sys_ldb_pp2vf_pf r2 = { {0} };
5011 	union dlb_sys_ldb_pp2vas r3 = { {0} };
5012 	union dlb_sys_ldb_pp_v r4 = { {0} };
5013 	union dlb_chp_ldb_pp_ldb_crd_hwm r6 = { {0} };
5014 	union dlb_chp_ldb_pp_dir_crd_hwm r7 = { {0} };
5015 	union dlb_chp_ldb_pp_ldb_crd_lwm r8 = { {0} };
5016 	union dlb_chp_ldb_pp_dir_crd_lwm r9 = { {0} };
5017 	union dlb_chp_ldb_pp_ldb_min_crd_qnt r10 = { {0} };
5018 	union dlb_chp_ldb_pp_dir_min_crd_qnt r11 = { {0} };
5019 	union dlb_chp_ldb_pp_ldb_crd_cnt r12 = { {0} };
5020 	union dlb_chp_ldb_pp_dir_crd_cnt r13 = { {0} };
5021 	union dlb_chp_ldb_ldb_pp2pool r14 = { {0} };
5022 	union dlb_chp_ldb_dir_pp2pool r15 = { {0} };
5023 	union dlb_chp_ldb_pp_crd_req_state r16 = { {0} };
5024 	union dlb_chp_ldb_pp_ldb_push_ptr r17 = { {0} };
5025 	union dlb_chp_ldb_pp_dir_push_ptr r18 = { {0} };
5026 
5027 	struct dlb_credit_pool *ldb_pool = NULL;
5028 	struct dlb_credit_pool *dir_pool = NULL;
5029 
5030 	if (port->ldb_pool_used) {
5031 		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
5032 						   domain);
5033 		if (ldb_pool == NULL) {
5034 			DLB_HW_ERR(hw,
5035 				   "[%s()] Internal error: port validation failed\n",
5036 				   __func__);
5037 			return -EFAULT;
5038 		}
5039 	}
5040 
5041 	if (port->dir_pool_used) {
5042 		dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
5043 						   domain);
5044 		if (dir_pool == NULL) {
5045 			DLB_HW_ERR(hw,
5046 				   "[%s()] Internal error: port validation failed\n",
5047 				   __func__);
5048 			return -EFAULT;
5049 		}
5050 	}
5051 
5052 	r0.field.ldbpool = (port->ldb_pool_used) ? ldb_pool->id : 0;
5053 
5054 	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2LDBPOOL(port->id), r0.val);
5055 
5056 	r1.field.dirpool = (port->dir_pool_used) ? dir_pool->id : 0;
5057 
5058 	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2DIRPOOL(port->id), r1.val);
5059 
5060 	r2.field.is_pf = 1;
5061 
5062 	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2VF_PF(port->id), r2.val);
5063 
5064 	r3.field.vas = domain->id;
5065 
5066 	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2VAS(port->id), r3.val);
5067 
5068 	r6.field.hwm = args->ldb_credit_high_watermark;
5069 
5070 	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_HWM(port->id), r6.val);
5071 
5072 	r7.field.hwm = args->dir_credit_high_watermark;
5073 
5074 	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_HWM(port->id), r7.val);
5075 
5076 	r8.field.lwm = args->ldb_credit_low_watermark;
5077 
5078 	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_LWM(port->id), r8.val);
5079 
5080 	r9.field.lwm = args->dir_credit_low_watermark;
5081 
5082 	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_LWM(port->id), r9.val);
5083 
5084 	r10.field.quanta = args->ldb_credit_quantum;
5085 
5086 	DLB_CSR_WR(hw,
5087 		   DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(port->id),
5088 		   r10.val);
5089 
5090 	r11.field.quanta = args->dir_credit_quantum;
5091 
5092 	DLB_CSR_WR(hw,
5093 		   DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(port->id),
5094 		   r11.val);
5095 
5096 	r12.field.count = args->ldb_credit_high_watermark;
5097 
5098 	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_CNT(port->id), r12.val);
5099 
5100 	r13.field.count = args->dir_credit_high_watermark;
5101 
5102 	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_CNT(port->id), r13.val);
5103 
5104 	r14.field.pool = (port->ldb_pool_used) ? ldb_pool->id : 0;
5105 
5106 	DLB_CSR_WR(hw, DLB_CHP_LDB_LDB_PP2POOL(port->id), r14.val);
5107 
5108 	r15.field.pool = (port->dir_pool_used) ? dir_pool->id : 0;
5109 
5110 	DLB_CSR_WR(hw, DLB_CHP_LDB_DIR_PP2POOL(port->id), r15.val);
5111 
5112 	r16.field.no_pp_credit_update = 0;
5113 
5114 	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id), r16.val);
5115 
5116 	r17.field.push_pointer = 0;
5117 
5118 	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_PUSH_PTR(port->id), r17.val);
5119 
5120 	r18.field.push_pointer = 0;
5121 
5122 	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_PUSH_PTR(port->id), r18.val);
5123 
5124 	r4.field.pp_v = 1;
5125 
5126 	DLB_CSR_WR(hw,
5127 		   DLB_SYS_LDB_PP_V(port->id),
5128 		   r4.val);
5129 
5130 	return 0;
5131 }
5132 
5133 static int dlb_ldb_port_configure_cq(struct dlb_hw *hw,
5134 				     struct dlb_ldb_port *port,
5135 				     u64 pop_count_dma_base,
5136 				     u64 cq_dma_base,
5137 				     struct dlb_create_ldb_port_args *args)
5138 {
5139 	int i;
5140 
5141 	union dlb_sys_ldb_cq_addr_l r0 = { {0} };
5142 	union dlb_sys_ldb_cq_addr_u r1 = { {0} };
5143 	union dlb_sys_ldb_cq2vf_pf r2 = { {0} };
5144 	union dlb_chp_ldb_cq_tkn_depth_sel r3 = { {0} };
5145 	union dlb_chp_hist_list_lim r4 = { {0} };
5146 	union dlb_chp_hist_list_base r5 = { {0} };
5147 	union dlb_lsp_cq_ldb_infl_lim r6 = { {0} };
5148 	union dlb_lsp_cq2priov r7 = { {0} };
5149 	union dlb_chp_hist_list_push_ptr r8 = { {0} };
5150 	union dlb_chp_hist_list_pop_ptr r9 = { {0} };
5151 	union dlb_lsp_cq_ldb_tkn_depth_sel r10 = { {0} };
5152 	union dlb_sys_ldb_pp_addr_l r11 = { {0} };
5153 	union dlb_sys_ldb_pp_addr_u r12 = { {0} };
5154 
5155 	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
5156 	r0.field.addr_l = cq_dma_base >> 6;
5157 
5158 	DLB_CSR_WR(hw,
5159 		   DLB_SYS_LDB_CQ_ADDR_L(port->id),
5160 		   r0.val);
5161 
5162 	r1.field.addr_u = cq_dma_base >> 32;
5163 
5164 	DLB_CSR_WR(hw,
5165 		   DLB_SYS_LDB_CQ_ADDR_U(port->id),
5166 		   r1.val);
5167 
5168 	r2.field.is_pf = 1;
5169 
5170 	DLB_CSR_WR(hw,
5171 		   DLB_SYS_LDB_CQ2VF_PF(port->id),
5172 		   r2.val);
5173 
5174 	if (args->cq_depth <= 8) {
5175 		r3.field.token_depth_select = 1;
5176 	} else if (args->cq_depth == 16) {
5177 		r3.field.token_depth_select = 2;
5178 	} else if (args->cq_depth == 32) {
5179 		r3.field.token_depth_select = 3;
5180 	} else if (args->cq_depth == 64) {
5181 		r3.field.token_depth_select = 4;
5182 	} else if (args->cq_depth == 128) {
5183 		r3.field.token_depth_select = 5;
5184 	} else if (args->cq_depth == 256) {
5185 		r3.field.token_depth_select = 6;
5186 	} else if (args->cq_depth == 512) {
5187 		r3.field.token_depth_select = 7;
5188 	} else if (args->cq_depth == 1024) {
5189 		r3.field.token_depth_select = 8;
5190 	} else {
5191 		DLB_HW_ERR(hw, "[%s():%d] Internal error: invalid CQ depth\n",
5192 			   __func__, __LINE__);
5193 		return -EFAULT;
5194 	}
5195 
5196 	DLB_CSR_WR(hw,
5197 		   DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id),
5198 		   r3.val);
5199 
5200 	r10.field.token_depth_select = r3.field.token_depth_select;
5201 	r10.field.ignore_depth = 0;
5202 	/* TDT algorithm: DLB must be able to write CQs with depth < 4 */
5203 	r10.field.enab_shallow_cq = 1;
5204 
5205 	DLB_CSR_WR(hw,
5206 		   DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id),
5207 		   r10.val);
5208 
5209 	/* To support CQs with depth less than 8, program the token count
5210 	 * register with a non-zero initial value. Operations such as domain
5211 	 * reset must take this initial value into account when quiescing the
5212 	 * CQ.
5213 	 */
5214 	port->init_tkn_cnt = 0;
5215 
5216 	if (args->cq_depth < 8) {
5217 		/* r13 avoids shadowing the pp_addr_u register (r12) above */
		union dlb_lsp_cq_ldb_tkn_cnt r13 = { {0} };
5218 
5219 		port->init_tkn_cnt = 8 - args->cq_depth;
5220 
5221 		r13.field.token_count = port->init_tkn_cnt;
5222 
5223 		DLB_CSR_WR(hw,
5224 			   DLB_LSP_CQ_LDB_TKN_CNT(port->id),
5225 			   r13.val);
5226 	}
5227 
5228 	r4.field.limit = port->hist_list_entry_limit - 1;
5229 
5230 	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_LIM(port->id), r4.val);
5231 
5232 	r5.field.base = port->hist_list_entry_base;
5233 
5234 	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_BASE(port->id), r5.val);
5235 
5236 	r8.field.push_ptr = r5.field.base;
5237 	r8.field.generation = 0;
5238 
5239 	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_PUSH_PTR(port->id), r8.val);
5240 
5241 	r9.field.pop_ptr = r5.field.base;
5242 	r9.field.generation = 0;
5243 
5244 	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_POP_PTR(port->id), r9.val);
5245 
5246 	/* The inflight limit sets a cap on the number of QEs for which this CQ
5247 	 * can owe completions at one time.
5248 	 */
5249 	r6.field.limit = args->cq_history_list_size;
5250 
5251 	DLB_CSR_WR(hw, DLB_LSP_CQ_LDB_INFL_LIM(port->id), r6.val);
5252 
5253 	/* Disable the port's QID mappings */
5254 	r7.field.v = 0;
5255 
5256 	DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port->id), r7.val);
5257 
5258 	/* Two cache lines (128B) are dedicated for the port's pop counts */
5259 	r11.field.addr_l = pop_count_dma_base >> 7;
5260 
5261 	DLB_CSR_WR(hw, DLB_SYS_LDB_PP_ADDR_L(port->id), r11.val);
5262 
5263 	r12.field.addr_u = pop_count_dma_base >> 32;
5264 
5265 	DLB_CSR_WR(hw, DLB_SYS_LDB_PP_ADDR_U(port->id), r12.val);
5266 
5267 	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++)
5268 		port->qid_map[i].state = DLB_QUEUE_UNMAPPED;
5269 
5270 	return 0;
5271 }
5272 
5273 static void dlb_update_ldb_arb_threshold(struct dlb_hw *hw)
5274 {
5275 	union dlb_lsp_ctrl_config_0 r0 = { {0} };
5276 
5277 	/* From the hardware spec:
5278 	 * "The optimal value for ldb_arb_threshold is in the region of {8 *
5279 	 * #CQs}. It is expected therefore that the PF will change this value
5280 	 * dynamically as the number of active ports changes."
5281 	 */
5282 	r0.val = DLB_CSR_RD(hw, DLB_LSP_CTRL_CONFIG_0);
5283 
5284 	r0.field.ldb_arb_threshold = hw->pf.num_enabled_ldb_ports * 8;
5285 	r0.field.ldb_arb_ignore_empty = 1;
5286 	r0.field.ldb_arb_mode = 1;
5287 
5288 	DLB_CSR_WR(hw, DLB_LSP_CTRL_CONFIG_0, r0.val);
5289 
5290 	dlb_flush_csr(hw);
5291 }
5292 
5293 static int dlb_configure_ldb_port(struct dlb_hw *hw,
5294 				  struct dlb_domain *domain,
5295 				  struct dlb_ldb_port *port,
5296 				  u64 pop_count_dma_base,
5297 				  u64 cq_dma_base,
5298 				  struct dlb_create_ldb_port_args *args)
5299 {
5300 	struct dlb_credit_pool *ldb_pool, *dir_pool;
5301 	int ret;
5302 
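	/* Carve this port's history list entries out of the domain's
	 * contiguous allocation by advancing the domain's offset, and debit
	 * them from the domain's available count.
	 */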
5303 	port->hist_list_entry_base = domain->hist_list_entry_base +
5304 				     domain->hist_list_entry_offset;
5305 	port->hist_list_entry_limit = port->hist_list_entry_base +
5306 				      args->cq_history_list_size;
5307 
5308 	domain->hist_list_entry_offset += args->cq_history_list_size;
5309 	domain->avail_hist_list_entries -= args->cq_history_list_size;
5310 
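	/* The port draws credits from a pool only if the domain has queues of
	 * the corresponding type; otherwise the matching watermark and
	 * quantum arguments are zeroed below.
	 */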
5311 	port->ldb_pool_used = !dlb_list_empty(&domain->used_ldb_queues) ||
5312 			      !dlb_list_empty(&domain->avail_ldb_queues);
5313 	port->dir_pool_used = !dlb_list_empty(&domain->used_dir_pq_pairs) ||
5314 			      !dlb_list_empty(&domain->avail_dir_pq_pairs);
5315 
5316 	if (port->ldb_pool_used) {
5317 		u32 cnt = args->ldb_credit_high_watermark;
5318 
5319 		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
5320 						   domain);
5321 		if (ldb_pool == NULL) {
5322 			DLB_HW_ERR(hw,
5323 				   "[%s()] Internal error: port validation failed\n",
5324 				   __func__);
5325 			return -EFAULT;
5326 		}
5327 
5328 		dlb_ldb_pool_update_credit_count(hw, ldb_pool->id, cnt);
5329 	} else {
5330 		args->ldb_credit_high_watermark = 0;
5331 		args->ldb_credit_low_watermark = 0;
5332 		args->ldb_credit_quantum = 0;
5333 	}
5334 
5335 	if (port->dir_pool_used) {
5336 		u32 cnt = args->dir_credit_high_watermark;
5337 
5338 		dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
5339 						   domain);
5340 		if (dir_pool == NULL) {
5341 			DLB_HW_ERR(hw,
5342 				   "[%s()] Internal error: port validation failed\n",
5343 				   __func__);
5344 			return -EFAULT;
5345 		}
5346 
5347 		dlb_dir_pool_update_credit_count(hw, dir_pool->id, cnt);
5348 	} else {
5349 		args->dir_credit_high_watermark = 0;
5350 		args->dir_credit_low_watermark = 0;
5351 		args->dir_credit_quantum = 0;
5352 	}
5353 
5354 	ret = dlb_ldb_port_configure_cq(hw,
5355 					port,
5356 					pop_count_dma_base,
5357 					cq_dma_base,
5358 					args);
5359 	if (ret < 0)
5360 		return ret;
5361 
5362 	ret = dlb_ldb_port_configure_pp(hw, domain, port, args);
5363 	if (ret < 0)
5364 		return ret;
5365 
5366 	dlb_ldb_port_cq_enable(hw, port);
5367 
5368 	port->num_mappings = 0;
5369 
5370 	port->enabled = true;
5371 
5372 	hw->pf.num_enabled_ldb_ports++;
5373 
5374 	dlb_update_ldb_arb_threshold(hw);
5375 
5376 	port->configured = true;
5377 
5378 	return 0;
5379 }
5380 
5381 /**
5382  * dlb_hw_create_ldb_port() - Allocate and initialize a load-balanced port and
5383  *	its resources.
5384  * @hw:	  Contains the current state of the DLB hardware.
5385  * @args: User-provided arguments.
5386  * @resp: Response to user.
5387  *
5388  * Return: 0 on success, < 0 on error. If the driver is unable to satisfy a
5389  * request, resp->status will be set accordingly.
5390  */
5391 int dlb_hw_create_ldb_port(struct dlb_hw *hw,
5392 			   u32 domain_id,
5393 			   struct dlb_create_ldb_port_args *args,
5394 			   u64 pop_count_dma_base,
5395 			   u64 cq_dma_base,
5396 			   struct dlb_cmd_response *resp)
5397 {
5398 	struct dlb_ldb_port *port;
5399 	struct dlb_domain *domain;
5400 	int ret;
5401 
5402 	dlb_log_create_ldb_port_args(hw,
5403 				     domain_id,
5404 				     pop_count_dma_base,
5405 				     cq_dma_base,
5406 				     args);
5407 
5408 	/* Verify that hardware resources are available before attempting to
5409 	 * satisfy the request. This simplifies the error unwinding code.
5410 	 */
5411 	if (dlb_verify_create_ldb_port_args(hw,
5412 					    domain_id,
5413 					    pop_count_dma_base,
5414 					    cq_dma_base,
5415 					    args,
5416 					    resp))
5417 		return -EINVAL;
5418 
5419 	domain = dlb_get_domain_from_id(hw, domain_id);
5420 	if (domain == NULL) {
5421 		DLB_HW_ERR(hw,
5422 			   "[%s():%d] Internal error: domain not found\n",
5423 			   __func__, __LINE__);
5424 		return -EFAULT;
5425 	}
5426 
5427 	port = DLB_DOM_LIST_HEAD(domain->avail_ldb_ports, typeof(*port));
5428 
5429 	/* Verification should catch this. */
5430 	if (port == NULL) {
5431 		DLB_HW_ERR(hw,
5432 			   "[%s():%d] Internal error: no available ldb ports\n",
5433 			   __func__, __LINE__);
5434 		return -EFAULT;
5435 	}
5436 
5437 	if (port->configured) {
5438 		DLB_HW_ERR(hw,
5439 			   "[%s()] Internal error: avail_ldb_ports contains configured ports.\n",
5440 			   __func__);
5441 		return -EFAULT;
5442 	}
5443 
5444 	ret = dlb_configure_ldb_port(hw,
5445 				     domain,
5446 				     port,
5447 				     pop_count_dma_base,
5448 				     cq_dma_base,
5449 				     args);
5450 	if (ret < 0)
5451 		return ret;
5452 
5453 	/* Configuration succeeded, so move the resource from the 'avail' to
5454 	 * the 'used' list.
5455 	 */
5456 	dlb_list_del(&domain->avail_ldb_ports, &port->domain_list);
5457 
5458 	dlb_list_add(&domain->used_ldb_ports, &port->domain_list);
5459 
5460 	resp->status = 0;
5461 	resp->id = port->id;
5462 
5463 	return 0;
5464 }
5465 
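/* Illustrative call sequence for dlb_hw_create_ldb_port() (a sketch only:
 * allocation of the 64B-aligned pop count and CQ DMA buffers, locking, and
 * credit pool creation are assumed to be handled elsewhere by the caller):
 *
 *	struct dlb_create_ldb_port_args args = {
 *		.cq_depth = 64,
 *		.cq_history_list_size = 64,
 *		.ldb_credit_pool_id = 0,
 *		.ldb_credit_high_watermark = 32,
 *		.ldb_credit_low_watermark = 16,
 *		.ldb_credit_quantum = 8,
 *	};
 *	struct dlb_cmd_response resp = { 0 };
 *
 *	if (dlb_hw_create_ldb_port(hw, domain_id, &args, pop_count_dma_base,
 *				   cq_dma_base, &resp))
 *		... on failure, resp.status holds a DLB_ST_* reason ...
 */
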
5466 static void dlb_log_create_dir_port_args(struct dlb_hw *hw,
5467 					 u32 domain_id,
5468 					 u64 pop_count_dma_base,
5469 					 u64 cq_dma_base,
5470 					 struct dlb_create_dir_port_args *args)
5471 {
5472 	DLB_HW_INFO(hw, "DLB create directed port arguments:\n");
5473 	DLB_HW_INFO(hw, "\tDomain ID:                 %d\n",
5474 		    domain_id);
5475 	DLB_HW_INFO(hw, "\tLDB credit pool ID:        %d\n",
5476 		    args->ldb_credit_pool_id);
5477 	DLB_HW_INFO(hw, "\tLDB credit high watermark: %d\n",
5478 		    args->ldb_credit_high_watermark);
5479 	DLB_HW_INFO(hw, "\tLDB credit low watermark:  %d\n",
5480 		    args->ldb_credit_low_watermark);
5481 	DLB_HW_INFO(hw, "\tLDB credit quantum:        %d\n",
5482 		    args->ldb_credit_quantum);
5483 	DLB_HW_INFO(hw, "\tDIR credit pool ID:        %d\n",
5484 		    args->dir_credit_pool_id);
5485 	DLB_HW_INFO(hw, "\tDIR credit high watermark: %d\n",
5486 		    args->dir_credit_high_watermark);
5487 	DLB_HW_INFO(hw, "\tDIR credit low watermark:  %d\n",
5488 		    args->dir_credit_low_watermark);
5489 	DLB_HW_INFO(hw, "\tDIR credit quantum:        %d\n",
5490 		    args->dir_credit_quantum);
5491 	DLB_HW_INFO(hw, "\tpop_count_address:         0x%"PRIx64"\n",
5492 		    pop_count_dma_base);
5493 	DLB_HW_INFO(hw, "\tCQ depth:                  %d\n",
5494 		    args->cq_depth);
5495 	DLB_HW_INFO(hw, "\tCQ base address:           0x%"PRIx64"\n",
5496 		    cq_dma_base);
5497 }
5498 
5499 static int
5500 dlb_verify_create_dir_port_args(struct dlb_hw *hw,
5501 				u32 domain_id,
5502 				u64 pop_count_dma_base,
5503 				u64 cq_dma_base,
5504 				struct dlb_create_dir_port_args *args,
5505 				struct dlb_cmd_response *resp)
5506 {
5507 	struct dlb_domain *domain;
5508 	struct dlb_credit_pool *pool;
5509 
5510 	domain = dlb_get_domain_from_id(hw, domain_id);
5511 
5512 	if (domain == NULL) {
5513 		resp->status = DLB_ST_INVALID_DOMAIN_ID;
5514 		return -1;
5515 	}
5516 
5517 	if (!domain->configured) {
5518 		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
5519 		return -1;
5520 	}
5521 
5522 	if (domain->started) {
5523 		resp->status = DLB_ST_DOMAIN_STARTED;
5524 		return -1;
5525 	}
5526 
5527 	/* If the user supplied a queue ID, validate that the queue exists,
5528 	 * belongs to this domain, and has already been configured.
5529 	 */
5530 	if (args->queue_id != -1) {
5531 		struct dlb_dir_pq_pair *queue;
5532 
5533 		queue = dlb_get_domain_used_dir_pq(args->queue_id,
5534 						   domain);
5535 
5536 		if (queue == NULL || queue->domain_id != domain->id ||
5537 		    !queue->queue_configured) {
5538 			resp->status = DLB_ST_INVALID_DIR_QUEUE_ID;
5539 			return -1;
5540 		}
5541 	}
5542 
5543 	/* If the port's queue is not configured, validate that a free
5544 	 * port-queue pair is available.
5545 	 */
5546 	if (args->queue_id == -1 &&
5547 	    dlb_list_empty(&domain->avail_dir_pq_pairs)) {
5548 		resp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;
5549 		return -1;
5550 	}
5551 
5552 	/* If the scheduling domain has no LDB queues, we configure the
5553 	 * hardware to not supply the port with any LDB credits. In that
5554 	 * case, ignore the LDB credit arguments.
5555 	 */
5556 	if (!dlb_list_empty(&domain->used_ldb_queues) ||
5557 	    !dlb_list_empty(&domain->avail_ldb_queues)) {
5558 		pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
5559 					       domain);
5560 
5561 		if (pool == NULL || !pool->configured ||
5562 		    pool->domain_id != domain->id) {
5563 			resp->status = DLB_ST_INVALID_LDB_CREDIT_POOL_ID;
5564 			return -1;
5565 		}
5566 
5567 		if (args->ldb_credit_high_watermark > pool->avail_credits) {
5568 			resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
5569 			return -1;
5570 		}
5571 
5572 		if (args->ldb_credit_low_watermark >=
5573 		    args->ldb_credit_high_watermark) {
5574 			resp->status = DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK;
5575 			return -1;
5576 		}
5577 
5578 		if (args->ldb_credit_quantum >=
5579 		    args->ldb_credit_high_watermark) {
5580 			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
5581 			return -1;
5582 		}
5583 
5584 		if (args->ldb_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
5585 			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
5586 			return -1;
5587 		}
5588 	}
5589 
5590 	pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
5591 				       domain);
5592 
5593 	if (pool == NULL || !pool->configured ||
5594 	    pool->domain_id != domain->id) {
5595 		resp->status = DLB_ST_INVALID_DIR_CREDIT_POOL_ID;
5596 		return -1;
5597 	}
5598 
5599 	if (args->dir_credit_high_watermark > pool->avail_credits) {
5600 		resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
5601 		return -1;
5602 	}
5603 
5604 	if (args->dir_credit_low_watermark >= args->dir_credit_high_watermark) {
5605 		resp->status = DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK;
5606 		return -1;
5607 	}
5608 
5609 	if (args->dir_credit_quantum >= args->dir_credit_high_watermark) {
5610 		resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
5611 		return -1;
5612 	}
5613 
5614 	if (args->dir_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
5615 		resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
5616 		return -1;
5617 	}
5618 
5619 	/* Check cache-line alignment */
5620 	if ((pop_count_dma_base & 0x3F) != 0) {
5621 		resp->status = DLB_ST_INVALID_POP_COUNT_VIRT_ADDR;
5622 		return -1;
5623 	}
5624 
5625 	if ((cq_dma_base & 0x3F) != 0) {
5626 		resp->status = DLB_ST_INVALID_CQ_VIRT_ADDR;
5627 		return -1;
5628 	}
5629 
5630 	if (args->cq_depth != 8 &&
5631 	    args->cq_depth != 16 &&
5632 	    args->cq_depth != 32 &&
5633 	    args->cq_depth != 64 &&
5634 	    args->cq_depth != 128 &&
5635 	    args->cq_depth != 256 &&
5636 	    args->cq_depth != 512 &&
5637 	    args->cq_depth != 1024) {
5638 		resp->status = DLB_ST_INVALID_CQ_DEPTH;
5639 		return -1;
5640 	}
5641 
5642 	return 0;
5643 }
5644 
5645 static int dlb_dir_port_configure_pp(struct dlb_hw *hw,
5646 				     struct dlb_domain *domain,
5647 				     struct dlb_dir_pq_pair *port,
5648 				     struct dlb_create_dir_port_args *args)
5649 {
5650 	union dlb_sys_dir_pp2ldbpool r0 = { {0} };
5651 	union dlb_sys_dir_pp2dirpool r1 = { {0} };
5652 	union dlb_sys_dir_pp2vf_pf r2 = { {0} };
5653 	union dlb_sys_dir_pp2vas r3 = { {0} };
5654 	union dlb_sys_dir_pp_v r4 = { {0} };
5655 	union dlb_chp_dir_pp_ldb_crd_hwm r6 = { {0} };
5656 	union dlb_chp_dir_pp_dir_crd_hwm r7 = { {0} };
5657 	union dlb_chp_dir_pp_ldb_crd_lwm r8 = { {0} };
5658 	union dlb_chp_dir_pp_dir_crd_lwm r9 = { {0} };
5659 	union dlb_chp_dir_pp_ldb_min_crd_qnt r10 = { {0} };
5660 	union dlb_chp_dir_pp_dir_min_crd_qnt r11 = { {0} };
5661 	union dlb_chp_dir_pp_ldb_crd_cnt r12 = { {0} };
5662 	union dlb_chp_dir_pp_dir_crd_cnt r13 = { {0} };
5663 	union dlb_chp_dir_ldb_pp2pool r14 = { {0} };
5664 	union dlb_chp_dir_dir_pp2pool r15 = { {0} };
5665 	union dlb_chp_dir_pp_crd_req_state r16 = { {0} };
5666 	union dlb_chp_dir_pp_ldb_push_ptr r17 = { {0} };
5667 	union dlb_chp_dir_pp_dir_push_ptr r18 = { {0} };
5668 
5669 	struct dlb_credit_pool *ldb_pool = NULL;
5670 	struct dlb_credit_pool *dir_pool = NULL;
5671 
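	/* Producer port (PP) setup: bind the PP to its credit pools and its
	 * domain's VAS, program credit watermarks, quanta, and initial
	 * counts, then mark the PP valid.
	 */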
5672 	if (port->ldb_pool_used) {
5673 		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
5674 						   domain);
5675 		if (ldb_pool == NULL) {
5676 			DLB_HW_ERR(hw,
5677 				   "[%s()] Internal error: port validation failed\n",
5678 				   __func__);
5679 			return -EFAULT;
5680 		}
5681 	}
5682 
5683 	if (port->dir_pool_used) {
5684 		dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
5685 						   domain);
5686 		if (dir_pool == NULL) {
5687 			DLB_HW_ERR(hw,
5688 				   "[%s()] Internal error: port validation failed\n",
5689 				   __func__);
5690 			return -EFAULT;
5691 		}
5692 	}
5693 
5694 	r0.field.ldbpool = (port->ldb_pool_used) ? ldb_pool->id : 0;
5695 
5696 	DLB_CSR_WR(hw,
5697 		   DLB_SYS_DIR_PP2LDBPOOL(port->id),
5698 		   r0.val);
5699 
5700 	r1.field.dirpool = (port->dir_pool_used) ? dir_pool->id : 0;
5701 
5702 	DLB_CSR_WR(hw,
5703 		   DLB_SYS_DIR_PP2DIRPOOL(port->id),
5704 		   r1.val);
5705 
5706 	r2.field.is_pf = 1;
5707 	r2.field.is_hw_dsi = 0;
5708 
5709 	DLB_CSR_WR(hw,
5710 		   DLB_SYS_DIR_PP2VF_PF(port->id),
5711 		   r2.val);
5712 
5713 	r3.field.vas = domain->id;
5714 
5715 	DLB_CSR_WR(hw,
5716 		   DLB_SYS_DIR_PP2VAS(port->id),
5717 		   r3.val);
5718 
5719 	r6.field.hwm = args->ldb_credit_high_watermark;
5720 
5721 	DLB_CSR_WR(hw,
5722 		   DLB_CHP_DIR_PP_LDB_CRD_HWM(port->id),
5723 		   r6.val);
5724 
5725 	r7.field.hwm = args->dir_credit_high_watermark;
5726 
5727 	DLB_CSR_WR(hw,
5728 		   DLB_CHP_DIR_PP_DIR_CRD_HWM(port->id),
5729 		   r7.val);
5730 
5731 	r8.field.lwm = args->ldb_credit_low_watermark;
5732 
5733 	DLB_CSR_WR(hw,
5734 		   DLB_CHP_DIR_PP_LDB_CRD_LWM(port->id),
5735 		   r8.val);
5736 
5737 	r9.field.lwm = args->dir_credit_low_watermark;
5738 
5739 	DLB_CSR_WR(hw,
5740 		   DLB_CHP_DIR_PP_DIR_CRD_LWM(port->id),
5741 		   r9.val);
5742 
5743 	r10.field.quanta = args->ldb_credit_quantum;
5744 
5745 	DLB_CSR_WR(hw,
5746 		   DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(port->id),
5747 		   r10.val);
5748 
5749 	r11.field.quanta = args->dir_credit_quantum;
5750 
5751 	DLB_CSR_WR(hw,
5752 		   DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(port->id),
5753 		   r11.val);
5754 
5755 	r12.field.count = args->ldb_credit_high_watermark;
5756 
5757 	DLB_CSR_WR(hw,
5758 		   DLB_CHP_DIR_PP_LDB_CRD_CNT(port->id),
5759 		   r12.val);
5760 
5761 	r13.field.count = args->dir_credit_high_watermark;
5762 
5763 	DLB_CSR_WR(hw,
5764 		   DLB_CHP_DIR_PP_DIR_CRD_CNT(port->id),
5765 		   r13.val);
5766 
5767 	r14.field.pool = (port->ldb_pool_used) ? ldb_pool->id : 0;
5768 
5769 	DLB_CSR_WR(hw,
5770 		   DLB_CHP_DIR_LDB_PP2POOL(port->id),
5771 		   r14.val);
5772 
5773 	r15.field.pool = (port->dir_pool_used) ? dir_pool->id : 0;
5774 
5775 	DLB_CSR_WR(hw,
5776 		   DLB_CHP_DIR_DIR_PP2POOL(port->id),
5777 		   r15.val);
5778 
5779 	r16.field.no_pp_credit_update = 0;
5780 
5781 	DLB_CSR_WR(hw,
5782 		   DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),
5783 		   r16.val);
5784 
5785 	r17.field.push_pointer = 0;
5786 
5787 	DLB_CSR_WR(hw,
5788 		   DLB_CHP_DIR_PP_LDB_PUSH_PTR(port->id),
5789 		   r17.val);
5790 
5791 	r18.field.push_pointer = 0;
5792 
5793 	DLB_CSR_WR(hw,
5794 		   DLB_CHP_DIR_PP_DIR_PUSH_PTR(port->id),
5795 		   r18.val);
5796 
5797 	r4.field.pp_v = 1;
5798 	r4.field.mb_dm = 0;
5799 
5800 	DLB_CSR_WR(hw, DLB_SYS_DIR_PP_V(port->id), r4.val);
5801 
5802 	return 0;
5803 }
5804 
5805 static int dlb_dir_port_configure_cq(struct dlb_hw *hw,
5806 				     struct dlb_dir_pq_pair *port,
5807 				     u64 pop_count_dma_base,
5808 				     u64 cq_dma_base,
5809 				     struct dlb_create_dir_port_args *args)
5810 {
5811 	union dlb_sys_dir_cq_addr_l r0 = { {0} };
5812 	union dlb_sys_dir_cq_addr_u r1 = { {0} };
5813 	union dlb_sys_dir_cq2vf_pf r2 = { {0} };
5814 	union dlb_chp_dir_cq_tkn_depth_sel r3 = { {0} };
5815 	union dlb_lsp_cq_dir_tkn_depth_sel_dsi r4 = { {0} };
5816 	union dlb_sys_dir_pp_addr_l r5 = { {0} };
5817 	union dlb_sys_dir_pp_addr_u r6 = { {0} };
5818 
5819 	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
5820 	r0.field.addr_l = cq_dma_base >> 6;
5821 
5822 	DLB_CSR_WR(hw, DLB_SYS_DIR_CQ_ADDR_L(port->id), r0.val);
5823 
5824 	r1.field.addr_u = cq_dma_base >> 32;
5825 
5826 	DLB_CSR_WR(hw, DLB_SYS_DIR_CQ_ADDR_U(port->id), r1.val);
5827 
5828 	r2.field.is_pf = 1;
5829 
5830 	DLB_CSR_WR(hw, DLB_SYS_DIR_CQ2VF_PF(port->id), r2.val);
5831 
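	/* Directed CQs use the same log2(depth) - 2 token-depth encoding as
	 * load-balanced CQs, but do not support depths below 8.
	 */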
5832 	if (args->cq_depth == 8) {
5833 		r3.field.token_depth_select = 1;
5834 	} else if (args->cq_depth == 16) {
5835 		r3.field.token_depth_select = 2;
5836 	} else if (args->cq_depth == 32) {
5837 		r3.field.token_depth_select = 3;
5838 	} else if (args->cq_depth == 64) {
5839 		r3.field.token_depth_select = 4;
5840 	} else if (args->cq_depth == 128) {
5841 		r3.field.token_depth_select = 5;
5842 	} else if (args->cq_depth == 256) {
5843 		r3.field.token_depth_select = 6;
5844 	} else if (args->cq_depth == 512) {
5845 		r3.field.token_depth_select = 7;
5846 	} else if (args->cq_depth == 1024) {
5847 		r3.field.token_depth_select = 8;
5848 	} else {
5849 		DLB_HW_ERR(hw, "[%s():%d] Internal error: invalid CQ depth\n",
5850 			   __func__, __LINE__);
5851 		return -EFAULT;
5852 	}
5853 
5854 	DLB_CSR_WR(hw,
5855 		   DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id),
5856 		   r3.val);
5857 
5858 	r4.field.token_depth_select = r3.field.token_depth_select;
5859 	r4.field.disable_wb_opt = 0;
5860 
5861 	DLB_CSR_WR(hw,
5862 		   DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id),
5863 		   r4.val);
5864 
5865 	/* Two cache lines (128B) are dedicated for the port's pop counts */
5866 	r5.field.addr_l = pop_count_dma_base >> 7;
5867 
5868 	DLB_CSR_WR(hw, DLB_SYS_DIR_PP_ADDR_L(port->id), r5.val);
5869 
5870 	r6.field.addr_u = pop_count_dma_base >> 32;
5871 
5872 	DLB_CSR_WR(hw, DLB_SYS_DIR_PP_ADDR_U(port->id), r6.val);
5873 
5874 	return 0;
5875 }
5876 
5877 static int dlb_configure_dir_port(struct dlb_hw *hw,
5878 				  struct dlb_domain *domain,
5879 				  struct dlb_dir_pq_pair *port,
5880 				  u64 pop_count_dma_base,
5881 				  u64 cq_dma_base,
5882 				  struct dlb_create_dir_port_args *args)
5883 {
5884 	struct dlb_credit_pool *ldb_pool, *dir_pool;
5885 	int ret;
5886 
5887 	port->ldb_pool_used = !dlb_list_empty(&domain->used_ldb_queues) ||
5888 			      !dlb_list_empty(&domain->avail_ldb_queues);
5889 
5890 	/* Each directed port has a directed queue, hence this port requires
5891 	 * directed credits.
5892 	 */
5893 	port->dir_pool_used = true;
5894 
5895 	if (port->ldb_pool_used) {
5896 		u32 cnt = args->ldb_credit_high_watermark;
5897 
5898 		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
5899 						   domain);
5900 		if (ldb_pool == NULL) {
5901 			DLB_HW_ERR(hw,
5902 				   "[%s()] Internal error: port validation failed\n",
5903 				   __func__);
5904 			return -EFAULT;
5905 		}
5906 
5907 		dlb_ldb_pool_update_credit_count(hw, ldb_pool->id, cnt);
5908 	} else {
5909 		args->ldb_credit_high_watermark = 0;
5910 		args->ldb_credit_low_watermark = 0;
5911 		args->ldb_credit_quantum = 0;
5912 	}
5913 
5914 	dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id, domain);
5915 	if (dir_pool == NULL) {
5916 		DLB_HW_ERR(hw,
5917 			   "[%s()] Internal error: port validation failed\n",
5918 			   __func__);
5919 		return -EFAULT;
5920 	}
5921 
5922 	dlb_dir_pool_update_credit_count(hw,
5923 					 dir_pool->id,
5924 					 args->dir_credit_high_watermark);
5925 
5926 	ret = dlb_dir_port_configure_cq(hw,
5927 					port,
5928 					pop_count_dma_base,
5929 					cq_dma_base,
5930 					args);
5931 
5932 	if (ret < 0)
5933 		return ret;
5934 
5935 	ret = dlb_dir_port_configure_pp(hw, domain, port, args);
5936 	if (ret < 0)
5937 		return ret;
5938 
5939 	dlb_dir_port_cq_enable(hw, port);
5940 
5941 	port->enabled = true;
5942 
5943 	port->port_configured = true;
5944 
5945 	return 0;
5946 }
5947 
5948 /**
5949  * dlb_hw_create_dir_port() - Allocate and initialize a DLB directed port and
5950  *	queue. The port/queue pair share the same ID and name.
5951  * @hw:	  Contains the current state of the DLB hardware.
5952  * @args: User-provided arguments.
5953  * @resp: Response to user.
5954  *
5955  * Return: 0 on success, < 0 on error. If the driver is unable to satisfy a
5956  * request, resp->status will be set accordingly.
5957  */
5958 int dlb_hw_create_dir_port(struct dlb_hw *hw,
5959 			   u32 domain_id,
5960 			   struct dlb_create_dir_port_args *args,
5961 			   u64 pop_count_dma_base,
5962 			   u64 cq_dma_base,
5963 			   struct dlb_cmd_response *resp)
5964 {
5965 	struct dlb_dir_pq_pair *port;
5966 	struct dlb_domain *domain;
5967 	int ret;
5968 
5969 	dlb_log_create_dir_port_args(hw,
5970 				     domain_id,
5971 				     pop_count_dma_base,
5972 				     cq_dma_base,
5973 				     args);
5974 
5975 	/* Verify that hardware resources are available before attempting to
5976 	 * satisfy the request. This simplifies the error unwinding code.
5977 	 */
5978 	if (dlb_verify_create_dir_port_args(hw,
5979 					    domain_id,
5980 					    pop_count_dma_base,
5981 					    cq_dma_base,
5982 					    args,
5983 					    resp))
5984 		return -EINVAL;
5985 
5986 	domain = dlb_get_domain_from_id(hw, domain_id);
5987 	if (domain == NULL) {
5988 		DLB_HW_ERR(hw,
5989 			   "[%s():%d] Internal error: domain not found\n",
5990 			   __func__, __LINE__);
5991 		return -EFAULT;
5992 	}
5993 
5994 	if (args->queue_id != -1)
5995 		port = dlb_get_domain_used_dir_pq(args->queue_id,
5996 						  domain);
5997 	else
5998 		port = DLB_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
5999 					 typeof(*port));
6000 
6001 	/* Verification should catch this. */
6002 	if (port == NULL) {
6003 		DLB_HW_ERR(hw,
6004 			   "[%s():%d] Internal error: no available dir ports\n",
6005 			   __func__, __LINE__);
6006 		return -EFAULT;
6007 	}
6008 
6009 	ret = dlb_configure_dir_port(hw,
6010 				     domain,
6011 				     port,
6012 				     pop_count_dma_base,
6013 				     cq_dma_base,
6014 				     args);
6015 	if (ret < 0)
6016 		return ret;
6017 
6018 	/* Configuration succeeded, so move the resource from the 'avail' to
6019 	 * the 'used' list (if it's not already there).
6020 	 */
6021 	if (args->queue_id == -1) {
6022 		dlb_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
6023 
6024 		dlb_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
6025 	}
6026 
6027 	resp->status = 0;
6028 	resp->id = port->id;
6029 
6030 	return 0;
6031 }
6032 
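/* Sketch of a directed port request (illustrative only; resp and the DMA
 * bases are assumed to be set up as in the load-balanced example above).
 * Passing queue_id = -1 asks the driver to pick a free port/queue pair:
 *
 *	struct dlb_create_dir_port_args args = {
 *		.queue_id = -1,
 *		.cq_depth = 8,
 *		.dir_credit_pool_id = 0,
 *		.dir_credit_high_watermark = 32,
 *		.dir_credit_low_watermark = 16,
 *		.dir_credit_quantum = 8,
 *	};
 *
 *	ret = dlb_hw_create_dir_port(hw, domain_id, &args, pop_count_dma_base,
 *				     cq_dma_base, &resp);
 */
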
6033 static struct dlb_ldb_port *
6034 dlb_get_domain_used_ldb_port(u32 id, struct dlb_domain *domain)
6035 {
6036 	struct dlb_list_entry *iter;
6037 	struct dlb_ldb_port *port;
6038 	RTE_SET_USED(iter);
6039 
6040 	if (id >= DLB_MAX_NUM_LDB_PORTS)
6041 		return NULL;
6042 
6043 	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
6044 		if (port->id == id)
6045 			return port;
6046 
6047 	DLB_DOM_LIST_FOR(domain->avail_ldb_ports, port, iter)
6048 		if (port->id == id)
6049 			return port;
6050 
6051 	return NULL;
6052 }
6053 
6054 static void
6055 dlb_log_pending_port_unmaps_args(struct dlb_hw *hw,
6056 				 struct dlb_pending_port_unmaps_args *args)
6057 {
6058 	DLB_HW_INFO(hw, "DLB pending port unmaps arguments:\n");
6059 	DLB_HW_INFO(hw, "\tPort ID: %d\n", args->port_id);
6060 }
6061 
6062 int dlb_hw_pending_port_unmaps(struct dlb_hw *hw,
6063 			       u32 domain_id,
6064 			       struct dlb_pending_port_unmaps_args *args,
6065 			       struct dlb_cmd_response *resp)
6066 {
6067 	struct dlb_domain *domain;
6068 	struct dlb_ldb_port *port;
6069 
6070 	dlb_log_pending_port_unmaps_args(hw, args);
6071 
6072 	domain = dlb_get_domain_from_id(hw, domain_id);
6073 
6074 	if (domain == NULL) {
6075 		resp->status = DLB_ST_INVALID_DOMAIN_ID;
6076 		return -EINVAL;
6077 	}
6078 
6079 	port = dlb_get_domain_used_ldb_port(args->port_id, domain);
6080 	if (port == NULL || !port->configured) {
6081 		resp->status = DLB_ST_INVALID_PORT_ID;
6082 		return -EINVAL;
6083 	}
6084 
6085 	resp->id = port->num_pending_removals;
6086 
6087 	return 0;
6088 }
6089 
6090 static void dlb_log_unmap_qid(struct dlb_hw *hw,
6091 			      u32 domain_id,
6092 			      struct dlb_unmap_qid_args *args)
6093 {
6094 	DLB_HW_INFO(hw, "DLB unmap QID arguments:\n");
6095 	DLB_HW_INFO(hw, "\tDomain ID: %d\n",
6096 		    domain_id);
6097 	DLB_HW_INFO(hw, "\tPort ID:   %d\n",
6098 		    args->port_id);
6099 	DLB_HW_INFO(hw, "\tQueue ID:  %d\n",
6100 		    args->qid);
6101 	if (args->qid < DLB_MAX_NUM_LDB_QUEUES)
6102 		DLB_HW_INFO(hw, "\tQueue's num mappings:  %d\n",
6103 			    hw->rsrcs.ldb_queues[args->qid].num_mappings);
6104 }
6105 
6106 static struct dlb_ldb_queue *dlb_get_domain_ldb_queue(u32 id,
6107 						      struct dlb_domain *domain)
6108 {
6109 	struct dlb_list_entry *iter;
6110 	struct dlb_ldb_queue *queue;
6111 	RTE_SET_USED(iter);
6112 
6113 	if (id >= DLB_MAX_NUM_LDB_QUEUES)
6114 		return NULL;
6115 
6116 	DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter)
6117 		if (queue->id == id)
6118 			return queue;
6119 
6120 	return NULL;
6121 }
6122 
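/* Search the port's slots for an unmap-in-progress entry whose pending map
 * targets 'queue'. Returns true and stores the slot index in *slot if found;
 * otherwise *slot equals DLB_MAX_NUM_QIDS_PER_LDB_CQ.
 */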
6123 static bool
6124 dlb_port_find_slot_with_pending_map_queue(struct dlb_ldb_port *port,
6125 					  struct dlb_ldb_queue *queue,
6126 					  int *slot)
6127 {
6128 	int i;
6129 
6130 	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
6131 		struct dlb_ldb_port_qid_map *map = &port->qid_map[i];
6132 
6133 		if (map->state == DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP &&
6134 		    map->pending_qid == queue->id)
6135 			break;
6136 	}
6137 
6138 	*slot = i;
6139 
6140 	return (i < DLB_MAX_NUM_QIDS_PER_LDB_CQ);
6141 }
6142 
6143 static int dlb_verify_unmap_qid_args(struct dlb_hw *hw,
6144 				     u32 domain_id,
6145 				     struct dlb_unmap_qid_args *args,
6146 				     struct dlb_cmd_response *resp)
6147 {
6148 	enum dlb_qid_map_state state;
6149 	struct dlb_domain *domain;
6150 	struct dlb_ldb_port *port;
6151 	struct dlb_ldb_queue *queue;
6152 	int slot;
6153 	int id;
6154 
6155 	domain = dlb_get_domain_from_id(hw, domain_id);
6156 
6157 	if (domain == NULL) {
6158 		resp->status = DLB_ST_INVALID_DOMAIN_ID;
6159 		return -1;
6160 	}
6161 
6162 	if (!domain->configured) {
6163 		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
6164 		return -1;
6165 	}
6166 
6167 	id = args->port_id;
6168 
6169 	port = dlb_get_domain_used_ldb_port(id, domain);
6170 
6171 	if (port == NULL || !port->configured) {
6172 		resp->status = DLB_ST_INVALID_PORT_ID;
6173 		return -1;
6174 	}
6175 
6176 	if (port->domain_id != domain->id) {
6177 		resp->status = DLB_ST_INVALID_PORT_ID;
6178 		return -1;
6179 	}
6180 
6181 	queue = dlb_get_domain_ldb_queue(args->qid, domain);
6182 
6183 	if (queue == NULL || !queue->configured) {
6184 		DLB_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
6185 			   __func__, args->qid);
6186 		resp->status = DLB_ST_INVALID_QID;
6187 		return -1;
6188 	}
6189 
6190 	/* Verify that the port has the queue mapped. From the application's
6191 	 * perspective a queue is mapped if it is actually mapped, the map is
6192 	 * in progress, or the map is blocked pending an unmap.
6193 	 */
6194 	state = DLB_QUEUE_MAPPED;
6195 	if (dlb_port_find_slot_queue(port, state, queue, &slot))
6196 		return 0;
6197 
6198 	state = DLB_QUEUE_MAP_IN_PROGRESS;
6199 	if (dlb_port_find_slot_queue(port, state, queue, &slot))
6200 		return 0;
6201 
6202 	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &slot))
6203 		return 0;
6204 
6205 	resp->status = DLB_ST_INVALID_QID;
6206 	return -1;
6207 }
6208 
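/* An unmap request resolves in one of three ways: an in-progress map is
 * aborted (the slot returns to UNMAPPED), a map pending behind an unmap is
 * canceled, or a live mapping is torn down asynchronously after the CQ's
 * inflights drain.
 */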
6209 int dlb_hw_unmap_qid(struct dlb_hw *hw,
6210 		     u32 domain_id,
6211 		     struct dlb_unmap_qid_args *args,
6212 		     struct dlb_cmd_response *resp)
6213 {
6214 	enum dlb_qid_map_state state;
6215 	struct dlb_ldb_queue *queue;
6216 	struct dlb_ldb_port *port;
6217 	struct dlb_domain *domain;
6218 	bool unmap_complete;
6219 	int i, ret, id;
6220 
6221 	dlb_log_unmap_qid(hw, domain_id, args);
6222 
6223 	/* Verify that hardware resources are available before attempting to
6224 	 * satisfy the request. This simplifies the error unwinding code.
6225 	 */
6226 	if (dlb_verify_unmap_qid_args(hw, domain_id, args, resp))
6227 		return -EINVAL;
6228 
6229 	domain = dlb_get_domain_from_id(hw, domain_id);
6230 	if (domain == NULL) {
6231 		DLB_HW_ERR(hw,
6232 			   "[%s():%d] Internal error: domain not found\n",
6233 			   __func__, __LINE__);
6234 		return -EFAULT;
6235 	}
6236 
6237 	id = args->port_id;
6238 
6239 	port = dlb_get_domain_used_ldb_port(id, domain);
6240 	if (port == NULL) {
6241 		DLB_HW_ERR(hw,
6242 			   "[%s():%d] Internal error: port not found\n",
6243 			   __func__, __LINE__);
6244 		return -EFAULT;
6245 	}
6246 
6247 	queue = dlb_get_domain_ldb_queue(args->qid, domain);
6248 	if (queue == NULL) {
6249 		DLB_HW_ERR(hw,
6250 			   "[%s():%d] Internal error: queue not found\n",
6251 			   __func__, __LINE__);
6252 		return -EFAULT;
6253 	}
6254 
6255 	/* If the queue hasn't been mapped yet, we need to update the slot's
6256 	 * state and re-enable the queue's inflights.
6257 	 */
6258 	state = DLB_QUEUE_MAP_IN_PROGRESS;
6259 	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
6260 		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
6261 			DLB_HW_ERR(hw,
6262 				   "[%s():%d] Internal error: port slot tracking failed\n",
6263 				   __func__, __LINE__);
6264 			return -EFAULT;
6265 		}
6266 
6267 		/* Since the in-progress map was aborted, re-enable the QID's
6268 		 * inflights.
6269 		 */
6270 		if (queue->num_pending_additions == 0)
6271 			dlb_ldb_queue_set_inflight_limit(hw, queue);
6272 
6273 		state = DLB_QUEUE_UNMAPPED;
6274 		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
6275 		if (ret)
6276 			return ret;
6277 
6278 		goto unmap_qid_done;
6279 	}
6280 
6281 	/* If the queue mapping is on hold pending an unmap, we simply need to
6282 	 * update the slot's state.
6283 	 */
6284 	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i)) {
6285 		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
6286 			DLB_HW_ERR(hw,
6287 				   "[%s():%d] Internal error: port slot tracking failed\n",
6288 				   __func__, __LINE__);
6289 			return -EFAULT;
6290 		}
6291 
6292 		state = DLB_QUEUE_UNMAP_IN_PROGRESS;
6293 		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
6294 		if (ret)
6295 			return ret;
6296 
6297 		goto unmap_qid_done;
6298 	}
6299 
6300 	state = DLB_QUEUE_MAPPED;
6301 	if (!dlb_port_find_slot_queue(port, state, queue, &i)) {
6302 		DLB_HW_ERR(hw,
6303 			   "[%s():%d] Internal error: no available CQ slots\n",
6304 			   __func__, __LINE__);
6305 		return -EFAULT;
6306 	}
6307 
6308 	if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
6309 		DLB_HW_ERR(hw,
6310 			   "[%s():%d] Internal error: port slot tracking failed\n",
6311 			   __func__, __LINE__);
6312 		return -EFAULT;
6313 	}
6314 
6315 	/* QID->CQ mapping removal is an asynchronous procedure. It requires
6316 	 * stopping the DLB from scheduling this CQ, draining all inflights
6317 	 * from the CQ, then unmapping the queue from the CQ. This function
6318 	 * simply marks the port as needing the queue unmapped, and (if
6319 	 * necessary) starts the unmapping worker thread.
6320 	 */
6321 	dlb_ldb_port_cq_disable(hw, port);
6322 
6323 	state = DLB_QUEUE_UNMAP_IN_PROGRESS;
6324 	ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
6325 	if (ret)
6326 		return ret;
6327 
6328 	/* Attempt to finish the unmapping now, in case the port has no
6329 	 * outstanding inflights. If that's not the case, this will fail and
6330 	 * the unmapping will be completed at a later time.
6331 	 */
6332 	unmap_complete = dlb_domain_finish_unmap_port(hw, domain, port);
6333 
6334 	/* If the unmapping couldn't complete immediately, launch the worker
6335 	 * thread (if it isn't already launched) to finish it later.
6336 	 */
6337 	if (!unmap_complete && !os_worker_active(hw))
6338 		os_schedule_work(hw);
6339 
6340 unmap_qid_done:
6341 	resp->status = 0;
6342 
6343 	return 0;
6344 }
6345 
6346 static void dlb_log_map_qid(struct dlb_hw *hw,
6347 			    u32 domain_id,
6348 			    struct dlb_map_qid_args *args)
6349 {
6350 	DLB_HW_INFO(hw, "DLB map QID arguments:\n");
6351 	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
6352 	DLB_HW_INFO(hw, "\tPort ID:   %d\n", args->port_id);
6353 	DLB_HW_INFO(hw, "\tQueue ID:  %d\n", args->qid);
6354 	DLB_HW_INFO(hw, "\tPriority:  %d\n", args->priority);
6355 }
6356 
6357 static int dlb_verify_map_qid_args(struct dlb_hw *hw,
6358 				   u32 domain_id,
6359 				   struct dlb_map_qid_args *args,
6360 				   struct dlb_cmd_response *resp)
6361 {
6362 	struct dlb_domain *domain;
6363 	struct dlb_ldb_port *port;
6364 	struct dlb_ldb_queue *queue;
6365 	int id;
6366 
6367 	domain = dlb_get_domain_from_id(hw, domain_id);
6368 
6369 	if (domain == NULL) {
6370 		resp->status = DLB_ST_INVALID_DOMAIN_ID;
6371 		return -1;
6372 	}
6373 
6374 	if (!domain->configured) {
6375 		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
6376 		return -1;
6377 	}
6378 
6379 	id = args->port_id;
6380 
6381 	port = dlb_get_domain_used_ldb_port(id, domain);
6382 
6383 	if (port == NULL || !port->configured) {
6384 		resp->status = DLB_ST_INVALID_PORT_ID;
6385 		return -1;
6386 	}
6387 
6388 	if (args->priority >= DLB_QID_PRIORITIES) {
6389 		resp->status = DLB_ST_INVALID_PRIORITY;
6390 		return -1;
6391 	}
6392 
6393 	queue = dlb_get_domain_ldb_queue(args->qid, domain);
6394 
6395 	if (queue == NULL || !queue->configured) {
6396 		resp->status = DLB_ST_INVALID_QID;
6397 		return -1;
6398 	}
6399 
6400 	if (queue->domain_id != domain->id) {
6401 		resp->status = DLB_ST_INVALID_QID;
6402 		return -1;
6403 	}
6404 
6405 	if (port->domain_id != domain->id) {
6406 		resp->status = DLB_ST_INVALID_PORT_ID;
6407 		return -1;
6408 	}
6409 
6410 	return 0;
6411 }
6412 
6413 static int dlb_verify_start_domain_args(struct dlb_hw *hw,
6414 					u32 domain_id,
6415 					struct dlb_cmd_response *resp)
6416 {
6417 	struct dlb_domain *domain;
6418 
6419 	domain = dlb_get_domain_from_id(hw, domain_id);
6420 
6421 	if (domain == NULL) {
6422 		resp->status = DLB_ST_INVALID_DOMAIN_ID;
6423 		return -1;
6424 	}
6425 
6426 	if (!domain->configured) {
6427 		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
6428 		return -1;
6429 	}
6430 
6431 	if (domain->started) {
6432 		resp->status = DLB_ST_DOMAIN_STARTED;
6433 		return -1;
6434 	}
6435 
6436 	return 0;
6437 }
6438 
6439 static int dlb_verify_map_qid_slot_available(struct dlb_ldb_port *port,
6440 					     struct dlb_ldb_queue *queue,
6441 					     struct dlb_cmd_response *resp)
6442 {
6443 	enum dlb_qid_map_state state;
6444 	int i;
6445 
6446 	/* Unused slot available? */
6447 	if (port->num_mappings < DLB_MAX_NUM_QIDS_PER_LDB_CQ)
6448 		return 0;
6449 
6450 	/* If the queue is already mapped (from the application's perspective),
6451 	 * this is simply a priority update.
6452 	 */
6453 	state = DLB_QUEUE_MAPPED;
6454 	if (dlb_port_find_slot_queue(port, state, queue, &i))
6455 		return 0;
6456 
6457 	state = DLB_QUEUE_MAP_IN_PROGRESS;
6458 	if (dlb_port_find_slot_queue(port, state, queue, &i))
6459 		return 0;
6460 
6461 	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i))
6462 		return 0;
6463 
6464 	/* If the slot contains an unmap in progress, it's considered
6465 	 * available.
6466 	 */
6467 	state = DLB_QUEUE_UNMAP_IN_PROGRESS;
6468 	if (dlb_port_find_slot(port, state, &i))
6469 		return 0;
6470 
6471 	state = DLB_QUEUE_UNMAPPED;
6472 	if (dlb_port_find_slot(port, state, &i))
6473 		return 0;
6474 
6475 	resp->status = DLB_ST_NO_QID_SLOTS_AVAILABLE;
6476 	return -EINVAL;
6477 }
6478 
6479 static void dlb_ldb_port_change_qid_priority(struct dlb_hw *hw,
6480 					     struct dlb_ldb_port *port,
6481 					     int slot,
6482 					     struct dlb_map_qid_args *args)
6483 {
6484 	union dlb_lsp_cq2priov r0;
6485 
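	/* CQ2PRIOV packs one valid bit per slot in 'v' and a 3-bit priority
	 * per slot in 'prio', with slot N occupying prio bits [3N+2:3N].
	 */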
6486 	/* Read-modify-write the priority and valid bit register */
6487 	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(port->id));
6488 
6489 	r0.field.v |= 1 << slot;
6490 	r0.field.prio |= (args->priority & 0x7) << (slot * 3);
6491 
6492 	DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port->id), r0.val);
6493 
6494 	dlb_flush_csr(hw);
6495 
6496 	port->qid_map[slot].priority = args->priority;
6497 }
6498 
6499 int dlb_hw_map_qid(struct dlb_hw *hw,
6500 		   u32 domain_id,
6501 		   struct dlb_map_qid_args *args,
6502 		   struct dlb_cmd_response *resp)
6503 {
6504 	enum dlb_qid_map_state state;
6505 	struct dlb_ldb_queue *queue;
6506 	struct dlb_ldb_port *port;
6507 	struct dlb_domain *domain;
6508 	int ret, i, id;
6509 	u8 prio;
6510 
6511 	dlb_log_map_qid(hw, domain_id, args);
6512 
6513 	/* Verify that hardware resources are available before attempting to
6514 	 * satisfy the request. This simplifies the error unwinding code.
6515 	 */
6516 	if (dlb_verify_map_qid_args(hw, domain_id, args, resp))
6517 		return -EINVAL;
6518 
6519 	prio = args->priority;
6520 
6521 	domain = dlb_get_domain_from_id(hw, domain_id);
6522 	if (domain == NULL) {
6523 		DLB_HW_ERR(hw,
6524 			   "[%s():%d] Internal error: domain not found\n",
6525 			   __func__, __LINE__);
6526 		return -EFAULT;
6527 	}
6528 
6529 	id = args->port_id;
6530 
6531 	port = dlb_get_domain_used_ldb_port(id, domain);
6532 	if (port == NULL) {
6533 		DLB_HW_ERR(hw,
6534 			   "[%s():%d] Internal error: port not found\n",
6535 			   __func__, __LINE__);
6536 		return -EFAULT;
6537 	}
6538 
6539 	queue = dlb_get_domain_ldb_queue(args->qid, domain);
6540 	if (queue == NULL) {
6541 		DLB_HW_ERR(hw,
6542 			   "[%s():%d] Internal error: queue not found\n",
6543 			   __func__, __LINE__);
6544 		return -EFAULT;
6545 	}
6546 
6547 	/* If there are any outstanding detach operations for this port,
6548 	 * attempt to complete them. This may be necessary to free up a QID
6549 	 * slot for this requested mapping.
6550 	 */
6551 	if (port->num_pending_removals)
6552 		dlb_domain_finish_unmap_port(hw, domain, port);
6553 
6554 	ret = dlb_verify_map_qid_slot_available(port, queue, resp);
6555 	if (ret)
6556 		return ret;
6557 
6558 	/* Hardware requires disabling the CQ before mapping QIDs. */
6559 	if (port->enabled)
6560 		dlb_ldb_port_cq_disable(hw, port);
6561 
6562 	/* If this is only a priority change, don't perform the full QID->CQ
6563 	 * mapping procedure
6564 	 */
6565 	state = DLB_QUEUE_MAPPED;
6566 	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
6567 		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
6568 			DLB_HW_ERR(hw,
6569 				   "[%s():%d] Internal error: port slot tracking failed\n",
6570 				   __func__, __LINE__);
6571 			return -EFAULT;
6572 		}
6573 
6574 		if (prio != port->qid_map[i].priority) {
6575 			dlb_ldb_port_change_qid_priority(hw, port, i, args);
6576 			DLB_HW_INFO(hw, "DLB map: priority change only\n");
6577 		}
6578 
6579 		state = DLB_QUEUE_MAPPED;
6580 		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
6581 		if (ret)
6582 			return ret;
6583 
6584 		goto map_qid_done;
6585 	}
6586 
6587 	state = DLB_QUEUE_UNMAP_IN_PROGRESS;
6588 	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
6589 		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
6590 			DLB_HW_ERR(hw,
6591 				   "[%s():%d] Internal error: port slot tracking failed\n",
6592 				   __func__, __LINE__);
6593 			return -EFAULT;
6594 		}
6595 
6596 		if (prio != port->qid_map[i].priority) {
6597 			dlb_ldb_port_change_qid_priority(hw, port, i, args);
6598 			DLB_HW_INFO(hw, "DLB map: priority change only\n");
6599 		}
6600 
6601 		state = DLB_QUEUE_MAPPED;
6602 		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
6603 		if (ret)
6604 			return ret;
6605 
6606 		goto map_qid_done;
6607 	}
6608 
6609 	/* If this is a priority change on an in-progress mapping, don't
6610 	 * perform the full QID->CQ mapping procedure.
6611 	 */
6612 	state = DLB_QUEUE_MAP_IN_PROGRESS;
6613 	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
6614 		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
6615 			DLB_HW_ERR(hw,
6616 				   "[%s():%d] Internal error: port slot tracking failed\n",
6617 				   __func__, __LINE__);
6618 			return -EFAULT;
6619 		}
6620 
6621 		port->qid_map[i].priority = prio;
6622 
6623 		DLB_HW_INFO(hw, "DLB map: priority change only\n");
6624 
6625 		goto map_qid_done;
6626 	}
6627 
6628 	/* If this is a priority change on a pending mapping, update the
6629 	 * pending priority
6630 	 */
6631 	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i)) {
6632 		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
6633 			DLB_HW_ERR(hw,
6634 				   "[%s():%d] Internal error: port slot tracking failed\n",
6635 				   __func__, __LINE__);
6636 			return -EFAULT;
6637 		}
6638 
6639 		port->qid_map[i].pending_priority = prio;
6640 
6641 		DLB_HW_INFO(hw, "DLB map: priority change only\n");
6642 
6643 		goto map_qid_done;
6644 	}
6645 
6646 	/* If all the CQ's slots are in use, then there's an unmap in progress
6647 	 * (guaranteed by dlb_verify_map_qid_slot_available()), so add this
6648 	 * mapping to pending_map and return. When the removal is completed for
6649 	 * the slot's current occupant, this mapping will be performed.
6650 	 */
6651 	if (!dlb_port_find_slot(port, DLB_QUEUE_UNMAPPED, &i)) {
6652 		if (dlb_port_find_slot(port, DLB_QUEUE_UNMAP_IN_PROGRESS, &i)) {
6655 			if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
6656 				DLB_HW_ERR(hw,
6657 					   "[%s():%d] Internal error: port slot tracking failed\n",
6658 					   __func__, __LINE__);
6659 				return -EFAULT;
6660 			}
6661 
6662 			port->qid_map[i].pending_qid = queue->id;
6663 			port->qid_map[i].pending_priority = prio;
6664 
6665 			state = DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP;
6666 
6667 			ret = dlb_port_slot_state_transition(hw, port, queue,
6668 							     i, state);
6669 			if (ret)
6670 				return ret;
6671 
6672 			DLB_HW_INFO(hw, "DLB map: map pending removal\n");
6673 
6674 			goto map_qid_done;
6675 		}
6676 	}
6677 
6678 	/* If the domain has started, a special "dynamic" CQ->queue mapping
6679 	 * procedure is required in order to safely update the CQ<->QID tables.
6680 	 * The "static" procedure cannot be used when traffic is flowing,
6681 	 * because the CQ<->QID tables cannot be updated atomically and the
6682 	 * scheduler won't see the new mapping unless the queue's if_status
6683 	 * changes, which isn't guaranteed.
6684 	 */
6685 	ret = dlb_ldb_port_map_qid(hw, domain, port, queue, prio);
6686 
6687 	/* If ret is less than zero, it's due to an internal error */
6688 	if (ret < 0)
6689 		return ret;
6690 
6691 map_qid_done:
6692 	if (port->enabled)
6693 		dlb_ldb_port_cq_enable(hw, port);
6694 
6695 	resp->status = 0;
6696 
6697 	return 0;
6698 }
6699 
6700 static void dlb_log_start_domain(struct dlb_hw *hw, u32 domain_id)
6701 {
6702 	DLB_HW_INFO(hw, "DLB start domain arguments:\n");
6703 	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
6704 }
6705 
6706 static void dlb_ldb_pool_write_credit_count_reg(struct dlb_hw *hw,
6707 						u32 pool_id)
6708 {
6709 	union dlb_chp_ldb_pool_crd_cnt r0 = { {0} };
6710 	struct dlb_credit_pool *pool;
6711 
6712 	pool = &hw->rsrcs.ldb_credit_pools[pool_id];
6713 
6714 	r0.field.count = pool->avail_credits;
6715 
6716 	DLB_CSR_WR(hw,
6717 		   DLB_CHP_LDB_POOL_CRD_CNT(pool->id),
6718 		   r0.val);
6719 }
6720 
6721 static void dlb_dir_pool_write_credit_count_reg(struct dlb_hw *hw,
6722 						u32 pool_id)
6723 {
6724 	union dlb_chp_dir_pool_crd_cnt r0 = { {0} };
6725 	struct dlb_credit_pool *pool;
6726 
6727 	pool = &hw->rsrcs.dir_credit_pools[pool_id];
6728 
6729 	r0.field.count = pool->avail_credits;
6730 
6731 	DLB_CSR_WR(hw,
6732 		   DLB_CHP_DIR_POOL_CRD_CNT(pool->id),
6733 		   r0.val);
6734 }
6735 
6736 /**
6737  * dlb_hw_start_domain() - Lock the domain configuration
6738  * @hw:	  Contains the current state of the DLB hardware.
6739  * @args: User-provided arguments.
6740  * @resp: Response to user.
6741  *
6742  * Return: 0 on success, < 0 on error. If the driver is unable to satisfy a
6743  * request, resp->status will be set accordingly.
6744  */
6745 int dlb_hw_start_domain(struct dlb_hw *hw,
6746 			u32 domain_id,
6747 			struct dlb_start_domain_args *arg,
6748 			struct dlb_cmd_response *resp)
6749 {
6750 	struct dlb_list_entry *iter;
6751 	struct dlb_dir_pq_pair *dir_queue;
6752 	struct dlb_ldb_queue *ldb_queue;
6753 	struct dlb_credit_pool *pool;
6754 	struct dlb_domain *domain;
6755 	RTE_SET_USED(arg);
6756 	RTE_SET_USED(iter);
6757 
6758 	dlb_log_start_domain(hw, domain_id);
6759 
6760 	if (dlb_verify_start_domain_args(hw, domain_id, resp))
6761 		return -EINVAL;
6762 
6763 	domain = dlb_get_domain_from_id(hw, domain_id);
6764 	if (domain == NULL) {
6765 		DLB_HW_ERR(hw,
6766 			   "[%s():%d] Internal error: domain not found\n",
6767 			   __func__, __LINE__);
6768 		return -EFAULT;
6769 	}
6770 
6771 	/* Write the domain's pool credit counts, which have been updated
6772 	 * during port configuration. The sum of the pool credit count plus
6773 	 * each producer port's credit count must equal the pool's credit
6774 	 * allocation *before* traffic is sent.
6775 	 */
6776 	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
6777 		dlb_ldb_pool_write_credit_count_reg(hw, pool->id);
6778 
6779 	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
6780 		dlb_dir_pool_write_credit_count_reg(hw, pool->id);
6781 
6782 	/* Enable load-balanced and directed queue write permissions for the
6783 	 * queues this domain owns. Without this, the DLB will drop all
6784 	 * incoming traffic to those queues.
6785 	 */
6786 	DLB_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
6787 		union dlb_sys_ldb_vasqid_v r0 = { {0} };
6788 		unsigned int offs;
6789 
6790 		r0.field.vasqid_v = 1;
6791 
6792 		offs = domain->id * DLB_MAX_NUM_LDB_QUEUES + ldb_queue->id;
6793 
6794 		DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(offs), r0.val);
6795 	}
6796 
6797 	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
6798 		union dlb_sys_dir_vasqid_v r0 = { {0} };
6799 		unsigned int offs;
6800 
6801 		r0.field.vasqid_v = 1;
6802 
6803 		offs = domain->id * DLB_MAX_NUM_DIR_PORTS + dir_queue->id;
6804 
6805 		DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(offs), r0.val);
6806 	}
6807 
6808 	dlb_flush_csr(hw);
6809 
6810 	domain->started = true;
6811 
6812 	resp->status = 0;
6813 
6814 	return 0;
6815 }
6816 
6817 static void dlb_log_get_dir_queue_depth(struct dlb_hw *hw,
6818 					u32 domain_id,
6819 					u32 queue_id)
6820 {
6821 	DLB_HW_INFO(hw, "DLB get directed queue depth:\n");
6822 	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
6823 	DLB_HW_INFO(hw, "\tQueue ID: %d\n", queue_id);
6824 }
6825 
6826 int dlb_hw_get_dir_queue_depth(struct dlb_hw *hw,
6827 			       u32 domain_id,
6828 			       struct dlb_get_dir_queue_depth_args *args,
6829 			       struct dlb_cmd_response *resp)
6830 {
6831 	struct dlb_dir_pq_pair *queue;
6832 	struct dlb_domain *domain;
6833 	int id;
6834 
6835 	id = domain_id;
6836 
6837 	dlb_log_get_dir_queue_depth(hw, domain_id, args->queue_id);
6838 
6839 	domain = dlb_get_domain_from_id(hw, id);
6840 	if (domain == NULL) {
6841 		resp->status = DLB_ST_INVALID_DOMAIN_ID;
6842 		return -EINVAL;
6843 	}
6844 
6845 	id = args->queue_id;
6846 
6847 	queue = dlb_get_domain_used_dir_pq(id, domain);
6848 	if (queue == NULL) {
6849 		resp->status = DLB_ST_INVALID_QID;
6850 		return -EINVAL;
6851 	}
6852 
6853 	resp->id = dlb_dir_queue_depth(hw, queue);
6854 
6855 	return 0;
6856 }
6857 
6858 static void dlb_log_get_ldb_queue_depth(struct dlb_hw *hw,
6859 					u32 domain_id,
6860 					u32 queue_id)
6861 {
6862 	DLB_HW_INFO(hw, "DLB get load-balanced queue depth:\n");
6863 	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
6864 	DLB_HW_INFO(hw, "\tQueue ID: %d\n", queue_id);
6865 }
6866 
6867 int dlb_hw_get_ldb_queue_depth(struct dlb_hw *hw,
6868 			       u32 domain_id,
6869 			       struct dlb_get_ldb_queue_depth_args *args,
6870 			       struct dlb_cmd_response *resp)
6871 {
6872 	union dlb_lsp_qid_aqed_active_cnt r0;
6873 	union dlb_lsp_qid_atq_enqueue_cnt r1;
6874 	union dlb_lsp_qid_ldb_enqueue_cnt r2;
6875 	struct dlb_ldb_queue *queue;
6876 	struct dlb_domain *domain;
6877 
6878 	dlb_log_get_ldb_queue_depth(hw, domain_id, args->queue_id);
6879 
6880 	domain = dlb_get_domain_from_id(hw, domain_id);
6881 	if (domain == NULL) {
6882 		resp->status = DLB_ST_INVALID_DOMAIN_ID;
6883 		return -EINVAL;
6884 	}
6885 
6886 	queue = dlb_get_domain_ldb_queue(args->queue_id, domain);
6887 	if (queue == NULL) {
6888 		resp->status = DLB_ST_INVALID_QID;
6889 		return -EINVAL;
6890 	}
6891 
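	/* The queue's depth is the sum of its AQED active count and its ATQ
	 * and LDB enqueue counts.
	 */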
6892 	r0.val = DLB_CSR_RD(hw,
6893 			    DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));
6894 
6895 	r1.val = DLB_CSR_RD(hw,
6896 			    DLB_LSP_QID_ATQ_ENQUEUE_CNT(queue->id));
6897 
6898 	r2.val = DLB_CSR_RD(hw,
6899 			    DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));
6900 
6901 	resp->id = r0.val + r1.val + r2.val;
6902 
6903 	return 0;
6904 }
6905