1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4 
5 #include "dlb2_user.h"
6 
7 #include "dlb2_hw_types.h"
8 #include "dlb2_mbox.h"
9 #include "dlb2_osdep.h"
10 #include "dlb2_osdep_bitmap.h"
11 #include "dlb2_osdep_types.h"
12 #include "dlb2_regs.h"
13 #include "dlb2_resource.h"
14 
15 #include "../../dlb2_priv.h"
16 #include "../../dlb2_inline_fns.h"
17 
18 #define DLB2_DOM_LIST_HEAD(head, type) \
19 	DLB2_LIST_HEAD((head), type, domain_list)
20 
21 #define DLB2_FUNC_LIST_HEAD(head, type) \
22 	DLB2_LIST_HEAD((head), type, func_list)
23 
24 #define DLB2_DOM_LIST_FOR(head, ptr, iter) \
25 	DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)
26 
27 #define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
28 	DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)
29 
30 #define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
31 	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)
32 
33 #define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
34 	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
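/*
 * These wrappers walk resources via the 'func_list' links (membership in the
 * owning PF or vdev) and the 'domain_list' links (membership in a scheduling
 * domain) that are embedded in each resource structure.
 */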
35 
36 static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
37 {
38 	int i;
39 
40 	dlb2_list_init_head(&domain->used_ldb_queues);
41 	dlb2_list_init_head(&domain->used_dir_pq_pairs);
42 	dlb2_list_init_head(&domain->avail_ldb_queues);
43 	dlb2_list_init_head(&domain->avail_dir_pq_pairs);
44 
45 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
46 		dlb2_list_init_head(&domain->used_ldb_ports[i]);
47 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
48 		dlb2_list_init_head(&domain->avail_ldb_ports[i]);
49 }
50 
51 static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
52 {
53 	int i;
54 
55 	dlb2_list_init_head(&rsrc->avail_domains);
56 	dlb2_list_init_head(&rsrc->used_domains);
57 	dlb2_list_init_head(&rsrc->avail_ldb_queues);
58 	dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);
59 
60 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
61 		dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
62 }
63 
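/*
 * Set the cfg_64bytes_qe_dir_cq_mode bit so that directed-port CQs operate in
 * sparse mode, i.e. each CQ entry is written as a full 64-byte QE.
 */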
64 void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
65 {
66 	union dlb2_chp_cfg_chp_csr_ctrl r0;
67 
68 	r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
69 
70 	r0.field.cfg_64bytes_qe_dir_cq_mode = 1;
71 
72 	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
73 }
74 
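/**
 * dlb2_hw_get_num_resources() - query the number of currently unassigned
 *	resources owned by the PF or by a virtual device.
 * @hw: Contains the current state of the DLB2 hardware.
 * @arg: Filled in with the available resource counts.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Return: returns 0, or -EINVAL if vdev_req is true and vdev_id is invalid.
 */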
75 int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
76 			      struct dlb2_get_num_resources_args *arg,
77 			      bool vdev_req,
78 			      unsigned int vdev_id)
79 {
80 	struct dlb2_function_resources *rsrcs;
81 	struct dlb2_bitmap *map;
82 	int i;
83 
84 	if (vdev_req && vdev_id >= DLB2_MAX_NUM_VDEVS)
85 		return -EINVAL;
86 
87 	if (vdev_req)
88 		rsrcs = &hw->vdev[vdev_id];
89 	else
90 		rsrcs = &hw->pf;
91 
92 	arg->num_sched_domains = rsrcs->num_avail_domains;
93 
94 	arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;
95 
96 	arg->num_ldb_ports = 0;
97 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
98 		arg->num_ldb_ports += rsrcs->num_avail_ldb_ports[i];
99 
100 	arg->num_cos_ldb_ports[0] = rsrcs->num_avail_ldb_ports[0];
101 	arg->num_cos_ldb_ports[1] = rsrcs->num_avail_ldb_ports[1];
102 	arg->num_cos_ldb_ports[2] = rsrcs->num_avail_ldb_ports[2];
103 	arg->num_cos_ldb_ports[3] = rsrcs->num_avail_ldb_ports[3];
104 
105 	arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;
106 
107 	arg->num_atomic_inflights = rsrcs->num_avail_aqed_entries;
108 
109 	map = rsrcs->avail_hist_list_entries;
110 
111 	arg->num_hist_list_entries = dlb2_bitmap_count(map);
112 
113 	arg->max_contiguous_hist_list_entries =
114 		dlb2_bitmap_longest_set_range(map);
115 
116 	arg->num_ldb_credits = rsrcs->num_avail_qed_entries;
117 
118 	arg->num_dir_credits = rsrcs->num_avail_dqed_entries;
119 
120 	return 0;
121 }
122 
123 void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
124 {
125 	union dlb2_chp_cfg_chp_csr_ctrl r0;
126 
127 	r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
128 
129 	r0.field.cfg_64bytes_qe_ldb_cq_mode = 1;
130 
131 	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
132 }
133 
134 void dlb2_resource_free(struct dlb2_hw *hw)
135 {
136 	int i;
137 
138 	if (hw->pf.avail_hist_list_entries)
139 		dlb2_bitmap_free(hw->pf.avail_hist_list_entries);
140 
141 	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
142 		if (hw->vdev[i].avail_hist_list_entries)
143 			dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
144 	}
145 }
146 
147 int dlb2_resource_init(struct dlb2_hw *hw)
148 {
149 	struct dlb2_list_entry *list;
150 	unsigned int i;
151 	int ret;
152 
153 	/*
154 	 * For optimal load-balancing, ports that map to one or more QIDs in
155 	 * common should not be in numerical sequence. This is application
156 	 * dependent, but the driver interleaves port IDs as much as possible
157 	 * to reduce the likelihood of this. This initial allocation maximizes
158 	 * the average distance between an ID and its immediate neighbors (i.e.
159 	 * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
160 	 * 3, etc.).
161 	 */
162 	u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
163 		0,  7,  14,  5, 12,  3, 10,  1,  8, 15,  6, 13,  4, 11,  2,  9,
164 		16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
165 		32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
166 		48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
167 	};
168 
169 	/* Zero-out resource tracking data structures */
170 	memset(&hw->rsrcs, 0, sizeof(hw->rsrcs));
171 	memset(&hw->pf, 0, sizeof(hw->pf));
172 
173 	dlb2_init_fn_rsrc_lists(&hw->pf);
174 
175 	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
176 		memset(&hw->vdev[i], 0, sizeof(hw->vdev[i]));
177 		dlb2_init_fn_rsrc_lists(&hw->vdev[i]);
178 	}
179 
180 	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
181 		memset(&hw->domains[i], 0, sizeof(hw->domains[i]));
182 		dlb2_init_domain_rsrc_lists(&hw->domains[i]);
183 		hw->domains[i].parent_func = &hw->pf;
184 	}
185 
186 	/* Give all resources to the PF driver */
187 	hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
188 	for (i = 0; i < hw->pf.num_avail_domains; i++) {
189 		list = &hw->domains[i].func_list;
190 
191 		dlb2_list_add(&hw->pf.avail_domains, list);
192 	}
193 
194 	hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
195 	for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
196 		list = &hw->rsrcs.ldb_queues[i].func_list;
197 
198 		dlb2_list_add(&hw->pf.avail_ldb_queues, list);
199 	}
200 
201 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
202 		hw->pf.num_avail_ldb_ports[i] =
203 			DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;
204 
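	/*
	 * Hand the load-balanced ports to the PF in the interleaved order
	 * defined above. With DLB2_MAX_NUM_LDB_PORTS == 64 and
	 * DLB2_NUM_COS_DOMAINS == 4, the shift below assigns each group of 16
	 * consecutive table entries to one class of service.
	 */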
205 	for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
206 		int cos_id = i >> DLB2_NUM_COS_DOMAINS;
207 		struct dlb2_ldb_port *port;
208 
209 		port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];
210 
211 		dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
212 			      &port->func_list);
213 	}
214 
215 	hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS;
216 	for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
217 		list = &hw->rsrcs.dir_pq_pairs[i].func_list;
218 
219 		dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
220 	}
221 
222 	hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
223 	hw->pf.num_avail_dqed_entries = DLB2_MAX_NUM_DIR_CREDITS;
224 	hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;
225 
226 	ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
227 				DLB2_MAX_NUM_HIST_LIST_ENTRIES);
228 	if (ret)
229 		goto unwind;
230 
231 	ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
232 	if (ret)
233 		goto unwind;
234 
235 	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
236 		ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
237 					DLB2_MAX_NUM_HIST_LIST_ENTRIES);
238 		if (ret)
239 			goto unwind;
240 
241 		ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
242 		if (ret)
243 			goto unwind;
244 	}
245 
246 	/* Initialize the hardware resource IDs */
247 	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
248 		hw->domains[i].id.phys_id = i;
249 		hw->domains[i].id.vdev_owned = false;
250 	}
251 
252 	for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
253 		hw->rsrcs.ldb_queues[i].id.phys_id = i;
254 		hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
255 	}
256 
257 	for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
258 		hw->rsrcs.ldb_ports[i].id.phys_id = i;
259 		hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
260 	}
261 
262 	for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS; i++) {
263 		hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
264 		hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
265 	}
266 
267 	for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
268 		hw->rsrcs.sn_groups[i].id = i;
269 		/* Default mode (0) is 64 sequence numbers per queue */
270 		hw->rsrcs.sn_groups[i].mode = 0;
271 		hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
272 		hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
273 	}
274 
275 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
276 		hw->cos_reservation[i] = 100 / DLB2_NUM_COS_DOMAINS;
277 
278 	return 0;
279 
280 unwind:
281 	dlb2_resource_free(hw);
282 
283 	return ret;
284 }
285 
286 void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw)
287 {
288 	union dlb2_cfg_mstr_cfg_pm_pmcsr_disable r0;
289 
290 	r0.val = DLB2_CSR_RD(hw, DLB2_CFG_MSTR_CFG_PM_PMCSR_DISABLE);
291 
292 	r0.field.disable = 0;
293 
294 	DLB2_CSR_WR(hw, DLB2_CFG_MSTR_CFG_PM_PMCSR_DISABLE, r0.val);
295 }
296 
297 static void dlb2_configure_domain_credits(struct dlb2_hw *hw,
298 					  struct dlb2_hw_domain *domain)
299 {
300 	union dlb2_chp_cfg_ldb_vas_crd r0 = { {0} };
301 	union dlb2_chp_cfg_dir_vas_crd r1 = { {0} };
302 
303 	r0.field.count = domain->num_ldb_credits;
304 
305 	DLB2_CSR_WR(hw, DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id), r0.val);
306 
307 	r1.field.count = domain->num_dir_credits;
308 
309 	DLB2_CSR_WR(hw, DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id), r1.val);
310 }
311 
312 static struct dlb2_ldb_port *
313 dlb2_get_next_ldb_port(struct dlb2_hw *hw,
314 		       struct dlb2_function_resources *rsrcs,
315 		       u32 domain_id,
316 		       u32 cos_id)
317 {
318 	struct dlb2_list_entry *iter;
319 	struct dlb2_ldb_port *port;
320 	RTE_SET_USED(iter);
321 	/*
322 	 * To reduce the odds of consecutive load-balanced ports mapping to the
323 	 * same queue(s), the driver attempts to allocate ports whose neighbors
324 	 * are owned by a different domain.
325 	 */
326 	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
327 		u32 next, prev;
328 		u32 phys_id;
329 
330 		phys_id = port->id.phys_id;
331 		next = phys_id + 1;
332 		prev = phys_id - 1;
333 
334 		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
335 			next = 0;
336 		if (phys_id == 0)
337 			prev = DLB2_MAX_NUM_LDB_PORTS - 1;
338 
339 		if (!hw->rsrcs.ldb_ports[next].owned ||
340 		    hw->rsrcs.ldb_ports[next].domain_id.phys_id == domain_id)
341 			continue;
342 
343 		if (!hw->rsrcs.ldb_ports[prev].owned ||
344 		    hw->rsrcs.ldb_ports[prev].domain_id.phys_id == domain_id)
345 			continue;
346 
347 		return port;
348 	}
349 
350 	/*
351 	 * Failing that, the driver looks for a port with one neighbor owned by
352 	 * a different domain and the other unallocated.
353 	 */
354 	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
355 		u32 next, prev;
356 		u32 phys_id;
357 
358 		phys_id = port->id.phys_id;
359 		next = phys_id + 1;
360 		prev = phys_id - 1;
361 
362 		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
363 			next = 0;
364 		if (phys_id == 0)
365 			prev = DLB2_MAX_NUM_LDB_PORTS - 1;
366 
367 		if (!hw->rsrcs.ldb_ports[prev].owned &&
368 		    hw->rsrcs.ldb_ports[next].owned &&
369 		    hw->rsrcs.ldb_ports[next].domain_id.phys_id != domain_id)
370 			return port;
371 
372 		if (!hw->rsrcs.ldb_ports[next].owned &&
373 		    hw->rsrcs.ldb_ports[prev].owned &&
374 		    hw->rsrcs.ldb_ports[prev].domain_id.phys_id != domain_id)
375 			return port;
376 	}
377 
378 	/*
379 	 * Failing that, the driver looks for a port with both neighbors
380 	 * unallocated.
381 	 */
382 	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
383 		u32 next, prev;
384 		u32 phys_id;
385 
386 		phys_id = port->id.phys_id;
387 		next = phys_id + 1;
388 		prev = phys_id - 1;
389 
390 		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
391 			next = 0;
392 		if (phys_id == 0)
393 			prev = DLB2_MAX_NUM_LDB_PORTS - 1;
394 
395 		if (!hw->rsrcs.ldb_ports[prev].owned &&
396 		    !hw->rsrcs.ldb_ports[next].owned)
397 			return port;
398 	}
399 
400 	/* If all else fails, the driver returns the next available port. */
401 	return DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports[cos_id],
402 				   typeof(*port));
403 }
404 
405 static int __dlb2_attach_ldb_ports(struct dlb2_hw *hw,
406 				   struct dlb2_function_resources *rsrcs,
407 				   struct dlb2_hw_domain *domain,
408 				   u32 num_ports,
409 				   u32 cos_id,
410 				   struct dlb2_cmd_response *resp)
411 {
412 	unsigned int i;
413 
414 	if (rsrcs->num_avail_ldb_ports[cos_id] < num_ports) {
415 		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
416 		return -EINVAL;
417 	}
418 
419 	for (i = 0; i < num_ports; i++) {
420 		struct dlb2_ldb_port *port;
421 
422 		port = dlb2_get_next_ldb_port(hw, rsrcs,
423 					      domain->id.phys_id, cos_id);
424 		if (port == NULL) {
425 			DLB2_HW_ERR(hw,
426 				    "[%s()] Internal error: domain validation failed\n",
427 				    __func__);
428 			return -EFAULT;
429 		}
430 
431 		dlb2_list_del(&rsrcs->avail_ldb_ports[cos_id],
432 			      &port->func_list);
433 
434 		port->domain_id = domain->id;
435 		port->owned = true;
436 
437 		dlb2_list_add(&domain->avail_ldb_ports[cos_id],
438 			      &port->domain_list);
439 	}
440 
441 	rsrcs->num_avail_ldb_ports[cos_id] -= num_ports;
442 
443 	return 0;
444 }
445 
446 static int dlb2_attach_ldb_ports(struct dlb2_hw *hw,
447 				 struct dlb2_function_resources *rsrcs,
448 				 struct dlb2_hw_domain *domain,
449 				 struct dlb2_create_sched_domain_args *args,
450 				 struct dlb2_cmd_response *resp)
451 {
452 	unsigned int i, j;
453 	int ret;
454 
455 	if (args->cos_strict) {
456 		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
457 			u32 num = args->num_cos_ldb_ports[i];
458 
459 			/* Allocate ports from specific classes-of-service */
460 			ret = __dlb2_attach_ldb_ports(hw,
461 						      rsrcs,
462 						      domain,
463 						      num,
464 						      i,
465 						      resp);
466 			if (ret)
467 				return ret;
468 		}
469 	} else {
470 		unsigned int k;
471 		u32 cos_id;
472 
473 		/*
474 		 * Attempt to allocate from the requested class of service, but
475 		 * fall back to the other classes if that fails.
476 		 */
477 		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
478 			for (j = 0; j < args->num_cos_ldb_ports[i]; j++) {
479 				for (k = 0; k < DLB2_NUM_COS_DOMAINS; k++) {
480 					cos_id = (i + k) % DLB2_NUM_COS_DOMAINS;
481 
482 					ret = __dlb2_attach_ldb_ports(hw,
483 								      rsrcs,
484 								      domain,
485 								      1,
486 								      cos_id,
487 								      resp);
488 					if (ret == 0)
489 						break;
490 				}
491 
492 				if (ret < 0)
493 					return ret;
494 			}
495 		}
496 	}
497 
498 	/* Allocate num_ldb_ports from any class-of-service */
499 	for (i = 0; i < args->num_ldb_ports; i++) {
500 		for (j = 0; j < DLB2_NUM_COS_DOMAINS; j++) {
501 			ret = __dlb2_attach_ldb_ports(hw,
502 						      rsrcs,
503 						      domain,
504 						      1,
505 						      j,
506 						      resp);
507 			if (ret == 0)
508 				break;
509 		}
510 
511 		if (ret < 0)
512 			return ret;
513 	}
514 
515 	return 0;
516 }
517 
518 static int dlb2_attach_dir_ports(struct dlb2_hw *hw,
519 				 struct dlb2_function_resources *rsrcs,
520 				 struct dlb2_hw_domain *domain,
521 				 u32 num_ports,
522 				 struct dlb2_cmd_response *resp)
523 {
524 	unsigned int i;
525 
526 	if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
527 		resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
528 		return -EINVAL;
529 	}
530 
531 	for (i = 0; i < num_ports; i++) {
532 		struct dlb2_dir_pq_pair *port;
533 
534 		port = DLB2_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
535 					   typeof(*port));
536 		if (port == NULL) {
537 			DLB2_HW_ERR(hw,
538 				    "[%s()] Internal error: domain validation failed\n",
539 				    __func__);
540 			return -EFAULT;
541 		}
542 
543 		dlb2_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);
544 
545 		port->domain_id = domain->id;
546 		port->owned = true;
547 
548 		dlb2_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
549 	}
550 
551 	rsrcs->num_avail_dir_pq_pairs -= num_ports;
552 
553 	return 0;
554 }
555 
556 static int dlb2_attach_ldb_credits(struct dlb2_function_resources *rsrcs,
557 				   struct dlb2_hw_domain *domain,
558 				   u32 num_credits,
559 				   struct dlb2_cmd_response *resp)
560 {
561 	if (rsrcs->num_avail_qed_entries < num_credits) {
562 		resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
563 		return -EINVAL;
564 	}
565 
566 	rsrcs->num_avail_qed_entries -= num_credits;
567 	domain->num_ldb_credits += num_credits;
568 	return 0;
569 }
570 
571 static int dlb2_attach_dir_credits(struct dlb2_function_resources *rsrcs,
572 				   struct dlb2_hw_domain *domain,
573 				   u32 num_credits,
574 				   struct dlb2_cmd_response *resp)
575 {
576 	if (rsrcs->num_avail_dqed_entries < num_credits) {
577 		resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
578 		return -EINVAL;
579 	}
580 
581 	rsrcs->num_avail_dqed_entries -= num_credits;
582 	domain->num_dir_credits += num_credits;
583 	return 0;
584 }
585 
586 static int dlb2_attach_atomic_inflights(struct dlb2_function_resources *rsrcs,
587 					struct dlb2_hw_domain *domain,
588 					u32 num_atomic_inflights,
589 					struct dlb2_cmd_response *resp)
590 {
591 	if (rsrcs->num_avail_aqed_entries < num_atomic_inflights) {
592 		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
593 		return -EINVAL;
594 	}
595 
596 	rsrcs->num_avail_aqed_entries -= num_atomic_inflights;
597 	domain->num_avail_aqed_entries += num_atomic_inflights;
598 	return 0;
599 }
600 
601 static int
602 dlb2_attach_domain_hist_list_entries(struct dlb2_function_resources *rsrcs,
603 				     struct dlb2_hw_domain *domain,
604 				     u32 num_hist_list_entries,
605 				     struct dlb2_cmd_response *resp)
606 {
607 	struct dlb2_bitmap *bitmap;
608 	int base;
609 
610 	if (num_hist_list_entries) {
611 		bitmap = rsrcs->avail_hist_list_entries;
612 
613 		base = dlb2_bitmap_find_set_bit_range(bitmap,
614 						      num_hist_list_entries);
615 		if (base < 0)
616 			goto error;
617 
618 		domain->total_hist_list_entries = num_hist_list_entries;
619 		domain->avail_hist_list_entries = num_hist_list_entries;
620 		domain->hist_list_entry_base = base;
621 		domain->hist_list_entry_offset = 0;
622 
623 		dlb2_bitmap_clear_range(bitmap, base, num_hist_list_entries);
624 	}
625 	return 0;
626 
627 error:
628 	resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
629 	return -EINVAL;
630 }
631 
632 static int dlb2_attach_ldb_queues(struct dlb2_hw *hw,
633 				  struct dlb2_function_resources *rsrcs,
634 				  struct dlb2_hw_domain *domain,
635 				  u32 num_queues,
636 				  struct dlb2_cmd_response *resp)
637 {
638 	unsigned int i;
639 
640 	if (rsrcs->num_avail_ldb_queues < num_queues) {
641 		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
642 		return -EINVAL;
643 	}
644 
645 	for (i = 0; i < num_queues; i++) {
646 		struct dlb2_ldb_queue *queue;
647 
648 		queue = DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
649 					    typeof(*queue));
650 		if (queue == NULL) {
651 			DLB2_HW_ERR(hw,
652 				    "[%s()] Internal error: domain validation failed\n",
653 				    __func__);
654 			return -EFAULT;
655 		}
656 
657 		dlb2_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);
658 
659 		queue->domain_id = domain->id;
660 		queue->owned = true;
661 
662 		dlb2_list_add(&domain->avail_ldb_queues, &queue->domain_list);
663 	}
664 
665 	rsrcs->num_avail_ldb_queues -= num_queues;
666 
667 	return 0;
668 }
669 
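/*
 * Move the requested queues, ports, credits, history list entries, and atomic
 * inflights from the function's available pools into the domain. The caller
 * is expected to have checked availability beforehand (see
 * dlb2_verify_create_sched_dom_args()), so failures here are internal errors.
 */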
670 static int
671 dlb2_domain_attach_resources(struct dlb2_hw *hw,
672 			     struct dlb2_function_resources *rsrcs,
673 			     struct dlb2_hw_domain *domain,
674 			     struct dlb2_create_sched_domain_args *args,
675 			     struct dlb2_cmd_response *resp)
676 {
677 	int ret;
678 
679 	ret = dlb2_attach_ldb_queues(hw,
680 				     rsrcs,
681 				     domain,
682 				     args->num_ldb_queues,
683 				     resp);
684 	if (ret < 0)
685 		return ret;
686 
687 	ret = dlb2_attach_ldb_ports(hw,
688 				    rsrcs,
689 				    domain,
690 				    args,
691 				    resp);
692 	if (ret < 0)
693 		return ret;
694 
695 	ret = dlb2_attach_dir_ports(hw,
696 				    rsrcs,
697 				    domain,
698 				    args->num_dir_ports,
699 				    resp);
700 	if (ret < 0)
701 		return ret;
702 
703 	ret = dlb2_attach_ldb_credits(rsrcs,
704 				      domain,
705 				      args->num_ldb_credits,
706 				      resp);
707 	if (ret < 0)
708 		return ret;
709 
710 	ret = dlb2_attach_dir_credits(rsrcs,
711 				      domain,
712 				      args->num_dir_credits,
713 				      resp);
714 	if (ret < 0)
715 		return ret;
716 
717 	ret = dlb2_attach_domain_hist_list_entries(rsrcs,
718 						   domain,
719 						   args->num_hist_list_entries,
720 						   resp);
721 	if (ret < 0)
722 		return ret;
723 
724 	ret = dlb2_attach_atomic_inflights(rsrcs,
725 					   domain,
726 					   args->num_atomic_inflights,
727 					   resp);
728 	if (ret < 0)
729 		return ret;
730 
731 	dlb2_configure_domain_credits(hw, domain);
732 
733 	domain->configured = true;
734 
735 	domain->started = false;
736 
737 	rsrcs->num_avail_domains--;
738 
739 	return 0;
740 }
741 
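/*
 * Check that the function owns enough of each resource type to satisfy the
 * domain creation request. On failure, resp->status identifies the resource
 * that is unavailable and -EINVAL is returned.
 */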
742 static int
743 dlb2_verify_create_sched_dom_args(struct dlb2_function_resources *rsrcs,
744 				  struct dlb2_create_sched_domain_args *args,
745 				  struct dlb2_cmd_response *resp)
746 {
747 	u32 num_avail_ldb_ports, req_ldb_ports;
748 	struct dlb2_bitmap *avail_hl_entries;
749 	unsigned int max_contig_hl_range;
750 	int i;
751 
752 	avail_hl_entries = rsrcs->avail_hist_list_entries;
753 
754 	max_contig_hl_range = dlb2_bitmap_longest_set_range(avail_hl_entries);
755 
756 	num_avail_ldb_ports = 0;
757 	req_ldb_ports = 0;
758 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
759 		num_avail_ldb_ports += rsrcs->num_avail_ldb_ports[i];
760 
761 		req_ldb_ports += args->num_cos_ldb_ports[i];
762 	}
763 
764 	req_ldb_ports += args->num_ldb_ports;
765 
766 	if (rsrcs->num_avail_domains < 1) {
767 		resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
768 		return -EINVAL;
769 	}
770 
771 	if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues) {
772 		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
773 		return -EINVAL;
774 	}
775 
776 	if (req_ldb_ports > num_avail_ldb_ports) {
777 		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
778 		return -EINVAL;
779 	}
780 
781 	for (i = 0; args->cos_strict && i < DLB2_NUM_COS_DOMAINS; i++) {
782 		if (args->num_cos_ldb_ports[i] >
783 		    rsrcs->num_avail_ldb_ports[i]) {
784 			resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
785 			return -EINVAL;
786 		}
787 	}
788 
789 	if (args->num_ldb_queues > 0 && req_ldb_ports == 0) {
790 		resp->status = DLB2_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
791 		return -EINVAL;
792 	}
793 
794 	if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports) {
795 		resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
796 		return -EINVAL;
797 	}
798 
799 	if (rsrcs->num_avail_qed_entries < args->num_ldb_credits) {
800 		resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
801 		return -EINVAL;
802 	}
803 
804 	if (rsrcs->num_avail_dqed_entries < args->num_dir_credits) {
805 		resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
806 		return -EINVAL;
807 	}
808 
809 	if (rsrcs->num_avail_aqed_entries < args->num_atomic_inflights) {
810 		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
811 		return -EINVAL;
812 	}
813 
814 	if (max_contig_hl_range < args->num_hist_list_entries) {
815 		resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
816 		return -EINVAL;
817 	}
818 
819 	return 0;
820 }
821 
822 static void
823 dlb2_log_create_sched_domain_args(struct dlb2_hw *hw,
824 				  struct dlb2_create_sched_domain_args *args,
825 				  bool vdev_req,
826 				  unsigned int vdev_id)
827 {
828 	DLB2_HW_DBG(hw, "DLB2 create sched domain arguments:\n");
829 	if (vdev_req)
830 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
831 	DLB2_HW_DBG(hw, "\tNumber of LDB queues:          %d\n",
832 		    args->num_ldb_queues);
833 	DLB2_HW_DBG(hw, "\tNumber of LDB ports (any CoS): %d\n",
834 		    args->num_ldb_ports);
835 	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 0):   %d\n",
836 		    args->num_cos_ldb_ports[0]);
837 	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 1):   %d\n",
838 		    args->num_cos_ldb_ports[1]);
839 	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 2):   %d\n",
840 		    args->num_cos_ldb_ports[2]);
841 	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 3):   %d\n",
842 		    args->num_cos_ldb_ports[3]);
843 	DLB2_HW_DBG(hw, "\tStrict CoS allocation:         %d\n",
844 		    args->cos_strict);
845 	DLB2_HW_DBG(hw, "\tNumber of DIR ports:           %d\n",
846 		    args->num_dir_ports);
847 	DLB2_HW_DBG(hw, "\tNumber of ATM inflights:       %d\n",
848 		    args->num_atomic_inflights);
849 	DLB2_HW_DBG(hw, "\tNumber of hist list entries:   %d\n",
850 		    args->num_hist_list_entries);
851 	DLB2_HW_DBG(hw, "\tNumber of LDB credits:         %d\n",
852 		    args->num_ldb_credits);
853 	DLB2_HW_DBG(hw, "\tNumber of DIR credits:         %d\n",
854 		    args->num_dir_credits);
855 }
856 
857 /**
858  * dlb2_hw_create_sched_domain() - Allocate and initialize a DLB scheduling
859  *	domain and its resources.
860  * @hw:	Contains the current state of the DLB2 hardware.
861  * @args: User-provided arguments.
862  * @resp: Response to user.
863  * @vdev_req: Request came from a virtual device.
864  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
865  *
866  * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
867  * satisfy a request, resp->status will be set accordingly.
868  */
869 int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
870 				struct dlb2_create_sched_domain_args *args,
871 				struct dlb2_cmd_response *resp,
872 				bool vdev_req,
873 				unsigned int vdev_id)
874 {
875 	struct dlb2_function_resources *rsrcs;
876 	struct dlb2_hw_domain *domain;
877 	int ret;
878 
879 	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
880 
881 	dlb2_log_create_sched_domain_args(hw, args, vdev_req, vdev_id);
882 
883 	/*
884 	 * Verify that hardware resources are available before attempting to
885 	 * satisfy the request. This simplifies the error unwinding code.
886 	 */
887 	ret = dlb2_verify_create_sched_dom_args(rsrcs, args, resp);
888 	if (ret)
889 		return ret;
890 
891 	domain = DLB2_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
892 	if (domain == NULL) {
893 		DLB2_HW_ERR(hw,
894 			    "[%s():%d] Internal error: no available domains\n",
895 			    __func__, __LINE__);
896 		return -EFAULT;
897 	}
898 
899 	if (domain->configured) {
900 		DLB2_HW_ERR(hw,
901 			    "[%s()] Internal error: avail_domains contains configured domains.\n",
902 			    __func__);
903 		return -EFAULT;
904 	}
905 
906 	dlb2_init_domain_rsrc_lists(domain);
907 
908 	ret = dlb2_domain_attach_resources(hw, rsrcs, domain, args, resp);
909 	if (ret < 0) {
910 		DLB2_HW_ERR(hw,
911 			    "[%s()] Internal error: failed to attach domain resources.\n",
912 			    __func__);
913 
914 		return ret;
915 	}
916 
917 	dlb2_list_del(&rsrcs->avail_domains, &domain->func_list);
918 
919 	dlb2_list_add(&rsrcs->used_domains, &domain->func_list);
920 
921 	resp->id = (vdev_req) ? domain->id.virt_id : domain->id.phys_id;
922 	resp->status = 0;
923 
924 	return 0;
925 }
926 
927 /*
928  * The PF driver cannot assume that a register write will affect subsequent HCW
929  * writes. To ensure a write completes, the driver must read back a CSR. This
930  * function only need be called for configuration that can occur after the
931  * domain has started; prior to starting, applications can't send HCWs.
932  */
933 static inline void dlb2_flush_csr(struct dlb2_hw *hw)
934 {
935 	DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS);
936 }
937 
938 static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
939 				     struct dlb2_dir_pq_pair *port)
940 {
941 	union dlb2_lsp_cq_dir_dsbl reg;
942 
943 	reg.field.disabled = 1;
944 
945 	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);
946 
947 	dlb2_flush_csr(hw);
948 }
949 
950 static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
951 				   struct dlb2_dir_pq_pair *port)
952 {
953 	union dlb2_lsp_cq_dir_tkn_cnt r0;
954 
955 	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id));
956 
957 	/*
958 	 * Account for the initial token count, which is used in order to
959 	 * provide a CQ with depth less than 8.
960 	 */
961 
962 	return r0.field.count - port->init_tkn_cnt;
963 }
964 
965 static int dlb2_drain_dir_cq(struct dlb2_hw *hw,
966 			     struct dlb2_dir_pq_pair *port)
967 {
968 	unsigned int port_id = port->id.phys_id;
969 	u32 cnt;
970 
971 	/* Return any outstanding tokens */
972 	cnt = dlb2_dir_cq_token_count(hw, port);
973 
974 	if (cnt != 0) {
975 		struct dlb2_hcw hcw_mem[8], *hcw;
976 		void  *pp_addr;
977 
978 		pp_addr = os_map_producer_port(hw, port_id, false);
979 
980 		/* Point hcw to a 64B-aligned location */
981 		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
982 
983 		/*
984 		 * Program the first HCW for a batch token return and
985 		 * the rest as NOOPS
986 		 */
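		/*
		 * lock_id is set to cnt - 1 so that all 'cnt' outstanding
		 * tokens are returned in one batch.
		 */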
987 		memset(hcw, 0, 4 * sizeof(*hcw));
988 		hcw->cq_token = 1;
989 		hcw->lock_id = cnt - 1;
990 
991 		dlb2_movdir64b(pp_addr, hcw);
992 
993 		os_fence_hcw(hw, pp_addr);
994 
995 		os_unmap_producer_port(hw, pp_addr);
996 	}
997 
998 	return 0;
999 }
1000 
1001 static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
1002 				    struct dlb2_dir_pq_pair *port)
1003 {
1004 	union dlb2_lsp_cq_dir_dsbl reg;
1005 
1006 	reg.field.disabled = 0;
1007 
1008 	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);
1009 
1010 	dlb2_flush_csr(hw);
1011 }
1012 
1013 static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
1014 				     struct dlb2_hw_domain *domain,
1015 				     bool toggle_port)
1016 {
1017 	struct dlb2_list_entry *iter;
1018 	struct dlb2_dir_pq_pair *port;
1019 	int ret;
1020 	RTE_SET_USED(iter);
1021 
1022 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
1023 		/*
1024 		 * Can't drain a port if it's not configured, and there's
1025 		 * nothing to drain if its queue is unconfigured.
1026 		 */
1027 		if (!port->port_configured || !port->queue_configured)
1028 			continue;
1029 
1030 		if (toggle_port)
1031 			dlb2_dir_port_cq_disable(hw, port);
1032 
1033 		ret = dlb2_drain_dir_cq(hw, port);
1034 		if (ret < 0)
1035 			return ret;
1036 
1037 		if (toggle_port)
1038 			dlb2_dir_port_cq_enable(hw, port);
1039 	}
1040 
1041 	return 0;
1042 }
1043 
1044 static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
1045 				struct dlb2_dir_pq_pair *queue)
1046 {
1047 	union dlb2_lsp_qid_dir_enqueue_cnt r0;
1048 
1049 	r0.val = DLB2_CSR_RD(hw,
1050 			     DLB2_LSP_QID_DIR_ENQUEUE_CNT(queue->id.phys_id));
1051 
1052 	return r0.field.count;
1053 }
1054 
1055 static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
1056 				    struct dlb2_dir_pq_pair *queue)
1057 {
1058 	return dlb2_dir_queue_depth(hw, queue) == 0;
1059 }
1060 
1061 static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
1062 					 struct dlb2_hw_domain *domain)
1063 {
1064 	struct dlb2_list_entry *iter;
1065 	struct dlb2_dir_pq_pair *queue;
1066 	RTE_SET_USED(iter);
1067 
1068 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
1069 		if (!dlb2_dir_queue_is_empty(hw, queue))
1070 			return false;
1071 	}
1072 
1073 	return true;
1074 }
1075 
1076 static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
1077 					struct dlb2_hw_domain *domain)
1078 {
1079 	int i, ret;
1080 
1081 	/* If the domain hasn't been started, there's no traffic to drain */
1082 	if (!domain->started)
1083 		return 0;
1084 
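	/*
	 * Alternate between draining the directed CQs and checking the queue
	 * depths, giving up after DLB2_MAX_QID_EMPTY_CHECK_LOOPS attempts.
	 */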
1085 	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
1086 		ret = dlb2_domain_drain_dir_cqs(hw, domain, true);
1087 		if (ret < 0)
1088 			return ret;
1089 
1090 		if (dlb2_domain_dir_queues_empty(hw, domain))
1091 			break;
1092 	}
1093 
1094 	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
1095 		DLB2_HW_ERR(hw,
1096 			    "[%s()] Internal error: failed to empty queues\n",
1097 			    __func__);
1098 		return -EFAULT;
1099 	}
1100 
1101 	/*
1102 	 * Drain the CQs one more time. For the queues to go empty, they would
1103 	 * have scheduled one or more QEs.
1104 	 */
1105 	ret = dlb2_domain_drain_dir_cqs(hw, domain, true);
1106 	if (ret < 0)
1107 		return ret;
1108 
1109 	return 0;
1110 }
1111 
1112 static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
1113 				    struct dlb2_ldb_port *port)
1114 {
1115 	union dlb2_lsp_cq_ldb_dsbl reg;
1116 
1117 	/*
1118 	 * Don't re-enable the port if a removal is pending. The caller should
1119 	 * mark this port as enabled (if it isn't already), and when the
1120 	 * removal completes the port will be enabled.
1121 	 */
1122 	if (port->num_pending_removals)
1123 		return;
1124 
1125 	reg.field.disabled = 0;
1126 
1127 	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);
1128 
1129 	dlb2_flush_csr(hw);
1130 }
1131 
1132 static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
1133 				     struct dlb2_ldb_port *port)
1134 {
1135 	union dlb2_lsp_cq_ldb_dsbl reg;
1136 
1137 	reg.field.disabled = 1;
1138 
1139 	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);
1140 
1141 	dlb2_flush_csr(hw);
1142 }
1143 
1144 static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
1145 				      struct dlb2_ldb_port *port)
1146 {
1147 	union dlb2_lsp_cq_ldb_infl_cnt r0;
1148 
1149 	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));
1150 
1151 	return r0.field.count;
1152 }
1153 
1154 static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
1155 				   struct dlb2_ldb_port *port)
1156 {
1157 	union dlb2_lsp_cq_ldb_tkn_cnt r0;
1158 
1159 	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id));
1160 
1161 	/*
1162 	 * Account for the initial token count, which is used in order to
1163 	 * provide a CQ with depth less than 8.
1164 	 */
1165 
1166 	return r0.field.token_count - port->init_tkn_cnt;
1167 }
1168 
1169 static int dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
1170 {
1171 	u32 infl_cnt, tkn_cnt;
1172 	unsigned int i;
1173 
1174 	infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
1175 	tkn_cnt = dlb2_ldb_cq_token_count(hw, port);
1176 
1177 	if (infl_cnt || tkn_cnt) {
1178 		struct dlb2_hcw hcw_mem[8], *hcw;
1179 		void  *pp_addr;
1180 
1181 		pp_addr = os_map_producer_port(hw, port->id.phys_id, true);
1182 
1183 		/* Point hcw to a 64B-aligned location */
1184 		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
1185 
1186 		/*
1187 		 * Program the first HCW for a completion and token return and
1188 		 * the other HCWs as NOOPS
1189 		 */
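		/*
		 * The first HCW returns all outstanding tokens (lock_id is set
		 * to tkn_cnt - 1) and, if there are inflights, one completion;
		 * the loop below issues the remaining completions with
		 * cq_token cleared.
		 */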
1190 
1191 		memset(hcw, 0, 4 * sizeof(*hcw));
1192 		hcw->qe_comp = (infl_cnt > 0);
1193 		hcw->cq_token = (tkn_cnt > 0);
1194 		hcw->lock_id = tkn_cnt - 1;
1195 
1196 		/* Return tokens in the first HCW */
1197 		dlb2_movdir64b(pp_addr, hcw);
1198 
1199 		hcw->cq_token = 0;
1200 
1201 		/* Issue remaining completions (if any) */
1202 		for (i = 1; i < infl_cnt; i++)
1203 			dlb2_movdir64b(pp_addr, hcw);
1204 
1205 		os_fence_hcw(hw, pp_addr);
1206 
1207 		os_unmap_producer_port(hw, pp_addr);
1208 	}
1209 
1210 	return 0;
1211 }
1212 
1213 static int dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
1214 				     struct dlb2_hw_domain *domain,
1215 				     bool toggle_port)
1216 {
1217 	struct dlb2_list_entry *iter;
1218 	struct dlb2_ldb_port *port;
1219 	int ret, i;
1220 	RTE_SET_USED(iter);
1221 
1222 	/* If the domain hasn't been started, there's no traffic to drain */
1223 	if (!domain->started)
1224 		return 0;
1225 
1226 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1227 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1228 			if (toggle_port)
1229 				dlb2_ldb_port_cq_disable(hw, port);
1230 
1231 			ret = dlb2_drain_ldb_cq(hw, port);
1232 			if (ret < 0)
1233 				return ret;
1234 
1235 			if (toggle_port)
1236 				dlb2_ldb_port_cq_enable(hw, port);
1237 		}
1238 	}
1239 
1240 	return 0;
1241 }
1242 
1243 static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
1244 				struct dlb2_ldb_queue *queue)
1245 {
1246 	union dlb2_lsp_qid_aqed_active_cnt r0;
1247 	union dlb2_lsp_qid_atm_active r1;
1248 	union dlb2_lsp_qid_ldb_enqueue_cnt r2;
1249 
1250 	r0.val = DLB2_CSR_RD(hw,
1251 			     DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
1252 	r1.val = DLB2_CSR_RD(hw,
1253 			     DLB2_LSP_QID_ATM_ACTIVE(queue->id.phys_id));
1254 
1255 	r2.val = DLB2_CSR_RD(hw,
1256 			     DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));
1257 
1258 	return r0.field.count + r1.field.count + r2.field.count;
1259 }
1260 
1261 static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
1262 				    struct dlb2_ldb_queue *queue)
1263 {
1264 	return dlb2_ldb_queue_depth(hw, queue) == 0;
1265 }
1266 
1267 static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
1268 					    struct dlb2_hw_domain *domain)
1269 {
1270 	struct dlb2_list_entry *iter;
1271 	struct dlb2_ldb_queue *queue;
1272 	RTE_SET_USED(iter);
1273 
1274 	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
1275 		if (queue->num_mappings == 0)
1276 			continue;
1277 
1278 		if (!dlb2_ldb_queue_is_empty(hw, queue))
1279 			return false;
1280 	}
1281 
1282 	return true;
1283 }
1284 
1285 static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
1286 					   struct dlb2_hw_domain *domain)
1287 {
1288 	int i, ret;
1289 
1290 	/* If the domain hasn't been started, there's no traffic to drain */
1291 	if (!domain->started)
1292 		return 0;
1293 
1294 	if (domain->num_pending_removals > 0) {
1295 		DLB2_HW_ERR(hw,
1296 			    "[%s()] Internal error: failed to unmap domain queues\n",
1297 			    __func__);
1298 		return -EFAULT;
1299 	}
1300 
1301 	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
1302 		ret = dlb2_domain_drain_ldb_cqs(hw, domain, true);
1303 		if (ret < 0)
1304 			return ret;
1305 
1306 		if (dlb2_domain_mapped_queues_empty(hw, domain))
1307 			break;
1308 	}
1309 
1310 	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
1311 		DLB2_HW_ERR(hw,
1312 			    "[%s()] Internal error: failed to empty queues\n",
1313 			    __func__);
1314 		return -EFAULT;
1315 	}
1316 
1317 	/*
1318 	 * Drain the CQs one more time. For the queues to go empty, they would
1319 	 * have scheduled one or more QEs.
1320 	 */
1321 	ret = dlb2_domain_drain_ldb_cqs(hw, domain, true);
1322 	if (ret < 0)
1323 		return ret;
1324 
1325 	return 0;
1326 }
1327 
1328 static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
1329 				       struct dlb2_hw_domain *domain)
1330 {
1331 	struct dlb2_list_entry *iter;
1332 	struct dlb2_ldb_port *port;
1333 	int i;
1334 	RTE_SET_USED(iter);
1335 
1336 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1337 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1338 			port->enabled = true;
1339 
1340 			dlb2_ldb_port_cq_enable(hw, port);
1341 		}
1342 	}
1343 }
1344 
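/*
 * Look up a load-balanced queue by ID. For PF-originated requests the ID is
 * the physical queue index; for vdev requests the virtual ID is resolved by
 * searching the vdev's used and available queues.
 */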
1345 static struct dlb2_ldb_queue *
1346 dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
1347 			   u32 id,
1348 			   bool vdev_req,
1349 			   unsigned int vdev_id)
1350 {
1351 	struct dlb2_list_entry *iter1;
1352 	struct dlb2_list_entry *iter2;
1353 	struct dlb2_function_resources *rsrcs;
1354 	struct dlb2_hw_domain *domain;
1355 	struct dlb2_ldb_queue *queue;
1356 	RTE_SET_USED(iter1);
1357 	RTE_SET_USED(iter2);
1358 
1359 	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
1360 		return NULL;
1361 
1362 	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
1363 
1364 	if (!vdev_req)
1365 		return &hw->rsrcs.ldb_queues[id];
1366 
1367 	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
1368 		DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2)
1369 			if (queue->id.virt_id == id)
1370 				return queue;
1371 	}
1372 
1373 	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1)
1374 		if (queue->id.virt_id == id)
1375 			return queue;
1376 
1377 	return NULL;
1378 }
1379 
1380 static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
1381 						      u32 id,
1382 						      bool vdev_req,
1383 						      unsigned int vdev_id)
1384 {
1385 	struct dlb2_list_entry *iteration;
1386 	struct dlb2_function_resources *rsrcs;
1387 	struct dlb2_hw_domain *domain;
1388 	RTE_SET_USED(iteration);
1389 
1390 	if (id >= DLB2_MAX_NUM_DOMAINS)
1391 		return NULL;
1392 
1393 	if (!vdev_req)
1394 		return &hw->domains[id];
1395 
1396 	rsrcs = &hw->vdev[vdev_id];
1397 
1398 	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration)
1399 		if (domain->id.virt_id == id)
1400 			return domain;
1401 
1402 	return NULL;
1403 }
1404 
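/*
 * Apply a QID-map slot state transition for the given port and queue,
 * updating the queue, port, and domain mapping and pending-operation counters
 * as needed. Transitions not permitted by the state machine below return
 * -EFAULT.
 */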
1405 static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
1406 					   struct dlb2_ldb_port *port,
1407 					   struct dlb2_ldb_queue *queue,
1408 					   int slot,
1409 					   enum dlb2_qid_map_state new_state)
1410 {
1411 	enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
1412 	struct dlb2_hw_domain *domain;
1413 	int domain_id;
1414 
1415 	domain_id = port->domain_id.phys_id;
1416 
1417 	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
1418 	if (domain == NULL) {
1419 		DLB2_HW_ERR(hw,
1420 			    "[%s()] Internal error: unable to find domain %d\n",
1421 			    __func__, domain_id);
1422 		return -EINVAL;
1423 	}
1424 
1425 	switch (curr_state) {
1426 	case DLB2_QUEUE_UNMAPPED:
1427 		switch (new_state) {
1428 		case DLB2_QUEUE_MAPPED:
1429 			queue->num_mappings++;
1430 			port->num_mappings++;
1431 			break;
1432 		case DLB2_QUEUE_MAP_IN_PROG:
1433 			queue->num_pending_additions++;
1434 			domain->num_pending_additions++;
1435 			break;
1436 		default:
1437 			goto error;
1438 		}
1439 		break;
1440 	case DLB2_QUEUE_MAPPED:
1441 		switch (new_state) {
1442 		case DLB2_QUEUE_UNMAPPED:
1443 			queue->num_mappings--;
1444 			port->num_mappings--;
1445 			break;
1446 		case DLB2_QUEUE_UNMAP_IN_PROG:
1447 			port->num_pending_removals++;
1448 			domain->num_pending_removals++;
1449 			break;
1450 		case DLB2_QUEUE_MAPPED:
1451 			/* Priority change, nothing to update */
1452 			break;
1453 		default:
1454 			goto error;
1455 		}
1456 		break;
1457 	case DLB2_QUEUE_MAP_IN_PROG:
1458 		switch (new_state) {
1459 		case DLB2_QUEUE_UNMAPPED:
1460 			queue->num_pending_additions--;
1461 			domain->num_pending_additions--;
1462 			break;
1463 		case DLB2_QUEUE_MAPPED:
1464 			queue->num_mappings++;
1465 			port->num_mappings++;
1466 			queue->num_pending_additions--;
1467 			domain->num_pending_additions--;
1468 			break;
1469 		default:
1470 			goto error;
1471 		}
1472 		break;
1473 	case DLB2_QUEUE_UNMAP_IN_PROG:
1474 		switch (new_state) {
1475 		case DLB2_QUEUE_UNMAPPED:
1476 			port->num_pending_removals--;
1477 			domain->num_pending_removals--;
1478 			queue->num_mappings--;
1479 			port->num_mappings--;
1480 			break;
1481 		case DLB2_QUEUE_MAPPED:
1482 			port->num_pending_removals--;
1483 			domain->num_pending_removals--;
1484 			break;
1485 		case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1486 			/* Nothing to update */
1487 			break;
1488 		default:
1489 			goto error;
1490 		}
1491 		break;
1492 	case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1493 		switch (new_state) {
1494 		case DLB2_QUEUE_UNMAP_IN_PROG:
1495 			/* Nothing to update */
1496 			break;
1497 		case DLB2_QUEUE_UNMAPPED:
1498 			/*
1499 			 * An UNMAP_IN_PROG_PENDING_MAP slot briefly
1500 			 * becomes UNMAPPED before it transitions to
1501 			 * MAP_IN_PROG.
1502 			 */
1503 			queue->num_mappings--;
1504 			port->num_mappings--;
1505 			port->num_pending_removals--;
1506 			domain->num_pending_removals--;
1507 			break;
1508 		default:
1509 			goto error;
1510 		}
1511 		break;
1512 	default:
1513 		goto error;
1514 	}
1515 
1516 	port->qid_map[slot].state = new_state;
1517 
1518 	DLB2_HW_DBG(hw,
1519 		    "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
1520 		    __func__, queue->id.phys_id, port->id.phys_id,
1521 		    curr_state, new_state);
1522 	return 0;
1523 
1524 error:
1525 	DLB2_HW_ERR(hw,
1526 		    "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
1527 		    __func__, queue->id.phys_id, port->id.phys_id,
1528 		    curr_state, new_state);
1529 	return -EFAULT;
1530 }
1531 
1532 static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
1533 				enum dlb2_qid_map_state state,
1534 				int *slot)
1535 {
1536 	int i;
1537 
1538 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1539 		if (port->qid_map[i].state == state)
1540 			break;
1541 	}
1542 
1543 	*slot = i;
1544 
1545 	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1546 }
1547 
1548 static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
1549 				      enum dlb2_qid_map_state state,
1550 				      struct dlb2_ldb_queue *queue,
1551 				      int *slot)
1552 {
1553 	int i;
1554 
1555 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1556 		if (port->qid_map[i].state == state &&
1557 		    port->qid_map[i].qid == queue->id.phys_id)
1558 			break;
1559 	}
1560 
1561 	*slot = i;
1562 
1563 	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1564 }
1565 
1566 /*
1567  * dlb2_ldb_queue_{enable, disable}_mapped_cqs() toggle only the CQs of enabled
1568  * ports that have the queue mapped, and do not change the ports' software
1569  * 'enabled' state; they should only be called by the dynamic CQ mapping code.
1570  */
1571 static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
1572 					      struct dlb2_hw_domain *domain,
1573 					      struct dlb2_ldb_queue *queue)
1574 {
1575 	struct dlb2_list_entry *iter;
1576 	struct dlb2_ldb_port *port;
1577 	int slot, i;
1578 	RTE_SET_USED(iter);
1579 
1580 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1581 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1582 			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
1583 
1584 			if (!dlb2_port_find_slot_queue(port, state,
1585 						       queue, &slot))
1586 				continue;
1587 
1588 			if (port->enabled)
1589 				dlb2_ldb_port_cq_disable(hw, port);
1590 		}
1591 	}
1592 }
1593 
1594 static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
1595 					     struct dlb2_hw_domain *domain,
1596 					     struct dlb2_ldb_queue *queue)
1597 {
1598 	struct dlb2_list_entry *iter;
1599 	struct dlb2_ldb_port *port;
1600 	int slot, i;
1601 	RTE_SET_USED(iter);
1602 
1603 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1604 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1605 			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
1606 
1607 			if (!dlb2_port_find_slot_queue(port, state,
1608 						       queue, &slot))
1609 				continue;
1610 
1611 			if (port->enabled)
1612 				dlb2_ldb_port_cq_enable(hw, port);
1613 		}
1614 	}
1615 }
1616 
1617 static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
1618 						struct dlb2_ldb_port *port,
1619 						int slot)
1620 {
1621 	union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };
1622 
1623 	r0.field.cq = port->id.phys_id;
1624 	r0.field.qidix = slot;
1625 	r0.field.value = 0;
1626 	r0.field.inflight_ok_v = 1;
1627 
1628 	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);
1629 
1630 	dlb2_flush_csr(hw);
1631 }
1632 
1633 static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
1634 					      struct dlb2_ldb_port *port,
1635 					      int slot)
1636 {
1637 	union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };
1638 
1639 	r0.field.cq = port->id.phys_id;
1640 	r0.field.qidix = slot;
1641 	r0.field.value = 1;
1642 	r0.field.inflight_ok_v = 1;
1643 
1644 	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);
1645 
1646 	dlb2_flush_csr(hw);
1647 }
1648 
1649 static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
1650 					struct dlb2_ldb_port *p,
1651 					struct dlb2_ldb_queue *q,
1652 					u8 priority)
1653 {
1654 	union dlb2_lsp_cq2priov r0;
1655 	union dlb2_lsp_cq2qid0 r1;
1656 	union dlb2_atm_qid2cqidix_00 r2;
1657 	union dlb2_lsp_qid2cqidix_00 r3;
1658 	union dlb2_lsp_qid2cqidix2_00 r4;
1659 	enum dlb2_qid_map_state state;
1660 	int i;
1661 
1662 	/* Look for a pending or already mapped slot, else an unused slot */
1663 	if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
1664 	    !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
1665 	    !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
1666 		DLB2_HW_ERR(hw,
1667 			    "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
1668 			    __func__, __LINE__);
1669 		return -EFAULT;
1670 	}
1671 
1672 	if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
1673 		DLB2_HW_ERR(hw,
1674 			    "[%s():%d] Internal error: port slot tracking failed\n",
1675 			    __func__, __LINE__);
1676 		return -EFAULT;
1677 	}
1678 
1679 	/* Read-modify-write the priority and valid bit register */
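	/*
	 * Slot i owns bit i of the valid field and the 3-bit priority field at
	 * bit offset 3 * i.
	 */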
1680 	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id));
1681 
1682 	r0.field.v |= 1 << i;
1683 	r0.field.prio |= (priority & 0x7) << i * 3;
1684 
1685 	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id), r0.val);
1686 
1687 	/* Read-modify-write the QID map register */
1688 	if (i < 4)
1689 		r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(p->id.phys_id));
1690 	else
1691 		r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(p->id.phys_id));
1692 
1693 	if (i == 0 || i == 4)
1694 		r1.field.qid_p0 = q->id.phys_id;
1695 	if (i == 1 || i == 5)
1696 		r1.field.qid_p1 = q->id.phys_id;
1697 	if (i == 2 || i == 6)
1698 		r1.field.qid_p2 = q->id.phys_id;
1699 	if (i == 3 || i == 7)
1700 		r1.field.qid_p3 = q->id.phys_id;
1701 
1702 	if (i < 4)
1703 		DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID0(p->id.phys_id), r1.val);
1704 	else
1705 		DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID1(p->id.phys_id), r1.val);
1706 
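	/*
	 * Read-modify-write the queue-to-CQ index registers. Each register
	 * instance covers a group of four CQs (selected by phys_id / 4); the
	 * slot bit is set in the cq_pN field matching this CQ's position
	 * within the group (phys_id % 4).
	 */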
1707 	r2.val = DLB2_CSR_RD(hw,
1708 			     DLB2_ATM_QID2CQIDIX(q->id.phys_id,
1709 						 p->id.phys_id / 4));
1710 
1711 	r3.val = DLB2_CSR_RD(hw,
1712 			     DLB2_LSP_QID2CQIDIX(q->id.phys_id,
1713 						 p->id.phys_id / 4));
1714 
1715 	r4.val = DLB2_CSR_RD(hw,
1716 			     DLB2_LSP_QID2CQIDIX2(q->id.phys_id,
1717 						  p->id.phys_id / 4));
1718 
1719 	switch (p->id.phys_id % 4) {
1720 	case 0:
1721 		r2.field.cq_p0 |= 1 << i;
1722 		r3.field.cq_p0 |= 1 << i;
1723 		r4.field.cq_p0 |= 1 << i;
1724 		break;
1725 
1726 	case 1:
1727 		r2.field.cq_p1 |= 1 << i;
1728 		r3.field.cq_p1 |= 1 << i;
1729 		r4.field.cq_p1 |= 1 << i;
1730 		break;
1731 
1732 	case 2:
1733 		r2.field.cq_p2 |= 1 << i;
1734 		r3.field.cq_p2 |= 1 << i;
1735 		r4.field.cq_p2 |= 1 << i;
1736 		break;
1737 
1738 	case 3:
1739 		r2.field.cq_p3 |= 1 << i;
1740 		r3.field.cq_p3 |= 1 << i;
1741 		r4.field.cq_p3 |= 1 << i;
1742 		break;
1743 	}
1744 
1745 	DLB2_CSR_WR(hw,
1746 		    DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
1747 		    r2.val);
1748 
1749 	DLB2_CSR_WR(hw,
1750 		    DLB2_LSP_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
1751 		    r3.val);
1752 
1753 	DLB2_CSR_WR(hw,
1754 		    DLB2_LSP_QID2CQIDIX2(q->id.phys_id, p->id.phys_id / 4),
1755 		    r4.val);
1756 
1757 	dlb2_flush_csr(hw);
1758 
1759 	p->qid_map[i].qid = q->id.phys_id;
1760 	p->qid_map[i].priority = priority;
1761 
1762 	state = DLB2_QUEUE_MAPPED;
1763 
1764 	return dlb2_port_slot_state_transition(hw, p, q, i, state);
1765 }
1766 
1767 static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
1768 					   struct dlb2_ldb_port *port,
1769 					   struct dlb2_ldb_queue *queue,
1770 					   int slot)
1771 {
1772 	union dlb2_lsp_qid_aqed_active_cnt r0;
1773 	union dlb2_lsp_qid_ldb_enqueue_cnt r1;
1774 	union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };
1775 
1776 	/* Set the atomic scheduling haswork bit */
1777 	r0.val = DLB2_CSR_RD(hw,
1778 			     DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
1779 
1780 	r2.field.cq = port->id.phys_id;
1781 	r2.field.qidix = slot;
1782 	r2.field.value = 1;
1783 	r2.field.rlist_haswork_v = r0.field.count > 0;
1784 
1785 	/* Set the non-atomic scheduling haswork bit */
1786 	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
1787 
1788 	r1.val = DLB2_CSR_RD(hw,
1789 			     DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));
1790 
1791 	memset(&r2, 0, sizeof(r2));
1792 
1793 	r2.field.cq = port->id.phys_id;
1794 	r2.field.qidix = slot;
1795 	r2.field.value = 1;
1796 	r2.field.nalb_haswork_v = (r1.field.count > 0);
1797 
1798 	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
1799 
1800 	dlb2_flush_csr(hw);
1801 
1802 	return 0;
1803 }
1804 
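/* Clear both the atomic and non-atomic "has work" indications for the slot. */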
1805 static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
1806 					      struct dlb2_ldb_port *port,
1807 					      u8 slot)
1808 {
1809 	union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };
1810 
1811 	r2.field.cq = port->id.phys_id;
1812 	r2.field.qidix = slot;
1813 	r2.field.value = 0;
1814 	r2.field.rlist_haswork_v = 1;
1815 
1816 	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
1817 
1818 	memset(&r2, 0, sizeof(r2));
1819 
1820 	r2.field.cq = port->id.phys_id;
1821 	r2.field.qidix = slot;
1822 	r2.field.value = 0;
1823 	r2.field.nalb_haswork_v = 1;
1824 
1825 	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
1826 
1827 	dlb2_flush_csr(hw);
1828 }
1829 
1830 static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
1831 					      struct dlb2_ldb_queue *queue)
1832 {
1833 	union dlb2_lsp_qid_ldb_infl_lim r0 = { {0} };
1834 
1835 	r0.field.limit = queue->num_qid_inflights;
1836 
1837 	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r0.val);
1838 }
1839 
1840 static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
1841 						struct dlb2_ldb_queue *queue)
1842 {
1843 	DLB2_CSR_WR(hw,
1844 		    DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id),
1845 		    DLB2_LSP_QID_LDB_INFL_LIM_RST);
1846 }
1847 
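/*
 * Complete a dynamic map once the queue's inflight count has drained to zero:
 * program the static map registers, set the slot's has-work bits, restore the
 * queue's inflight limit, and re-enable the CQs mapped to the queue.
 */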
1848 static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
1849 						struct dlb2_hw_domain *domain,
1850 						struct dlb2_ldb_port *port,
1851 						struct dlb2_ldb_queue *queue)
1852 {
1853 	struct dlb2_list_entry *iter;
1854 	union dlb2_lsp_qid_ldb_infl_cnt r0;
1855 	enum dlb2_qid_map_state state;
1856 	int slot, ret, i;
1857 	u8 prio;
1858 	RTE_SET_USED(iter);
1859 
1860 	r0.val = DLB2_CSR_RD(hw,
1861 			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
1862 
1863 	if (r0.field.count) {
1864 		DLB2_HW_ERR(hw,
1865 			    "[%s()] Internal error: non-zero QID inflight count\n",
1866 			    __func__);
1867 		return -EINVAL;
1868 	}
1869 
1870 	/*
1871 	 * Statically map the port and set its corresponding has_work bits.
1872 	 */
1873 	state = DLB2_QUEUE_MAP_IN_PROG;
1874 	if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
1875 		return -EINVAL;
1876 
1877 	if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
1878 		DLB2_HW_ERR(hw,
1879 			    "[%s():%d] Internal error: port slot tracking failed\n",
1880 			    __func__, __LINE__);
1881 		return -EFAULT;
1882 	}
1883 
1884 	prio = port->qid_map[slot].priority;
1885 
1886 	/*
1887 	 * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
1888 	 * the port's qid_map state.
1889 	 */
1890 	ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
1891 	if (ret)
1892 		return ret;
1893 
1894 	ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
1895 	if (ret)
1896 		return ret;
1897 
1898 	/*
1899 	 * Ensure IF_status(cq,qid) is 0 before enabling the port, to
1900 	 * prevent spurious schedules from increasing the queue's
1901 	 * inflight count.
1902 	 */
1903 	dlb2_ldb_port_clear_queue_if_status(hw, port, slot);
1904 
1905 	/* Reset the queue's inflight status */
1906 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1907 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1908 			state = DLB2_QUEUE_MAPPED;
1909 			if (!dlb2_port_find_slot_queue(port, state,
1910 						       queue, &slot))
1911 				continue;
1912 
1913 			dlb2_ldb_port_set_queue_if_status(hw, port, slot);
1914 		}
1915 	}
1916 
1917 	dlb2_ldb_queue_set_inflight_limit(hw, queue);
1918 
1919 	/* Re-enable CQs mapped to this queue */
1920 	dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
1921 
1922 	/* If this queue has other mappings pending, clear its inflight limit */
1923 	if (queue->num_pending_additions > 0)
1924 		dlb2_ldb_queue_clear_inflight_limit(hw, queue);
1925 
1926 	return 0;
1927 }
1928 
1929 /**
1930  * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
1931  * @hw: dlb2_hw handle for a particular device.
1932  * @port: load-balanced port
1933  * @queue: load-balanced queue
1934  * @priority: queue servicing priority
1935  *
1936  * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
1937  * at a later point, and <0 if an error occurred.
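 *
 * Illustrative caller handling (a sketch, not a statement of the API
 * contract): a return of 1 means the map will be completed later from the
 * deferred-work path (e.g. dlb2_finish_map_qid_procedures()), so callers
 * should not treat it as a failure.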
1938  */
1939 static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
1940 					 struct dlb2_ldb_port *port,
1941 					 struct dlb2_ldb_queue *queue,
1942 					 u8 priority)
1943 {
1944 	union dlb2_lsp_qid_ldb_infl_cnt r0 = { {0} };
1945 	enum dlb2_qid_map_state state;
1946 	struct dlb2_hw_domain *domain;
1947 	int domain_id, slot, ret;
1948 
1949 	domain_id = port->domain_id.phys_id;
1950 
1951 	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
1952 	if (domain == NULL) {
1953 		DLB2_HW_ERR(hw,
1954 			    "[%s()] Internal error: unable to find domain %d\n",
1955 			    __func__, port->domain_id.phys_id);
1956 		return -EINVAL;
1957 	}
1958 
1959 	/*
1960 	 * Set the QID inflight limit to 0 to prevent further scheduling of the
1961 	 * queue.
1962 	 */
1963 	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), 0);
1964 
1965 	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
1966 		DLB2_HW_ERR(hw,
1967 			    "Internal error: No available unmapped slots\n");
1968 		return -EFAULT;
1969 	}
1970 
1971 	if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
1972 		DLB2_HW_ERR(hw,
1973 			    "[%s():%d] Internal error: port slot tracking failed\n",
1974 			    __func__, __LINE__);
1975 		return -EFAULT;
1976 	}
1977 
1978 	port->qid_map[slot].qid = queue->id.phys_id;
1979 	port->qid_map[slot].priority = priority;
1980 
1981 	state = DLB2_QUEUE_MAP_IN_PROG;
1982 	ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
1983 	if (ret)
1984 		return ret;
1985 
1986 	r0.val = DLB2_CSR_RD(hw,
1987 			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
1988 
1989 	if (r0.field.count) {
1990 		/*
1991 		 * The queue is owed completions so it's not safe to map it
1992 		 * yet. Schedule a kernel thread to complete the mapping later,
1993 		 * once software has completed all the queue's inflight events.
1994 		 */
1995 		if (!os_worker_active(hw))
1996 			os_schedule_work(hw);
1997 
1998 		return 1;
1999 	}
2000 
2001 	/*
2002 	 * Disable the affected CQ, and the CQs already mapped to the QID,
2003 	 * before reading the QID's inflight count a second time. There is an
2004 	 * unlikely race in which the QID may schedule one more QE after we
2005 	 * read an inflight count of 0, and disabling the CQs guarantees that
2006 	 * the race will not occur after a re-read of the inflight count
2007 	 * register.
2008 	 */
2009 	if (port->enabled)
2010 		dlb2_ldb_port_cq_disable(hw, port);
2011 
2012 	dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2013 
2014 	r0.val = DLB2_CSR_RD(hw,
2015 			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
2016 
2017 	if (r0.field.count) {
2018 		if (port->enabled)
2019 			dlb2_ldb_port_cq_enable(hw, port);
2020 
2021 		dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2022 
2023 		/*
2024 		 * The queue is owed completions so it's not safe to map it
2025 		 * yet. Schedule a kernel thread to complete the mapping later,
2026 		 * once software has completed all the queue's inflight events.
2027 		 */
2028 		if (!os_worker_active(hw))
2029 			os_schedule_work(hw);
2030 
2031 		return 1;
2032 	}
2033 
2034 	return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2035 }
2036 
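/*
 * Walk the port's QID map looking for in-progress map operations and, for any
 * whose queue's inflight count has drained to zero, complete the map.
 */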
2037 static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
2038 					struct dlb2_hw_domain *domain,
2039 					struct dlb2_ldb_port *port)
2040 {
2041 	int i;
2042 
2043 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2044 		union dlb2_lsp_qid_ldb_infl_cnt r0;
2045 		struct dlb2_ldb_queue *queue;
2046 		int qid;
2047 
2048 		if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
2049 			continue;
2050 
2051 		qid = port->qid_map[i].qid;
2052 
2053 		queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);
2054 
2055 		if (queue == NULL) {
2056 			DLB2_HW_ERR(hw,
2057 				    "[%s()] Internal error: unable to find queue %d\n",
2058 				    __func__, qid);
2059 			continue;
2060 		}
2061 
2062 		r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));
2063 
2064 		if (r0.field.count)
2065 			continue;
2066 
2067 		/*
2068 		 * Disable the affected CQ, and the CQs already mapped to the
2069 		 * QID, before reading the QID's inflight count a second time.
2070 		 * There is an unlikely race in which the QID may schedule one
2071 		 * more QE after we read an inflight count of 0, and disabling
2072 		 * the CQs guarantees that the race will not occur after a
2073 		 * re-read of the inflight count register.
2074 		 */
2075 		if (port->enabled)
2076 			dlb2_ldb_port_cq_disable(hw, port);
2077 
2078 		dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2079 
2080 		r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));
2081 
2082 		if (r0.field.count) {
2083 			if (port->enabled)
2084 				dlb2_ldb_port_cq_enable(hw, port);
2085 
2086 			dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2087 
2088 			continue;
2089 		}
2090 
2091 		dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2092 	}
2093 }
2094 
2095 static unsigned int
2096 dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
2097 				      struct dlb2_hw_domain *domain)
2098 {
2099 	struct dlb2_list_entry *iter;
2100 	struct dlb2_ldb_port *port;
2101 	int i;
2102 	RTE_SET_USED(iter);
2103 
2104 	if (!domain->configured || domain->num_pending_additions == 0)
2105 		return 0;
2106 
2107 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2108 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2109 			dlb2_domain_finish_map_port(hw, domain, port);
2110 	}
2111 
2112 	return domain->num_pending_additions;
2113 }
2114 
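/*
 * Remove a QID->CQ mapping: clear the slot's valid bit in CQ2PRIOV and the
 * CQ's bit in the three QID->CQ index registers, then mark the slot unmapped.
 * Each QID2CQIDIX register instance covers four CQs, so port_id / 4 selects
 * the register instance and port_id % 4 selects the cq_pN field within it.
 */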
2115 static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
2116 				   struct dlb2_ldb_port *port,
2117 				   struct dlb2_ldb_queue *queue)
2118 {
2119 	enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
2120 	union dlb2_lsp_cq2priov r0;
2121 	union dlb2_atm_qid2cqidix_00 r1;
2122 	union dlb2_lsp_qid2cqidix_00 r2;
2123 	union dlb2_lsp_qid2cqidix2_00 r3;
2124 	u32 queue_id;
2125 	u32 port_id;
2126 	int i;
2127 
2128 	/* Find the queue's slot */
2129 	mapped = DLB2_QUEUE_MAPPED;
2130 	in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
2131 	pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
2132 
2133 	if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
2134 	    !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
2135 	    !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
2136 		DLB2_HW_ERR(hw,
2137 			    "[%s():%d] Internal error: QID %d isn't mapped\n",
2138 			    __func__, __LINE__, queue->id.phys_id);
2139 		return -EFAULT;
2140 	}
2141 
2142 	if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
2143 		DLB2_HW_ERR(hw,
2144 			    "[%s():%d] Internal error: port slot tracking failed\n",
2145 			    __func__, __LINE__);
2146 		return -EFAULT;
2147 	}
2148 
2149 	port_id = port->id.phys_id;
2150 	queue_id = queue->id.phys_id;
2151 
2152 	/* Read-modify-write the priority and valid bit register */
2153 	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port_id));
2154 
2155 	r0.field.v &= ~(1 << i);
2156 
2157 	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port_id), r0.val);
2158 
2159 	r1.val = DLB2_CSR_RD(hw,
2160 			     DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4));
2161 
2162 	r2.val = DLB2_CSR_RD(hw,
2163 			     DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4));
2164 
2165 	r3.val = DLB2_CSR_RD(hw,
2166 			     DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4));
2167 
2168 	switch (port_id % 4) {
2169 	case 0:
2170 		r1.field.cq_p0 &= ~(1 << i);
2171 		r2.field.cq_p0 &= ~(1 << i);
2172 		r3.field.cq_p0 &= ~(1 << i);
2173 		break;
2174 
2175 	case 1:
2176 		r1.field.cq_p1 &= ~(1 << i);
2177 		r2.field.cq_p1 &= ~(1 << i);
2178 		r3.field.cq_p1 &= ~(1 << i);
2179 		break;
2180 
2181 	case 2:
2182 		r1.field.cq_p2 &= ~(1 << i);
2183 		r2.field.cq_p2 &= ~(1 << i);
2184 		r3.field.cq_p2 &= ~(1 << i);
2185 		break;
2186 
2187 	case 3:
2188 		r1.field.cq_p3 &= ~(1 << i);
2189 		r2.field.cq_p3 &= ~(1 << i);
2190 		r3.field.cq_p3 &= ~(1 << i);
2191 		break;
2192 	}
2193 
2194 	DLB2_CSR_WR(hw,
2195 		    DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4),
2196 		    r1.val);
2197 
2198 	DLB2_CSR_WR(hw,
2199 		    DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4),
2200 		    r2.val);
2201 
2202 	DLB2_CSR_WR(hw,
2203 		    DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4),
2204 		    r3.val);
2205 
2206 	dlb2_flush_csr(hw);
2207 
2208 	unmapped = DLB2_QUEUE_UNMAPPED;
2209 
2210 	return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
2211 }
2212 
2213 static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
2214 				 struct dlb2_hw_domain *domain,
2215 				 struct dlb2_ldb_port *port,
2216 				 struct dlb2_ldb_queue *queue,
2217 				 u8 prio)
2218 {
2219 	if (domain->started)
2220 		return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
2221 	else
2222 		return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
2223 }
2224 
2225 static void
2226 dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
2227 				   struct dlb2_hw_domain *domain,
2228 				   struct dlb2_ldb_port *port,
2229 				   int slot)
2230 {
2231 	enum dlb2_qid_map_state state;
2232 	struct dlb2_ldb_queue *queue;
2233 
2234 	queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
2235 
2236 	state = port->qid_map[slot].state;
2237 
2238 	/* Update the QID2CQIDX and CQ2QID vectors */
2239 	dlb2_ldb_port_unmap_qid(hw, port, queue);
2240 
2241 	/*
2242 	 * Ensure the QID will not be serviced by this {CQ, slot} by clearing
2243 	 * the has_work bits
2244 	 */
2245 	dlb2_ldb_port_clear_has_work_bits(hw, port, slot);
2246 
2247 	/* Reset the {CQ, slot} to its default state */
2248 	dlb2_ldb_port_set_queue_if_status(hw, port, slot);
2249 
2250 	/* Re-enable the CQ if it wasn't manually disabled by the user */
2251 	if (port->enabled)
2252 		dlb2_ldb_port_cq_enable(hw, port);
2253 
2254 	/*
2255 	 * If there is a mapping that is pending this slot's removal, perform
2256 	 * the mapping now.
2257 	 */
2258 	if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
2259 		struct dlb2_ldb_port_qid_map *map;
2260 		struct dlb2_ldb_queue *map_queue;
2261 		u8 prio;
2262 
2263 		map = &port->qid_map[slot];
2264 
2265 		map->qid = map->pending_qid;
2266 		map->priority = map->pending_priority;
2267 
2268 		map_queue = &hw->rsrcs.ldb_queues[map->qid];
2269 		prio = map->priority;
2270 
2271 		dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
2272 	}
2273 }
2274 
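/*
 * Returns true if the port's pending unmaps were processed. Unmapping is
 * deferred until the CQ has zero inflight events, so this may legitimately
 * return false and be re-attempted on a later pass.
 */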
2275 static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
2276 					  struct dlb2_hw_domain *domain,
2277 					  struct dlb2_ldb_port *port)
2278 {
2279 	union dlb2_lsp_cq_ldb_infl_cnt r0;
2280 	int i;
2281 
2282 	if (port->num_pending_removals == 0)
2283 		return false;
2284 
2285 	/*
2286 	 * The unmap requires all the CQ's outstanding inflights to be
2287 	 * completed.
2288 	 */
2289 	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));
2290 	if (r0.field.count > 0)
2291 		return false;
2292 
2293 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2294 		struct dlb2_ldb_port_qid_map *map;
2295 
2296 		map = &port->qid_map[i];
2297 
2298 		if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
2299 		    map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
2300 			continue;
2301 
2302 		dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
2303 	}
2304 
2305 	return true;
2306 }
2307 
2308 static unsigned int
2309 dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
2310 					struct dlb2_hw_domain *domain)
2311 {
2312 	struct dlb2_list_entry *iter;
2313 	struct dlb2_ldb_port *port;
2314 	int i;
2315 	RTE_SET_USED(iter);
2316 
2317 	if (!domain->configured || domain->num_pending_removals == 0)
2318 		return 0;
2319 
2320 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2321 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2322 			dlb2_domain_finish_unmap_port(hw, domain, port);
2323 	}
2324 
2325 	return domain->num_pending_removals;
2326 }
2327 
2328 static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
2329 					struct dlb2_hw_domain *domain)
2330 {
2331 	struct dlb2_list_entry *iter;
2332 	struct dlb2_ldb_port *port;
2333 	int i;
2334 	RTE_SET_USED(iter);
2335 
2336 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2337 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2338 			port->enabled = false;
2339 
2340 			dlb2_ldb_port_cq_disable(hw, port);
2341 		}
2342 	}
2343 }
2344 
2345 static void dlb2_log_reset_domain(struct dlb2_hw *hw,
2346 				  u32 domain_id,
2347 				  bool vdev_req,
2348 				  unsigned int vdev_id)
2349 {
2350 	DLB2_HW_DBG(hw, "DLB2 reset domain:\n");
2351 	if (vdev_req)
2352 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
2353 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
2354 }
2355 
2356 static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
2357 					 struct dlb2_hw_domain *domain,
2358 					 unsigned int vdev_id)
2359 {
2360 	struct dlb2_list_entry *iter;
2361 	union dlb2_sys_vf_dir_vpp_v r1;
2362 	struct dlb2_dir_pq_pair *port;
2363 	RTE_SET_USED(iter);
2364 
2365 	r1.field.vpp_v = 0;
2366 
2367 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2368 		unsigned int offs;
2369 		u32 virt_id;
2370 
2371 		if (hw->virt_mode == DLB2_VIRT_SRIOV)
2372 			virt_id = port->id.virt_id;
2373 		else
2374 			virt_id = port->id.phys_id;
2375 
2376 		offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
2377 
2378 		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r1.val);
2379 	}
2380 }
2381 
2382 static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
2383 					 struct dlb2_hw_domain *domain,
2384 					 unsigned int vdev_id)
2385 {
2386 	struct dlb2_list_entry *iter;
2387 	union dlb2_sys_vf_ldb_vpp_v r1;
2388 	struct dlb2_ldb_port *port;
2389 	int i;
2390 	RTE_SET_USED(iter);
2391 
2392 	r1.field.vpp_v = 0;
2393 
2394 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2395 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2396 			unsigned int offs;
2397 			u32 virt_id;
2398 
2399 			if (hw->virt_mode == DLB2_VIRT_SRIOV)
2400 				virt_id = port->id.virt_id;
2401 			else
2402 				virt_id = port->id.phys_id;
2403 
2404 			offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2405 
2406 			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), r1.val);
2407 		}
2408 	}
2409 }
2410 
2411 static void
2412 dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
2413 					struct dlb2_hw_domain *domain)
2414 {
2415 	struct dlb2_list_entry *iter;
2416 	union dlb2_chp_ldb_cq_int_enb r0 = { {0} };
2417 	union dlb2_chp_ldb_cq_wd_enb r1 = { {0} };
2418 	struct dlb2_ldb_port *port;
2419 	int i;
2420 	RTE_SET_USED(iter);
2421 
2422 	r0.field.en_tim = 0;
2423 	r0.field.en_depth = 0;
2424 
2425 	r1.field.wd_enable = 0;
2426 
2427 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2428 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2429 			DLB2_CSR_WR(hw,
2430 				    DLB2_CHP_LDB_CQ_INT_ENB(port->id.phys_id),
2431 				    r0.val);
2432 
2433 			DLB2_CSR_WR(hw,
2434 				    DLB2_CHP_LDB_CQ_WD_ENB(port->id.phys_id),
2435 				    r1.val);
2436 		}
2437 	}
2438 }
2439 
2440 static void
2441 dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
2442 					struct dlb2_hw_domain *domain)
2443 {
2444 	struct dlb2_list_entry *iter;
2445 	union dlb2_chp_dir_cq_int_enb r0 = { {0} };
2446 	union dlb2_chp_dir_cq_wd_enb r1 = { {0} };
2447 	struct dlb2_dir_pq_pair *port;
2448 	RTE_SET_USED(iter);
2449 
2450 	r0.field.en_tim = 0;
2451 	r0.field.en_depth = 0;
2452 
2453 	r1.field.wd_enable = 0;
2454 
2455 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2456 		DLB2_CSR_WR(hw,
2457 			    DLB2_CHP_DIR_CQ_INT_ENB(port->id.phys_id),
2458 			    r0.val);
2459 
2460 		DLB2_CSR_WR(hw,
2461 			    DLB2_CHP_DIR_CQ_WD_ENB(port->id.phys_id),
2462 			    r1.val);
2463 	}
2464 }
2465 
2466 static void
2467 dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
2468 					  struct dlb2_hw_domain *domain)
2469 {
2470 	int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
2471 	struct dlb2_list_entry *iter;
2472 	struct dlb2_ldb_queue *queue;
2473 	RTE_SET_USED(iter);
2474 
2475 	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2476 		union dlb2_sys_ldb_vasqid_v r0 = { {0} };
2477 		union dlb2_sys_ldb_qid2vqid r1 = { {0} };
2478 		union dlb2_sys_vf_ldb_vqid_v r2 = { {0} };
2479 		union dlb2_sys_vf_ldb_vqid2qid r3 = { {0} };
2480 		int idx;
2481 
2482 		idx = domain_offset + queue->id.phys_id;
2483 
2484 		DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), r0.val);
2485 
2486 		if (queue->id.vdev_owned) {
2487 			DLB2_CSR_WR(hw,
2488 				    DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
2489 				    r1.val);
2490 
2491 			idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
2492 				queue->id.virt_id;
2493 
2494 			DLB2_CSR_WR(hw,
2495 				    DLB2_SYS_VF_LDB_VQID_V(idx),
2496 				    r2.val);
2497 
2498 			DLB2_CSR_WR(hw,
2499 				    DLB2_SYS_VF_LDB_VQID2QID(idx),
2500 				    r3.val);
2501 		}
2502 	}
2503 }
2504 
2505 static void
2506 dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
2507 					  struct dlb2_hw_domain *domain)
2508 {
2509 	int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS;
2510 	struct dlb2_list_entry *iter;
2511 	struct dlb2_dir_pq_pair *queue;
2512 	RTE_SET_USED(iter);
2513 
2514 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
2515 		union dlb2_sys_dir_vasqid_v r0 = { {0} };
2516 		union dlb2_sys_vf_dir_vqid_v r1 = { {0} };
2517 		union dlb2_sys_vf_dir_vqid2qid r2 = { {0} };
2518 		int idx;
2519 
2520 		idx = domain_offset + queue->id.phys_id;
2521 
2522 		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), r0.val);
2523 
2524 		if (queue->id.vdev_owned) {
2525 			idx = queue->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS +
2526 				queue->id.virt_id;
2527 
2528 			DLB2_CSR_WR(hw,
2529 				    DLB2_SYS_VF_DIR_VQID_V(idx),
2530 				    r1.val);
2531 
2532 			DLB2_CSR_WR(hw,
2533 				    DLB2_SYS_VF_DIR_VQID2QID(idx),
2534 				    r2.val);
2535 		}
2536 	}
2537 }
2538 
2539 static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
2540 					       struct dlb2_hw_domain *domain)
2541 {
2542 	struct dlb2_list_entry *iter;
2543 	union dlb2_chp_sn_chk_enbl r1;
2544 	struct dlb2_ldb_port *port;
2545 	int i;
2546 	RTE_SET_USED(iter);
2547 
2548 	r1.field.en = 0;
2549 
2550 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2551 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2552 			DLB2_CSR_WR(hw,
2553 				    DLB2_CHP_SN_CHK_ENBL(port->id.phys_id),
2554 				    r1.val);
2555 	}
2556 }
2557 
2558 static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
2559 						 struct dlb2_hw_domain *domain)
2560 {
2561 	struct dlb2_list_entry *iter;
2562 	struct dlb2_ldb_port *port;
2563 	int i;
2564 	RTE_SET_USED(iter);
2565 
2566 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2567 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2568 			int i;
2569 
2570 			for (i = 0; i < DLB2_MAX_CQ_COMP_CHECK_LOOPS; i++) {
2571 				if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
2572 					break;
2573 			}
2574 
2575 			if (i == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
2576 				DLB2_HW_ERR(hw,
2577 					    "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
2578 					    __func__, port->id.phys_id);
2579 				return -EFAULT;
2580 			}
2581 		}
2582 	}
2583 
2584 	return 0;
2585 }
2586 
2587 static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
2588 					struct dlb2_hw_domain *domain)
2589 {
2590 	struct dlb2_list_entry *iter;
2591 	struct dlb2_dir_pq_pair *port;
2592 	RTE_SET_USED(iter);
2593 
2594 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2595 		port->enabled = false;
2596 
2597 		dlb2_dir_port_cq_disable(hw, port);
2598 	}
2599 }
2600 
2601 static void
2602 dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
2603 				       struct dlb2_hw_domain *domain)
2604 {
2605 	struct dlb2_list_entry *iter;
2606 	struct dlb2_dir_pq_pair *port;
2607 	union dlb2_sys_dir_pp_v r1;
2608 	RTE_SET_USED(iter);
2609 
2610 	r1.field.pp_v = 0;
2611 
2612 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
2613 		DLB2_CSR_WR(hw,
2614 			    DLB2_SYS_DIR_PP_V(port->id.phys_id),
2615 			    r1.val);
2616 }
2617 
2618 static void
2619 dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
2620 				       struct dlb2_hw_domain *domain)
2621 {
2622 	struct dlb2_list_entry *iter;
2623 	union dlb2_sys_ldb_pp_v r1;
2624 	struct dlb2_ldb_port *port;
2625 	int i;
2626 	RTE_SET_USED(iter);
2627 
2628 	r1.field.pp_v = 0;
2629 
2630 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2631 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2632 			DLB2_CSR_WR(hw,
2633 				    DLB2_SYS_LDB_PP_V(port->id.phys_id),
2634 				    r1.val);
2635 	}
2636 }
2637 
2638 static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
2639 					    struct dlb2_hw_domain *domain)
2640 {
2641 	struct dlb2_list_entry *iter;
2642 	struct dlb2_dir_pq_pair *dir_port;
2643 	struct dlb2_ldb_port *ldb_port;
2644 	struct dlb2_ldb_queue *queue;
2645 	int i;
2646 	RTE_SET_USED(iter);
2647 
2648 	/*
2649 	 * Confirm that all the domain's queues' inflight counts and AQED
2650 	 * active counts are 0.
2651 	 */
2652 	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2653 		if (!dlb2_ldb_queue_is_empty(hw, queue)) {
2654 			DLB2_HW_ERR(hw,
2655 				    "[%s()] Internal error: failed to empty ldb queue %d\n",
2656 				    __func__, queue->id.phys_id);
2657 			return -EFAULT;
2658 		}
2659 	}
2660 
2661 	/* Confirm that all the domain's CQs' inflight and token counts are 0. */
2662 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2663 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
2664 			if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
2665 			    dlb2_ldb_cq_token_count(hw, ldb_port)) {
2666 				DLB2_HW_ERR(hw,
2667 					    "[%s()] Internal error: failed to empty ldb port %d\n",
2668 					    __func__, ldb_port->id.phys_id);
2669 				return -EFAULT;
2670 			}
2671 		}
2672 	}
2673 
2674 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
2675 		if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
2676 			DLB2_HW_ERR(hw,
2677 				    "[%s()] Internal error: failed to empty dir queue %d\n",
2678 				    __func__, dir_port->id.phys_id);
2679 			return -EFAULT;
2680 		}
2681 
2682 		if (dlb2_dir_cq_token_count(hw, dir_port)) {
2683 			DLB2_HW_ERR(hw,
2684 				    "[%s()] Internal error: failed to empty dir port %d\n",
2685 				    __func__, dir_port->id.phys_id);
2686 			return -EFAULT;
2687 		}
2688 	}
2689 
2690 	return 0;
2691 }
2692 
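/*
 * Restore every per-port CSR touched during configuration to its reset value.
 * The *_RST constants are assumed here to be the hardware default values
 * defined in dlb2_regs.h.
 */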
2693 static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2694 						   struct dlb2_ldb_port *port)
2695 {
2696 	DLB2_CSR_WR(hw,
2697 		    DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
2698 		    DLB2_SYS_LDB_PP2VAS_RST);
2699 
2700 	DLB2_CSR_WR(hw,
2701 		    DLB2_CHP_LDB_CQ2VAS(port->id.phys_id),
2702 		    DLB2_CHP_LDB_CQ2VAS_RST);
2703 
2704 	DLB2_CSR_WR(hw,
2705 		    DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
2706 		    DLB2_SYS_LDB_PP2VDEV_RST);
2707 
2708 	if (port->id.vdev_owned) {
2709 		unsigned int offs;
2710 		u32 virt_id;
2711 
2712 		/*
2713 		 * DLB uses producer port address bits 17:12 to determine the
2714 		 * producer port ID. In Scalable IOV mode, PP accesses come
2715 		 * through the PF MMIO window for the physical producer port,
2716 		 * so for translation purposes the virtual and physical port
2717 		 * IDs are equal.
2718 		 */
2719 		if (hw->virt_mode == DLB2_VIRT_SRIOV)
2720 			virt_id = port->id.virt_id;
2721 		else
2722 			virt_id = port->id.phys_id;
2723 
2724 		offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2725 
2726 		DLB2_CSR_WR(hw,
2727 			    DLB2_SYS_VF_LDB_VPP2PP(offs),
2728 			    DLB2_SYS_VF_LDB_VPP2PP_RST);
2729 
2730 		DLB2_CSR_WR(hw,
2731 			    DLB2_SYS_VF_LDB_VPP_V(offs),
2732 			    DLB2_SYS_VF_LDB_VPP_V_RST);
2733 	}
2734 
2735 	DLB2_CSR_WR(hw,
2736 		    DLB2_SYS_LDB_PP_V(port->id.phys_id),
2737 		    DLB2_SYS_LDB_PP_V_RST);
2738 
2739 	DLB2_CSR_WR(hw,
2740 		    DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id),
2741 		    DLB2_LSP_CQ_LDB_DSBL_RST);
2742 
2743 	DLB2_CSR_WR(hw,
2744 		    DLB2_CHP_LDB_CQ_DEPTH(port->id.phys_id),
2745 		    DLB2_CHP_LDB_CQ_DEPTH_RST);
2746 
2747 	DLB2_CSR_WR(hw,
2748 		    DLB2_LSP_CQ_LDB_INFL_LIM(port->id.phys_id),
2749 		    DLB2_LSP_CQ_LDB_INFL_LIM_RST);
2750 
2751 	DLB2_CSR_WR(hw,
2752 		    DLB2_CHP_HIST_LIST_LIM(port->id.phys_id),
2753 		    DLB2_CHP_HIST_LIST_LIM_RST);
2754 
2755 	DLB2_CSR_WR(hw,
2756 		    DLB2_CHP_HIST_LIST_BASE(port->id.phys_id),
2757 		    DLB2_CHP_HIST_LIST_BASE_RST);
2758 
2759 	DLB2_CSR_WR(hw,
2760 		    DLB2_CHP_HIST_LIST_POP_PTR(port->id.phys_id),
2761 		    DLB2_CHP_HIST_LIST_POP_PTR_RST);
2762 
2763 	DLB2_CSR_WR(hw,
2764 		    DLB2_CHP_HIST_LIST_PUSH_PTR(port->id.phys_id),
2765 		    DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
2766 
2767 	DLB2_CSR_WR(hw,
2768 		    DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(port->id.phys_id),
2769 		    DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
2770 
2771 	DLB2_CSR_WR(hw,
2772 		    DLB2_CHP_LDB_CQ_TMR_THRSH(port->id.phys_id),
2773 		    DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
2774 
2775 	DLB2_CSR_WR(hw,
2776 		    DLB2_CHP_LDB_CQ_INT_ENB(port->id.phys_id),
2777 		    DLB2_CHP_LDB_CQ_INT_ENB_RST);
2778 
2779 	DLB2_CSR_WR(hw,
2780 		    DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
2781 		    DLB2_SYS_LDB_CQ_ISR_RST);
2782 
2783 	DLB2_CSR_WR(hw,
2784 		    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id.phys_id),
2785 		    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
2786 
2787 	DLB2_CSR_WR(hw,
2788 		    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id.phys_id),
2789 		    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
2790 
2791 	DLB2_CSR_WR(hw,
2792 		    DLB2_CHP_LDB_CQ_WPTR(port->id.phys_id),
2793 		    DLB2_CHP_LDB_CQ_WPTR_RST);
2794 
2795 	DLB2_CSR_WR(hw,
2796 		    DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),
2797 		    DLB2_LSP_CQ_LDB_TKN_CNT_RST);
2798 
2799 	DLB2_CSR_WR(hw,
2800 		    DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
2801 		    DLB2_SYS_LDB_CQ_ADDR_L_RST);
2802 
2803 	DLB2_CSR_WR(hw,
2804 		    DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
2805 		    DLB2_SYS_LDB_CQ_ADDR_U_RST);
2806 
2807 	DLB2_CSR_WR(hw,
2808 		    DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
2809 		    DLB2_SYS_LDB_CQ_AT_RST);
2810 
2811 	DLB2_CSR_WR(hw,
2812 		    DLB2_SYS_LDB_CQ_PASID(port->id.phys_id),
2813 		    DLB2_SYS_LDB_CQ_PASID_RST);
2814 
2815 	DLB2_CSR_WR(hw,
2816 		    DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
2817 		    DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
2818 
2819 	DLB2_CSR_WR(hw,
2820 		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(port->id.phys_id),
2821 		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
2822 
2823 	DLB2_CSR_WR(hw,
2824 		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(port->id.phys_id),
2825 		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
2826 
2827 	DLB2_CSR_WR(hw,
2828 		    DLB2_LSP_CQ2QID0(port->id.phys_id),
2829 		    DLB2_LSP_CQ2QID0_RST);
2830 
2831 	DLB2_CSR_WR(hw,
2832 		    DLB2_LSP_CQ2QID1(port->id.phys_id),
2833 		    DLB2_LSP_CQ2QID1_RST);
2834 
2835 	DLB2_CSR_WR(hw,
2836 		    DLB2_LSP_CQ2PRIOV(port->id.phys_id),
2837 		    DLB2_LSP_CQ2PRIOV_RST);
2838 }
2839 
2840 static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2841 						 struct dlb2_hw_domain *domain)
2842 {
2843 	struct dlb2_list_entry *iter;
2844 	struct dlb2_ldb_port *port;
2845 	int i;
2846 	RTE_SET_USED(iter);
2847 
2848 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2849 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2850 			__dlb2_domain_reset_ldb_port_registers(hw, port);
2851 	}
2852 }
2853 
2854 static void
2855 __dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
2856 				       struct dlb2_dir_pq_pair *port)
2857 {
2858 	DLB2_CSR_WR(hw,
2859 		    DLB2_CHP_DIR_CQ2VAS(port->id.phys_id),
2860 		    DLB2_CHP_DIR_CQ2VAS_RST);
2861 
2862 	DLB2_CSR_WR(hw,
2863 		    DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id),
2864 		    DLB2_LSP_CQ_DIR_DSBL_RST);
2865 
2866 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
2867 
2868 	DLB2_CSR_WR(hw,
2869 		    DLB2_CHP_DIR_CQ_DEPTH(port->id.phys_id),
2870 		    DLB2_CHP_DIR_CQ_DEPTH_RST);
2871 
2872 	DLB2_CSR_WR(hw,
2873 		    DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(port->id.phys_id),
2874 		    DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
2875 
2876 	DLB2_CSR_WR(hw,
2877 		    DLB2_CHP_DIR_CQ_TMR_THRSH(port->id.phys_id),
2878 		    DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
2879 
2880 	DLB2_CSR_WR(hw,
2881 		    DLB2_CHP_DIR_CQ_INT_ENB(port->id.phys_id),
2882 		    DLB2_CHP_DIR_CQ_INT_ENB_RST);
2883 
2884 	DLB2_CSR_WR(hw,
2885 		    DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
2886 		    DLB2_SYS_DIR_CQ_ISR_RST);
2887 
2888 	DLB2_CSR_WR(hw,
2889 		    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id.phys_id),
2890 		    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
2891 
2892 	DLB2_CSR_WR(hw,
2893 		    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id.phys_id),
2894 		    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
2895 
2896 	DLB2_CSR_WR(hw,
2897 		    DLB2_CHP_DIR_CQ_WPTR(port->id.phys_id),
2898 		    DLB2_CHP_DIR_CQ_WPTR_RST);
2899 
2900 	DLB2_CSR_WR(hw,
2901 		    DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),
2902 		    DLB2_LSP_CQ_DIR_TKN_CNT_RST);
2903 
2904 	DLB2_CSR_WR(hw,
2905 		    DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
2906 		    DLB2_SYS_DIR_CQ_ADDR_L_RST);
2907 
2908 	DLB2_CSR_WR(hw,
2909 		    DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
2910 		    DLB2_SYS_DIR_CQ_ADDR_U_RST);
2911 
2912 	DLB2_CSR_WR(hw,
2913 		    DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
2914 		    DLB2_SYS_DIR_CQ_AT_RST);
2915 
2916 	DLB2_CSR_WR(hw,
2917 		    DLB2_SYS_DIR_CQ_PASID(port->id.phys_id),
2918 		    DLB2_SYS_DIR_CQ_PASID_RST);
2919 
2920 	DLB2_CSR_WR(hw,
2921 		    DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
2922 		    DLB2_SYS_DIR_CQ_FMT_RST);
2923 
2924 	DLB2_CSR_WR(hw,
2925 		    DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
2926 		    DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
2927 
2928 	DLB2_CSR_WR(hw,
2929 		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(port->id.phys_id),
2930 		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
2931 
2932 	DLB2_CSR_WR(hw,
2933 		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(port->id.phys_id),
2934 		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
2935 
2936 	DLB2_CSR_WR(hw,
2937 		    DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
2938 		    DLB2_SYS_DIR_PP2VAS_RST);
2939 
2940 	DLB2_CSR_WR(hw,
2941 		    DLB2_CHP_DIR_CQ2VAS(port->id.phys_id),
2942 		    DLB2_CHP_DIR_CQ2VAS_RST);
2943 
2944 	DLB2_CSR_WR(hw,
2945 		    DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
2946 		    DLB2_SYS_DIR_PP2VDEV_RST);
2947 
2948 	if (port->id.vdev_owned) {
2949 		unsigned int offs;
2950 		u32 virt_id;
2951 
2952 		/*
2953 		 * DLB uses producer port address bits 17:12 to determine the
2954 		 * producer port ID. In Scalable IOV mode, PP accesses come
2955 		 * through the PF MMIO window for the physical producer port,
2956 		 * so for translation purposes the virtual and physical port
2957 		 * IDs are equal.
2958 		 */
2959 		if (hw->virt_mode == DLB2_VIRT_SRIOV)
2960 			virt_id = port->id.virt_id;
2961 		else
2962 			virt_id = port->id.phys_id;
2963 
2964 		offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
2965 
2966 		DLB2_CSR_WR(hw,
2967 			    DLB2_SYS_VF_DIR_VPP2PP(offs),
2968 			    DLB2_SYS_VF_DIR_VPP2PP_RST);
2969 
2970 		DLB2_CSR_WR(hw,
2971 			    DLB2_SYS_VF_DIR_VPP_V(offs),
2972 			    DLB2_SYS_VF_DIR_VPP_V_RST);
2973 	}
2974 
2975 	DLB2_CSR_WR(hw,
2976 		    DLB2_SYS_DIR_PP_V(port->id.phys_id),
2977 		    DLB2_SYS_DIR_PP_V_RST);
2978 }
2979 
2980 static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
2981 						 struct dlb2_hw_domain *domain)
2982 {
2983 	struct dlb2_list_entry *iter;
2984 	struct dlb2_dir_pq_pair *port;
2985 	RTE_SET_USED(iter);
2986 
2987 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
2988 		__dlb2_domain_reset_dir_port_registers(hw, port);
2989 }
2990 
2991 static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
2992 						  struct dlb2_hw_domain *domain)
2993 {
2994 	struct dlb2_list_entry *iter;
2995 	struct dlb2_ldb_queue *queue;
2996 	RTE_SET_USED(iter);
2997 
2998 	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2999 		unsigned int queue_id = queue->id.phys_id;
3000 		int i;
3001 
3002 		DLB2_CSR_WR(hw,
3003 			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(queue_id),
3004 			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
3005 
3006 		DLB2_CSR_WR(hw,
3007 			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(queue_id),
3008 			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
3009 
3010 		DLB2_CSR_WR(hw,
3011 			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(queue_id),
3012 			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
3013 
3014 		DLB2_CSR_WR(hw,
3015 			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(queue_id),
3016 			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
3017 
3018 		DLB2_CSR_WR(hw,
3019 			    DLB2_LSP_QID_NALDB_MAX_DEPTH(queue_id),
3020 			    DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
3021 
3022 		DLB2_CSR_WR(hw,
3023 			    DLB2_LSP_QID_LDB_INFL_LIM(queue_id),
3024 			    DLB2_LSP_QID_LDB_INFL_LIM_RST);
3025 
3026 		DLB2_CSR_WR(hw,
3027 			    DLB2_LSP_QID_AQED_ACTIVE_LIM(queue_id),
3028 			    DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
3029 
3030 		DLB2_CSR_WR(hw,
3031 			    DLB2_LSP_QID_ATM_DEPTH_THRSH(queue_id),
3032 			    DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
3033 
3034 		DLB2_CSR_WR(hw,
3035 			    DLB2_LSP_QID_NALDB_DEPTH_THRSH(queue_id),
3036 			    DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
3037 
3038 		DLB2_CSR_WR(hw,
3039 			    DLB2_SYS_LDB_QID_ITS(queue_id),
3040 			    DLB2_SYS_LDB_QID_ITS_RST);
3041 
3042 		DLB2_CSR_WR(hw,
3043 			    DLB2_CHP_ORD_QID_SN(queue_id),
3044 			    DLB2_CHP_ORD_QID_SN_RST);
3045 
3046 		DLB2_CSR_WR(hw,
3047 			    DLB2_CHP_ORD_QID_SN_MAP(queue_id),
3048 			    DLB2_CHP_ORD_QID_SN_MAP_RST);
3049 
3050 		DLB2_CSR_WR(hw,
3051 			    DLB2_SYS_LDB_QID_V(queue_id),
3052 			    DLB2_SYS_LDB_QID_V_RST);
3053 
3054 		DLB2_CSR_WR(hw,
3055 			    DLB2_SYS_LDB_QID_CFG_V(queue_id),
3056 			    DLB2_SYS_LDB_QID_CFG_V_RST);
3057 
3058 		if (queue->sn_cfg_valid) {
3059 			u32 offs[2];
3060 
3061 			offs[0] = DLB2_RO_PIPE_GRP_0_SLT_SHFT(queue->sn_slot);
3062 			offs[1] = DLB2_RO_PIPE_GRP_1_SLT_SHFT(queue->sn_slot);
3063 
3064 			DLB2_CSR_WR(hw,
3065 				    offs[queue->sn_group],
3066 				    DLB2_RO_PIPE_GRP_0_SLT_SHFT_RST);
3067 		}
3068 
3069 		for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
3070 			DLB2_CSR_WR(hw,
3071 				    DLB2_LSP_QID2CQIDIX(queue_id, i),
3072 				    DLB2_LSP_QID2CQIDIX_00_RST);
3073 
3074 			DLB2_CSR_WR(hw,
3075 				    DLB2_LSP_QID2CQIDIX2(queue_id, i),
3076 				    DLB2_LSP_QID2CQIDIX2_00_RST);
3077 
3078 			DLB2_CSR_WR(hw,
3079 				    DLB2_ATM_QID2CQIDIX(queue_id, i),
3080 				    DLB2_ATM_QID2CQIDIX_00_RST);
3081 		}
3082 	}
3083 }
3084 
3085 static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
3086 						  struct dlb2_hw_domain *domain)
3087 {
3088 	struct dlb2_list_entry *iter;
3089 	struct dlb2_dir_pq_pair *queue;
3090 	RTE_SET_USED(iter);
3091 
3092 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
3093 		DLB2_CSR_WR(hw,
3094 			    DLB2_LSP_QID_DIR_MAX_DEPTH(queue->id.phys_id),
3095 			    DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
3096 
3097 		DLB2_CSR_WR(hw,
3098 			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(queue->id.phys_id),
3099 			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
3100 
3101 		DLB2_CSR_WR(hw,
3102 			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(queue->id.phys_id),
3103 			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
3104 
3105 		DLB2_CSR_WR(hw,
3106 			    DLB2_LSP_QID_DIR_DEPTH_THRSH(queue->id.phys_id),
3107 			    DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
3108 
3109 		DLB2_CSR_WR(hw,
3110 			    DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
3111 			    DLB2_SYS_DIR_QID_ITS_RST);
3112 
3113 		DLB2_CSR_WR(hw,
3114 			    DLB2_SYS_DIR_QID_V(queue->id.phys_id),
3115 			    DLB2_SYS_DIR_QID_V_RST);
3116 	}
3117 }
3118 
3119 static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
3120 					struct dlb2_hw_domain *domain)
3121 {
3122 	dlb2_domain_reset_ldb_port_registers(hw, domain);
3123 
3124 	dlb2_domain_reset_dir_port_registers(hw, domain);
3125 
3126 	dlb2_domain_reset_ldb_queue_registers(hw, domain);
3127 
3128 	dlb2_domain_reset_dir_queue_registers(hw, domain);
3129 
3130 	DLB2_CSR_WR(hw,
3131 		    DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
3132 		    DLB2_CHP_CFG_LDB_VAS_CRD_RST);
3133 
3134 	DLB2_CSR_WR(hw,
3135 		    DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
3136 		    DLB2_CHP_CFG_DIR_VAS_CRD_RST);
3137 }
3138 
3139 static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
3140 					    struct dlb2_hw_domain *domain)
3141 {
3142 	struct dlb2_dir_pq_pair *tmp_dir_port;
3143 	struct dlb2_ldb_queue *tmp_ldb_queue;
3144 	struct dlb2_ldb_port *tmp_ldb_port;
3145 	struct dlb2_list_entry *iter1;
3146 	struct dlb2_list_entry *iter2;
3147 	struct dlb2_function_resources *rsrcs;
3148 	struct dlb2_dir_pq_pair *dir_port;
3149 	struct dlb2_ldb_queue *ldb_queue;
3150 	struct dlb2_ldb_port *ldb_port;
3151 	struct dlb2_list_head *list;
3152 	int ret, i;
3153 	RTE_SET_USED(tmp_dir_port);
3154 	RTE_SET_USED(tmp_ldb_queue);
3155 	RTE_SET_USED(tmp_ldb_port);
3156 	RTE_SET_USED(iter1);
3157 	RTE_SET_USED(iter2);
3158 
3159 	rsrcs = domain->parent_func;
3160 
3161 	/* Move the domain's ldb queues to the function's avail list */
3162 	list = &domain->used_ldb_queues;
3163 	DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3164 		if (ldb_queue->sn_cfg_valid) {
3165 			struct dlb2_sn_group *grp;
3166 
3167 			grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
3168 
3169 			dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
3170 			ldb_queue->sn_cfg_valid = false;
3171 		}
3172 
3173 		ldb_queue->owned = false;
3174 		ldb_queue->num_mappings = 0;
3175 		ldb_queue->num_pending_additions = 0;
3176 
3177 		dlb2_list_del(&domain->used_ldb_queues,
3178 			      &ldb_queue->domain_list);
3179 		dlb2_list_add(&rsrcs->avail_ldb_queues,
3180 			      &ldb_queue->func_list);
3181 		rsrcs->num_avail_ldb_queues++;
3182 	}
3183 
3184 	list = &domain->avail_ldb_queues;
3185 	DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3186 		ldb_queue->owned = false;
3187 
3188 		dlb2_list_del(&domain->avail_ldb_queues,
3189 			      &ldb_queue->domain_list);
3190 		dlb2_list_add(&rsrcs->avail_ldb_queues,
3191 			      &ldb_queue->func_list);
3192 		rsrcs->num_avail_ldb_queues++;
3193 	}
3194 
3195 	/* Move the domain's ldb ports to the function's avail list */
3196 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3197 		list = &domain->used_ldb_ports[i];
3198 		DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3199 				       iter1, iter2) {
3200 			int j;
3201 
3202 			ldb_port->owned = false;
3203 			ldb_port->configured = false;
3204 			ldb_port->num_pending_removals = 0;
3205 			ldb_port->num_mappings = 0;
3206 			ldb_port->init_tkn_cnt = 0;
3207 			for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
3208 				ldb_port->qid_map[j].state =
3209 					DLB2_QUEUE_UNMAPPED;
3210 
3211 			dlb2_list_del(&domain->used_ldb_ports[i],
3212 				      &ldb_port->domain_list);
3213 			dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3214 				      &ldb_port->func_list);
3215 			rsrcs->num_avail_ldb_ports[i]++;
3216 		}
3217 
3218 		list = &domain->avail_ldb_ports[i];
3219 		DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3220 				       iter1, iter2) {
3221 			ldb_port->owned = false;
3222 
3223 			dlb2_list_del(&domain->avail_ldb_ports[i],
3224 				      &ldb_port->domain_list);
3225 			dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3226 				      &ldb_port->func_list);
3227 			rsrcs->num_avail_ldb_ports[i]++;
3228 		}
3229 	}
3230 
3231 	/* Move the domain's dir ports to the function's avail list */
3232 	list = &domain->used_dir_pq_pairs;
3233 	DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3234 		dir_port->owned = false;
3235 		dir_port->port_configured = false;
3236 		dir_port->init_tkn_cnt = 0;
3237 
3238 		dlb2_list_del(&domain->used_dir_pq_pairs,
3239 			      &dir_port->domain_list);
3240 
3241 		dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3242 			      &dir_port->func_list);
3243 		rsrcs->num_avail_dir_pq_pairs++;
3244 	}
3245 
3246 	list = &domain->avail_dir_pq_pairs;
3247 	DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3248 		dir_port->owned = false;
3249 
3250 		dlb2_list_del(&domain->avail_dir_pq_pairs,
3251 			      &dir_port->domain_list);
3252 
3253 		dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3254 			      &dir_port->func_list);
3255 		rsrcs->num_avail_dir_pq_pairs++;
3256 	}
3257 
3258 	/* Return hist list entries to the function */
3259 	ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
3260 				    domain->hist_list_entry_base,
3261 				    domain->total_hist_list_entries);
3262 	if (ret) {
3263 		DLB2_HW_ERR(hw,
3264 			    "[%s()] Internal error: domain hist list base doesn't match the function's bitmap.\n",
3265 			    __func__);
3266 		return ret;
3267 	}
3268 
3269 	domain->total_hist_list_entries = 0;
3270 	domain->avail_hist_list_entries = 0;
3271 	domain->hist_list_entry_base = 0;
3272 	domain->hist_list_entry_offset = 0;
3273 
3274 	rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
3275 	domain->num_ldb_credits = 0;
3276 
3277 	rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
3278 	domain->num_dir_credits = 0;
3279 
3280 	rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
3281 	rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
3282 	domain->num_avail_aqed_entries = 0;
3283 	domain->num_used_aqed_entries = 0;
3284 
3285 	domain->num_pending_removals = 0;
3286 	domain->num_pending_additions = 0;
3287 	domain->configured = false;
3288 	domain->started = false;
3289 
3290 	/*
3291 	 * Move the domain out of the used_domains list and back to the
3292 	 * function's avail_domains list.
3293 	 */
3294 	dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
3295 	dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
3296 	rsrcs->num_avail_domains++;
3297 
3298 	return 0;
3299 }
3300 
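/*
 * Drain a queue that has no CQ mapping by temporarily mapping it to one of
 * the domain's LDB ports (first unmapping one of that port's QIDs if all of
 * its slots are in use) and then draining the domain's mapped queues.
 */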
3301 static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
3302 					    struct dlb2_hw_domain *domain,
3303 					    struct dlb2_ldb_queue *queue)
3304 {
3305 	struct dlb2_ldb_port *port;
3306 	int ret, i;
3307 
3308 	/* If a domain has LDB queues, it must have LDB ports */
3309 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3310 		if (!dlb2_list_empty(&domain->used_ldb_ports[i]))
3311 			break;
3312 	}
3313 
3314 	if (i == DLB2_NUM_COS_DOMAINS) {
3315 		DLB2_HW_ERR(hw,
3316 			    "[%s()] Internal error: No configured LDB ports\n",
3317 			    __func__);
3318 		return -EFAULT;
3319 	}
3320 
3321 	port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i], typeof(*port));
3322 
3323 	/* If necessary, free up a QID slot in this CQ */
3324 	if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3325 		struct dlb2_ldb_queue *mapped_queue;
3326 
3327 		mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
3328 
3329 		ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
3330 		if (ret)
3331 			return ret;
3332 	}
3333 
3334 	ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
3335 	if (ret)
3336 		return ret;
3337 
3338 	return dlb2_domain_drain_mapped_queues(hw, domain);
3339 }
3340 
3341 static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
3342 					     struct dlb2_hw_domain *domain)
3343 {
3344 	struct dlb2_list_entry *iter;
3345 	struct dlb2_ldb_queue *queue;
3346 	int ret;
3347 	RTE_SET_USED(iter);
3348 
3349 	/* If the domain hasn't been started, there's no traffic to drain */
3350 	if (!domain->started)
3351 		return 0;
3352 
3353 	/*
3354 	 * Pre-condition: the unattached queue must not have any outstanding
3355 	 * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
3356 	 * prior to this in dlb2_domain_drain_mapped_queues().
3357 	 */
3358 	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3359 		if (queue->num_mappings != 0 ||
3360 		    dlb2_ldb_queue_is_empty(hw, queue))
3361 			continue;
3362 
3363 		ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
3364 		if (ret)
3365 			return ret;
3366 	}
3367 
3368 	return 0;
3369 }
3370 
3371 /**
3372  * dlb2_reset_domain() - Reset a DLB scheduling domain and its associated
3373  *	hardware resources.
3374  * @hw:	Contains the current state of the DLB2 hardware.
3375  * @domain_id: Domain ID
3376  * @vdev_req: Request came from a virtual device.
3377  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
3378  *
3379  * Note: User software *must* stop sending to this domain's producer ports
3380  * before invoking this function, otherwise undefined behavior will result.
3381  *
3382  * Return: returns < 0 on error, 0 otherwise.
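 *
 * The reset proceeds roughly as follows: disable the domain's VPPs, CQ
 * interrupts, and queue write permissions; disable and drain the LDB CQs so
 * pending map/unmap procedures can finish; drain the mapped, unmapped, and
 * directed queues; disable the producer ports; and finally restore the
 * domain's registers and software state to their default values.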
3383  */
3384 int dlb2_reset_domain(struct dlb2_hw *hw,
3385 		      u32 domain_id,
3386 		      bool vdev_req,
3387 		      unsigned int vdev_id)
3388 {
3389 	struct dlb2_hw_domain *domain;
3390 	int ret;
3391 
3392 	dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);
3393 
3394 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3395 
3396 	if (domain == NULL || !domain->configured)
3397 		return -EINVAL;
3398 
3399 	/* Disable VPPs */
3400 	if (vdev_req) {
3401 		dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);
3402 
3403 		dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
3404 	}
3405 
3406 	/* Disable CQ interrupts */
3407 	dlb2_domain_disable_dir_port_interrupts(hw, domain);
3408 
3409 	dlb2_domain_disable_ldb_port_interrupts(hw, domain);
3410 
3411 	/*
3412 	 * For each queue owned by this domain, disable its write permissions so
3413 	 * that any traffic sent to it is dropped. Well-behaved software
3414 	 * should not be sending QEs at this point.
3415 	 */
3416 	dlb2_domain_disable_dir_queue_write_perms(hw, domain);
3417 
3418 	dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
3419 
3420 	/* Turn off completion tracking on all the domain's PPs. */
3421 	dlb2_domain_disable_ldb_seq_checks(hw, domain);
3422 
3423 	/*
3424 	 * Disable the LDB CQs and drain them in order to complete the map and
3425 	 * unmap procedures, which require zero CQ inflights and zero QID
3426 	 * inflights respectively.
3427 	 */
3428 	dlb2_domain_disable_ldb_cqs(hw, domain);
3429 
3430 	ret = dlb2_domain_drain_ldb_cqs(hw, domain, false);
3431 	if (ret < 0)
3432 		return ret;
3433 
3434 	ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
3435 	if (ret < 0)
3436 		return ret;
3437 
3438 	ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3439 	if (ret < 0)
3440 		return ret;
3441 
3442 	ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
3443 	if (ret < 0)
3444 		return ret;
3445 
3446 	/* Re-enable the CQs in order to drain the mapped queues. */
3447 	dlb2_domain_enable_ldb_cqs(hw, domain);
3448 
3449 	ret = dlb2_domain_drain_mapped_queues(hw, domain);
3450 	if (ret < 0)
3451 		return ret;
3452 
3453 	ret = dlb2_domain_drain_unmapped_queues(hw, domain);
3454 	if (ret < 0)
3455 		return ret;
3456 
3457 	/* Done draining LDB QEs, so disable the CQs. */
3458 	dlb2_domain_disable_ldb_cqs(hw, domain);
3459 
3460 	dlb2_domain_drain_dir_queues(hw, domain);
3461 
3462 	/* Done draining DIR QEs, so disable the CQs. */
3463 	dlb2_domain_disable_dir_cqs(hw, domain);
3464 
3465 	/* Disable PPs */
3466 	dlb2_domain_disable_dir_producer_ports(hw, domain);
3467 
3468 	dlb2_domain_disable_ldb_producer_ports(hw, domain);
3469 
3470 	ret = dlb2_domain_verify_reset_success(hw, domain);
3471 	if (ret)
3472 		return ret;
3473 
3474 	/* Reset the QID and port state. */
3475 	dlb2_domain_reset_registers(hw, domain);
3476 
3477 	/* Hardware reset complete. Reset the domain's software state */
3478 	ret = dlb2_domain_reset_software_state(hw, domain);
3479 	if (ret)
3480 		return ret;
3481 
3482 	return 0;
3483 }
3484 
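/*
 * Retry deferred unmap operations for every domain. This is presumably
 * invoked from the deferred-work context scheduled via os_schedule_work();
 * the return value is the number of unmap operations still outstanding.
 */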
3485 unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
3486 {
3487 	int i, num = 0;
3488 
3489 	/* Finish queue unmap jobs for any domain that needs it */
3490 	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
3491 		struct dlb2_hw_domain *domain = &hw->domains[i];
3492 
3493 		num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3494 	}
3495 
3496 	return num;
3497 }
3498 
3499 unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
3500 {
3501 	int i, num = 0;
3502 
3503 	/* Finish queue map jobs for any domain that needs it */
3504 	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
3505 		struct dlb2_hw_domain *domain = &hw->domains[i];
3506 
3507 		num += dlb2_domain_finish_map_qid_procedures(hw, domain);
3508 	}
3509 
3510 	return num;
3511 }
3512 
3513 
3514 static void dlb2_configure_ldb_queue(struct dlb2_hw *hw,
3515 				     struct dlb2_hw_domain *domain,
3516 				     struct dlb2_ldb_queue *queue,
3517 				     struct dlb2_create_ldb_queue_args *args,
3518 				     bool vdev_req,
3519 				     unsigned int vdev_id)
3520 {
3521 	union dlb2_sys_vf_ldb_vqid_v r0 = { {0} };
3522 	union dlb2_sys_vf_ldb_vqid2qid r1 = { {0} };
3523 	union dlb2_sys_ldb_qid2vqid r2 = { {0} };
3524 	union dlb2_sys_ldb_vasqid_v r3 = { {0} };
3525 	union dlb2_lsp_qid_ldb_infl_lim r4 = { {0} };
3526 	union dlb2_lsp_qid_aqed_active_lim r5 = { {0} };
3527 	union dlb2_aqed_pipe_qid_hid_width r6 = { {0} };
3528 	union dlb2_sys_ldb_qid_its r7 = { {0} };
3529 	union dlb2_lsp_qid_atm_depth_thrsh r8 = { {0} };
3530 	union dlb2_lsp_qid_naldb_depth_thrsh r9 = { {0} };
3531 	union dlb2_aqed_pipe_qid_fid_lim r10 = { {0} };
3532 	union dlb2_chp_ord_qid_sn_map r11 = { {0} };
3533 	union dlb2_sys_ldb_qid_cfg_v r12 = { {0} };
3534 	union dlb2_sys_ldb_qid_v r13 = { {0} };
3535 
3536 	struct dlb2_sn_group *sn_group;
3537 	unsigned int offs;
3538 
3539 	/* QID write permissions are turned on when the domain is started */
3540 	r3.field.vasqid_v = 0;
3541 
3542 	offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
3543 		queue->id.phys_id;
3544 
3545 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), r3.val);
3546 
3547 	/*
3548 	 * Unordered QIDs get 4K inflights, ordered get as many as the number
3549 	 * of sequence numbers.
3550 	 */
3551 	r4.field.limit = args->num_qid_inflights;
3552 
3553 	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r4.val);
3554 
3555 	r5.field.limit = queue->aqed_limit;
3556 
3557 	if (r5.field.limit > DLB2_MAX_NUM_AQED_ENTRIES)
3558 		r5.field.limit = DLB2_MAX_NUM_AQED_ENTRIES;
3559 
3560 	DLB2_CSR_WR(hw,
3561 		    DLB2_LSP_QID_AQED_ACTIVE_LIM(queue->id.phys_id),
3562 		    r5.val);
3563 
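	/*
	 * Translate the lock ID compression level into the AQED hashed-ID
	 * width code: 64 -> 1, 128 -> 2, ..., 4096 -> 7 (i.e. log2(level) - 5).
	 * A level of 0 or 65536 selects code 0, i.e. the full 65536-entry
	 * (uncompressed) lock ID space.
	 */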
3564 	switch (args->lock_id_comp_level) {
3565 	case 64:
3566 		r6.field.compress_code = 1;
3567 		break;
3568 	case 128:
3569 		r6.field.compress_code = 2;
3570 		break;
3571 	case 256:
3572 		r6.field.compress_code = 3;
3573 		break;
3574 	case 512:
3575 		r6.field.compress_code = 4;
3576 		break;
3577 	case 1024:
3578 		r6.field.compress_code = 5;
3579 		break;
3580 	case 2048:
3581 		r6.field.compress_code = 6;
3582 		break;
3583 	case 4096:
3584 		r6.field.compress_code = 7;
3585 		break;
3586 	case 0:
3587 	case 65536:
3588 		r6.field.compress_code = 0;
3589 	}
3590 
3591 	DLB2_CSR_WR(hw,
3592 		    DLB2_AQED_PIPE_QID_HID_WIDTH(queue->id.phys_id),
3593 		    r6.val);
3594 
3595 	/* Don't timestamp QEs that pass through this queue */
3596 	r7.field.qid_its = 0;
3597 
3598 	DLB2_CSR_WR(hw,
3599 		    DLB2_SYS_LDB_QID_ITS(queue->id.phys_id),
3600 		    r7.val);
3601 
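	/*
	 * The same depth threshold is programmed into both the atomic (ATM)
	 * and non-atomic (NALDB) depth threshold registers below.
	 */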
3602 	r8.field.thresh = args->depth_threshold;
3603 
3604 	DLB2_CSR_WR(hw,
3605 		    DLB2_LSP_QID_ATM_DEPTH_THRSH(queue->id.phys_id),
3606 		    r8.val);
3607 
3608 	r9.field.thresh = args->depth_threshold;
3609 
3610 	DLB2_CSR_WR(hw,
3611 		    DLB2_LSP_QID_NALDB_DEPTH_THRSH(queue->id.phys_id),
3612 		    r9.val);
3613 
3614 	/*
3615 	 * This register limits the number of inflight flows a queue can have
3616 	 * at one time.  It has an upper bound of 2048, but can be
3617 	 * over-subscribed. 512 is chosen so that a single queue doesn't use
3618 	 * the entire atomic storage, but can use a substantial portion if
3619 	 * needed.
3620 	 */
3621 	r10.field.qid_fid_limit = 512;
3622 
3623 	DLB2_CSR_WR(hw,
3624 		    DLB2_AQED_PIPE_QID_FID_LIM(queue->id.phys_id),
3625 		    r10.val);
3626 
3627 	/* Configure SNs */
3628 	sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
3629 	r11.field.mode = sn_group->mode;
3630 	r11.field.slot = queue->sn_slot;
3631 	r11.field.grp  = sn_group->id;
3632 
3633 	DLB2_CSR_WR(hw, DLB2_CHP_ORD_QID_SN_MAP(queue->id.phys_id), r11.val);
3634 
3635 	r12.field.sn_cfg_v = (args->num_sequence_numbers != 0);
3636 	r12.field.fid_cfg_v = (args->num_atomic_inflights != 0);
3637 
3638 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_CFG_V(queue->id.phys_id), r12.val);
3639 
3640 	if (vdev_req) {
3641 		offs = vdev_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.virt_id;
3642 
3643 		r0.field.vqid_v = 1;
3644 
3645 		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(offs), r0.val);
3646 
3647 		r1.field.qid = queue->id.phys_id;
3648 
3649 		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(offs), r1.val);
3650 
3651 		r2.field.vqid = queue->id.virt_id;
3652 
3653 		DLB2_CSR_WR(hw,
3654 			    DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
3655 			    r2.val);
3656 	}
3657 
3658 	r13.field.qid_v = 1;
3659 
3660 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_V(queue->id.phys_id), r13.val);
3661 }
3662 
3663 static int
3664 dlb2_ldb_queue_attach_to_sn_group(struct dlb2_hw *hw,
3665 				  struct dlb2_ldb_queue *queue,
3666 				  struct dlb2_create_ldb_queue_args *args)
3667 {
3668 	int slot = -1;
3669 	int i;
3670 
3671 	queue->sn_cfg_valid = false;
3672 
3673 	if (args->num_sequence_numbers == 0)
3674 		return 0;
3675 
3676 	for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3677 		struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3678 
3679 		if (group->sequence_numbers_per_queue ==
3680 		    args->num_sequence_numbers &&
3681 		    !dlb2_sn_group_full(group)) {
3682 			slot = dlb2_sn_group_alloc_slot(group);
3683 			if (slot >= 0)
3684 				break;
3685 		}
3686 	}
3687 
3688 	if (slot == -1) {
3689 		DLB2_HW_ERR(hw,
3690 			    "[%s():%d] Internal error: no sequence number slots available\n",
3691 			    __func__, __LINE__);
3692 		return -EFAULT;
3693 	}
3694 
3695 	queue->sn_cfg_valid = true;
3696 	queue->sn_group = i;
3697 	queue->sn_slot = slot;
3698 	return 0;
3699 }
3700 
3701 static int
3702 dlb2_ldb_queue_attach_resources(struct dlb2_hw *hw,
3703 				struct dlb2_hw_domain *domain,
3704 				struct dlb2_ldb_queue *queue,
3705 				struct dlb2_create_ldb_queue_args *args)
3706 {
3707 	int ret;
3708 
3709 	ret = dlb2_ldb_queue_attach_to_sn_group(hw, queue, args);
3710 	if (ret)
3711 		return ret;
3712 
3713 	/* Attach QID inflights */
3714 	queue->num_qid_inflights = args->num_qid_inflights;
3715 
3716 	/* Attach atomic inflights */
3717 	queue->aqed_limit = args->num_atomic_inflights;
3718 
3719 	domain->num_avail_aqed_entries -= args->num_atomic_inflights;
3720 	domain->num_used_aqed_entries += args->num_atomic_inflights;
3721 
3722 	return 0;
3723 }
3724 
3725 static int
3726 dlb2_verify_create_ldb_queue_args(struct dlb2_hw *hw,
3727 				  u32 domain_id,
3728 				  struct dlb2_create_ldb_queue_args *args,
3729 				  struct dlb2_cmd_response *resp,
3730 				  bool vdev_req,
3731 				  unsigned int vdev_id)
3732 {
3733 	struct dlb2_hw_domain *domain;
3734 	int i;
3735 
3736 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3737 
3738 	if (domain == NULL) {
3739 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
3740 		return -EINVAL;
3741 	}
3742 
3743 	if (!domain->configured) {
3744 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
3745 		return -EINVAL;
3746 	}
3747 
3748 	if (domain->started) {
3749 		resp->status = DLB2_ST_DOMAIN_STARTED;
3750 		return -EINVAL;
3751 	}
3752 
3753 	if (dlb2_list_empty(&domain->avail_ldb_queues)) {
3754 		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
3755 		return -EINVAL;
3756 	}
3757 
3758 	if (args->num_sequence_numbers) {
3759 		for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3760 			struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3761 
3762 			if (group->sequence_numbers_per_queue ==
3763 			    args->num_sequence_numbers &&
3764 			    !dlb2_sn_group_full(group))
3765 				break;
3766 		}
3767 
3768 		if (i == DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
3769 			resp->status = DLB2_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
3770 			return -EINVAL;
3771 		}
3772 	}
3773 
3774 	if (args->num_qid_inflights > 4096) {
3775 		resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3776 		return -EINVAL;
3777 	}
3778 
3779 	/* Inflights must be <= number of sequence numbers if ordered */
3780 	if (args->num_sequence_numbers != 0 &&
3781 	    args->num_qid_inflights > args->num_sequence_numbers) {
3782 		resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3783 		return -EINVAL;
3784 	}
3785 
3786 	if (domain->num_avail_aqed_entries < args->num_atomic_inflights) {
3787 		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
3788 		return -EINVAL;
3789 	}
3790 
3791 	if (args->num_atomic_inflights &&
3792 	    args->lock_id_comp_level != 0 &&
3793 	    args->lock_id_comp_level != 64 &&
3794 	    args->lock_id_comp_level != 128 &&
3795 	    args->lock_id_comp_level != 256 &&
3796 	    args->lock_id_comp_level != 512 &&
3797 	    args->lock_id_comp_level != 1024 &&
3798 	    args->lock_id_comp_level != 2048 &&
3799 	    args->lock_id_comp_level != 4096 &&
3800 	    args->lock_id_comp_level != 65536) {
3801 		resp->status = DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL;
3802 		return -EINVAL;
3803 	}
3804 
3805 	return 0;
3806 }
3807 
3808 static void
3809 dlb2_log_create_ldb_queue_args(struct dlb2_hw *hw,
3810 			       u32 domain_id,
3811 			       struct dlb2_create_ldb_queue_args *args,
3812 			       bool vdev_req,
3813 			       unsigned int vdev_id)
3814 {
3815 	DLB2_HW_DBG(hw, "DLB2 create load-balanced queue arguments:\n");
3816 	if (vdev_req)
3817 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
3818 	DLB2_HW_DBG(hw, "\tDomain ID:                  %d\n",
3819 		    domain_id);
3820 	DLB2_HW_DBG(hw, "\tNumber of sequence numbers: %d\n",
3821 		    args->num_sequence_numbers);
3822 	DLB2_HW_DBG(hw, "\tNumber of QID inflights:    %d\n",
3823 		    args->num_qid_inflights);
3824 	DLB2_HW_DBG(hw, "\tNumber of ATM inflights:    %d\n",
3825 		    args->num_atomic_inflights);
3826 }
3827 
3828 /**
3829  * dlb2_hw_create_ldb_queue() - Allocate and initialize a DLB LDB queue.
3830  * @hw:	Contains the current state of the DLB2 hardware.
3831  * @domain_id: Domain ID
3832  * @args: User-provided arguments.
3833  * @resp: Response to user.
3834  * @vdev_req: Request came from a virtual device.
3835  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
3836  *
3837  * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
3838  * satisfy a request, resp->status will be set accordingly.
3839  */
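/*
 * Illustrative call sequence (a minimal sketch, not taken from the driver;
 * the values are hypothetical and assume a configured domain that has not
 * yet been started):
 *
 *	struct dlb2_create_ldb_queue_args args = { 0 };
 *	struct dlb2_cmd_response resp = { 0 };
 *	int ret;
 *
 *	args.num_sequence_numbers = 0;	  // unordered queue
 *	args.num_qid_inflights = 2048;
 *	args.num_atomic_inflights = 64;
 *	args.lock_id_comp_level = 0;	  // no lock ID compression
 *	args.depth_threshold = 256;
 *
 *	ret = dlb2_hw_create_ldb_queue(hw, domain_id, &args, &resp,
 *				       false, 0);
 *	if (ret == 0)
 *		ldb_queue_id = resp.id;	  // physical ID for a PF request
 */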
3840 int dlb2_hw_create_ldb_queue(struct dlb2_hw *hw,
3841 			     u32 domain_id,
3842 			     struct dlb2_create_ldb_queue_args *args,
3843 			     struct dlb2_cmd_response *resp,
3844 			     bool vdev_req,
3845 			     unsigned int vdev_id)
3846 {
3847 	struct dlb2_hw_domain *domain;
3848 	struct dlb2_ldb_queue *queue;
3849 	int ret;
3850 
3851 	dlb2_log_create_ldb_queue_args(hw, domain_id, args, vdev_req, vdev_id);
3852 
3853 	/*
3854 	 * Verify that hardware resources are available before attempting to
3855 	 * satisfy the request. This simplifies the error unwinding code.
3856 	 */
3857 	ret = dlb2_verify_create_ldb_queue_args(hw,
3858 						domain_id,
3859 						args,
3860 						resp,
3861 						vdev_req,
3862 						vdev_id);
3863 	if (ret)
3864 		return ret;
3865 
3866 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3867 	if (domain == NULL) {
3868 		DLB2_HW_ERR(hw,
3869 			    "[%s():%d] Internal error: domain not found\n",
3870 			    __func__, __LINE__);
3871 		return -EFAULT;
3872 	}
3873 
3874 	queue = DLB2_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
3875 	if (queue == NULL) {
3876 		DLB2_HW_ERR(hw,
3877 			    "[%s():%d] Internal error: no available ldb queues\n",
3878 			    __func__, __LINE__);
3879 		return -EFAULT;
3880 	}
3881 
3882 	ret = dlb2_ldb_queue_attach_resources(hw, domain, queue, args);
3883 	if (ret < 0) {
3884 		DLB2_HW_ERR(hw,
3885 			    "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
3886 			    __func__, __LINE__);
3887 		return ret;
3888 	}
3889 
3890 	dlb2_configure_ldb_queue(hw, domain, queue, args, vdev_req, vdev_id);
3891 
3892 	queue->num_mappings = 0;
3893 
3894 	queue->configured = true;
3895 
3896 	/*
3897 	 * Configuration succeeded, so move the resource from the 'avail' to
3898 	 * the 'used' list.
3899 	 */
3900 	dlb2_list_del(&domain->avail_ldb_queues, &queue->domain_list);
3901 
3902 	dlb2_list_add(&domain->used_ldb_queues, &queue->domain_list);
3903 
3904 	resp->status = 0;
3905 	resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
3906 
3907 	return 0;
3908 }
3909 
3910 int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw, unsigned int group_id)
3911 {
3912 	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
3913 		return -EINVAL;
3914 
3915 	return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
3916 }
3917 
3918 int dlb2_get_group_sequence_number_occupancy(struct dlb2_hw *hw,
3919 					     unsigned int group_id)
3920 {
3921 	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
3922 		return -EINVAL;
3923 
3924 	return dlb2_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
3925 }
3926 
3927 static void dlb2_log_set_group_sequence_numbers(struct dlb2_hw *hw,
3928 						unsigned int group_id,
3929 						unsigned long val)
3930 {
3931 	DLB2_HW_DBG(hw, "DLB2 set group sequence numbers:\n");
3932 	DLB2_HW_DBG(hw, "\tGroup ID: %u\n", group_id);
3933 	DLB2_HW_DBG(hw, "\tValue:    %lu\n", val);
3934 }
3935 
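/*
 * dlb2_set_group_sequence_numbers() - Configure a sequence number group's
 * per-queue allocation. Valid values are 64, 128, 256, 512, and 1024
 * sequence numbers per queue, and the group can only be reconfigured while
 * none of its slots are in use by an ordered queue.
 *
 * Illustrative use (a sketch; the values are hypothetical): allocate 256
 * sequence numbers per queue on group 0 before creating ordered queues:
 *
 *	if (dlb2_set_group_sequence_numbers(hw, 0, 256))
 *		return -1;
 */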
3936 int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,
3937 				    unsigned int group_id,
3938 				    unsigned long val)
3939 {
3940 	u32 valid_allocations[] = {64, 128, 256, 512, 1024};
3941 	union dlb2_ro_pipe_grp_sn_mode r0 = { {0} };
3942 	struct dlb2_sn_group *group;
3943 	int mode;
3944 
3945 	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
3946 		return -EINVAL;
3947 
3948 	group = &hw->rsrcs.sn_groups[group_id];
3949 
3950 	/*
3951 	 * Once the first load-balanced queue using an SN group is configured,
3952 	 * the group cannot be changed.
3953 	 */
3954 	if (group->slot_use_bitmap != 0)
3955 		return -EPERM;
3956 
3957 	for (mode = 0; mode < DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
3958 		if (val == valid_allocations[mode])
3959 			break;
3960 
3961 	if (mode == DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES)
3962 		return -EINVAL;
3963 
3964 	group->mode = mode;
3965 	group->sequence_numbers_per_queue = val;
3966 
3967 	r0.field.sn_mode_0 = hw->rsrcs.sn_groups[0].mode;
3968 	r0.field.sn_mode_1 = hw->rsrcs.sn_groups[1].mode;
3969 
3970 	DLB2_CSR_WR(hw, DLB2_RO_PIPE_GRP_SN_MODE, r0.val);
3971 
3972 	dlb2_log_set_group_sequence_numbers(hw, group_id, val);
3973 
3974 	return 0;
3975 }
3976 
3977 static void dlb2_ldb_port_configure_pp(struct dlb2_hw *hw,
3978 				       struct dlb2_hw_domain *domain,
3979 				       struct dlb2_ldb_port *port,
3980 				       bool vdev_req,
3981 				       unsigned int vdev_id)
3982 {
3983 	union dlb2_sys_ldb_pp2vas r0 = { {0} };
3984 	union dlb2_sys_ldb_pp_v r4 = { {0} };
3985 
3986 	r0.field.vas = domain->id.phys_id;
3987 
3988 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VAS(port->id.phys_id), r0.val);
3989 
3990 	if (vdev_req) {
3991 		union dlb2_sys_vf_ldb_vpp2pp r1 = { {0} };
3992 		union dlb2_sys_ldb_pp2vdev r2 = { {0} };
3993 		union dlb2_sys_vf_ldb_vpp_v r3 = { {0} };
3994 		unsigned int offs;
3995 		u32 virt_id;
3996 
3997 		/*
3998 		 * DLB uses producer port address bits 17:12 to determine the
3999 		 * producer port ID. In Scalable IOV mode, PP accesses come
4000 		 * through the PF MMIO window for the physical producer port,
4001 		 * so for translation purposes the virtual and physical port
4002 		 * IDs are equal.
4003 		 */
4004 		if (hw->virt_mode == DLB2_VIRT_SRIOV)
4005 			virt_id = port->id.virt_id;
4006 		else
4007 			virt_id = port->id.phys_id;
4008 
4009 		r1.field.pp = port->id.phys_id;
4010 
4011 		offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
4012 
4013 		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP2PP(offs), r1.val);
4014 
4015 		r2.field.vdev = vdev_id;
4016 
4017 		DLB2_CSR_WR(hw,
4018 			    DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
4019 			    r2.val);
4020 
4021 		r3.field.vpp_v = 1;
4022 
4023 		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), r3.val);
4024 	}
4025 
4026 	r4.field.pp_v = 1;
4027 
4028 	DLB2_CSR_WR(hw,
4029 		    DLB2_SYS_LDB_PP_V(port->id.phys_id),
4030 		    r4.val);
4031 }
4032 
4033 static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,
4034 				      struct dlb2_hw_domain *domain,
4035 				      struct dlb2_ldb_port *port,
4036 				      uintptr_t cq_dma_base,
4037 				      struct dlb2_create_ldb_port_args *args,
4038 				      bool vdev_req,
4039 				      unsigned int vdev_id)
4040 {
4041 	union dlb2_sys_ldb_cq_addr_l r0 = { {0} };
4042 	union dlb2_sys_ldb_cq_addr_u r1 = { {0} };
4043 	union dlb2_sys_ldb_cq2vf_pf_ro r2 = { {0} };
4044 	union dlb2_chp_ldb_cq_tkn_depth_sel r3 = { {0} };
4045 	union dlb2_lsp_cq_ldb_tkn_depth_sel r4 = { {0} };
4046 	union dlb2_chp_hist_list_lim r5 = { {0} };
4047 	union dlb2_chp_hist_list_base r6 = { {0} };
4048 	union dlb2_lsp_cq_ldb_infl_lim r7 = { {0} };
4049 	union dlb2_chp_hist_list_push_ptr r8 = { {0} };
4050 	union dlb2_chp_hist_list_pop_ptr r9 = { {0} };
4051 	union dlb2_sys_ldb_cq_at r10 = { {0} };
4052 	union dlb2_sys_ldb_cq_pasid r11 = { {0} };
4053 	union dlb2_chp_ldb_cq2vas r12 = { {0} };
4054 	union dlb2_lsp_cq2priov r13 = { {0} };
4055 
4056 	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4057 	r0.field.addr_l = cq_dma_base >> 6;
4058 
4059 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id), r0.val);
4060 
4061 	r1.field.addr_u = cq_dma_base >> 32;
4062 
4063 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id), r1.val);
4064 
4065 	/*
4066 	 * 'ro' == relaxed ordering. This setting allows DLB2 to write
4067 	 * cache lines out-of-order (but QEs within a cache line are always
4068 	 * updated in-order).
4069 	 */
4070 	r2.field.vf = vdev_id;
4071 	r2.field.is_pf = !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV);
4072 	r2.field.ro = 1;
4073 
4074 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id), r2.val);
4075 
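	/*
	 * Encode the CQ depth for the token-depth-select field: depths of
	 * 8 through 1024 map to codes 1 through 8 (log2(depth) - 2). Depths
	 * below 8 reuse the 8-entry code and are compensated for with a
	 * non-zero initial token count, programmed further down.
	 */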
4076 	if (args->cq_depth <= 8) {
4077 		r3.field.token_depth_select = 1;
4078 	} else if (args->cq_depth == 16) {
4079 		r3.field.token_depth_select = 2;
4080 	} else if (args->cq_depth == 32) {
4081 		r3.field.token_depth_select = 3;
4082 	} else if (args->cq_depth == 64) {
4083 		r3.field.token_depth_select = 4;
4084 	} else if (args->cq_depth == 128) {
4085 		r3.field.token_depth_select = 5;
4086 	} else if (args->cq_depth == 256) {
4087 		r3.field.token_depth_select = 6;
4088 	} else if (args->cq_depth == 512) {
4089 		r3.field.token_depth_select = 7;
4090 	} else if (args->cq_depth == 1024) {
4091 		r3.field.token_depth_select = 8;
4092 	} else {
4093 		DLB2_HW_ERR(hw,
4094 			    "[%s():%d] Internal error: invalid CQ depth\n",
4095 			    __func__, __LINE__);
4096 		return -EFAULT;
4097 	}
4098 
4099 	DLB2_CSR_WR(hw,
4100 		    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id.phys_id),
4101 		    r3.val);
4102 
4103 	/*
4104 	 * To support CQs with depth less than 8, program the token count
4105 	 * register with a non-zero initial value. Operations such as domain
4106 	 * reset must take this initial value into account when quiescing the
4107 	 * CQ.
4108 	 */
4109 	port->init_tkn_cnt = 0;
4110 
4111 	if (args->cq_depth < 8) {
4112 		union dlb2_lsp_cq_ldb_tkn_cnt r14 = { {0} };
4113 
4114 		port->init_tkn_cnt = 8 - args->cq_depth;
4115 
4116 		r14.field.token_count = port->init_tkn_cnt;
4117 
4118 		DLB2_CSR_WR(hw,
4119 			    DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),
4120 			    r14.val);
4121 	} else {
4122 		DLB2_CSR_WR(hw,
4123 			    DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),
4124 			    DLB2_LSP_CQ_LDB_TKN_CNT_RST);
4125 	}
4126 
4127 	r4.field.token_depth_select = r3.field.token_depth_select;
4128 	r4.field.ignore_depth = 0;
4129 
4130 	DLB2_CSR_WR(hw,
4131 		    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id.phys_id),
4132 		    r4.val);
4133 
4134 	/* Reset the CQ write pointer */
4135 	DLB2_CSR_WR(hw,
4136 		    DLB2_CHP_LDB_CQ_WPTR(port->id.phys_id),
4137 		    DLB2_CHP_LDB_CQ_WPTR_RST);
4138 
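	/* HIST_LIST_LIM takes the last valid entry, hence the -1. */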
4139 	r5.field.limit = port->hist_list_entry_limit - 1;
4140 
4141 	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_LIM(port->id.phys_id), r5.val);
4142 
4143 	r6.field.base = port->hist_list_entry_base;
4144 
4145 	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_BASE(port->id.phys_id), r6.val);
4146 
4147 	/*
4148 	 * The inflight limit sets a cap on the number of QEs for which this CQ
4149 	 * can owe completions at one time.
4150 	 */
4151 	r7.field.limit = args->cq_history_list_size;
4152 
4153 	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_LIM(port->id.phys_id), r7.val);
4154 
4155 	r8.field.push_ptr = r6.field.base;
4156 	r8.field.generation = 0;
4157 
4158 	DLB2_CSR_WR(hw,
4159 		    DLB2_CHP_HIST_LIST_PUSH_PTR(port->id.phys_id),
4160 		    r8.val);
4161 
4162 	r9.field.pop_ptr = r6.field.base;
4163 	r9.field.generation = 0;
4164 
4165 	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_POP_PTR(port->id.phys_id), r9.val);
4166 
4167 	/*
4168 	 * Address translation (AT) settings: 0: untranslated, 2: translated
4169 	 * (see ATS spec regarding Address Type field for more details)
4170 	 */
4171 	r10.field.cq_at = 0;
4172 
4173 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_AT(port->id.phys_id), r10.val);
4174 
4175 	if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4176 		r11.field.pasid = hw->pasid[vdev_id];
4177 		r11.field.fmt2 = 1;
4178 	}
4179 
4180 	DLB2_CSR_WR(hw,
4181 		    DLB2_SYS_LDB_CQ_PASID(port->id.phys_id),
4182 		    r11.val);
4183 
4184 	r12.field.cq2vas = domain->id.phys_id;
4185 
4186 	DLB2_CSR_WR(hw, DLB2_CHP_LDB_CQ2VAS(port->id.phys_id), r12.val);
4187 
4188 	/* Disable the port's QID mappings */
4189 	r13.field.v = 0;
4190 
4191 	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id), r13.val);
4192 
4193 	return 0;
4194 }
4195 
4196 static int dlb2_configure_ldb_port(struct dlb2_hw *hw,
4197 				   struct dlb2_hw_domain *domain,
4198 				   struct dlb2_ldb_port *port,
4199 				   uintptr_t cq_dma_base,
4200 				   struct dlb2_create_ldb_port_args *args,
4201 				   bool vdev_req,
4202 				   unsigned int vdev_id)
4203 {
4204 	int ret, i;
4205 
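	/*
	 * Carve this port's history list window out of the domain's
	 * contiguous allocation: [base, base + cq_history_list_size). The
	 * domain's offset advances so the next port gets a disjoint range.
	 */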
4206 	port->hist_list_entry_base = domain->hist_list_entry_base +
4207 				     domain->hist_list_entry_offset;
4208 	port->hist_list_entry_limit = port->hist_list_entry_base +
4209 				      args->cq_history_list_size;
4210 
4211 	domain->hist_list_entry_offset += args->cq_history_list_size;
4212 	domain->avail_hist_list_entries -= args->cq_history_list_size;
4213 
4214 	ret = dlb2_ldb_port_configure_cq(hw,
4215 					 domain,
4216 					 port,
4217 					 cq_dma_base,
4218 					 args,
4219 					 vdev_req,
4220 					 vdev_id);
4221 	if (ret < 0)
4222 		return ret;
4223 
4224 	dlb2_ldb_port_configure_pp(hw,
4225 				   domain,
4226 				   port,
4227 				   vdev_req,
4228 				   vdev_id);
4229 
4230 	dlb2_ldb_port_cq_enable(hw, port);
4231 
4232 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++)
4233 		port->qid_map[i].state = DLB2_QUEUE_UNMAPPED;
4234 	port->num_mappings = 0;
4235 
4236 	port->enabled = true;
4237 
4238 	port->configured = true;
4239 
4240 	return 0;
4241 }
4242 
4243 static void
4244 dlb2_log_create_ldb_port_args(struct dlb2_hw *hw,
4245 			      u32 domain_id,
4246 			      uintptr_t cq_dma_base,
4247 			      struct dlb2_create_ldb_port_args *args,
4248 			      bool vdev_req,
4249 			      unsigned int vdev_id)
4250 {
4251 	DLB2_HW_DBG(hw, "DLB2 create load-balanced port arguments:\n");
4252 	if (vdev_req)
4253 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4254 	DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
4255 		    domain_id);
4256 	DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
4257 		    args->cq_depth);
4258 	DLB2_HW_DBG(hw, "\tCQ hist list size:         %d\n",
4259 		    args->cq_history_list_size);
4260 	DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
4261 		    cq_dma_base);
4262 	DLB2_HW_DBG(hw, "\tCoS ID:                    %u\n", args->cos_id);
4263 	DLB2_HW_DBG(hw, "\tStrict CoS allocation:     %u\n",
4264 		    args->cos_strict);
4265 }
4266 
4267 static int
4268 dlb2_verify_create_ldb_port_args(struct dlb2_hw *hw,
4269 				 u32 domain_id,
4270 				 uintptr_t cq_dma_base,
4271 				 struct dlb2_create_ldb_port_args *args,
4272 				 struct dlb2_cmd_response *resp,
4273 				 bool vdev_req,
4274 				 unsigned int vdev_id)
4275 {
4276 	struct dlb2_hw_domain *domain;
4277 	int i;
4278 
4279 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4280 
4281 	if (domain == NULL) {
4282 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4283 		return -EINVAL;
4284 	}
4285 
4286 	if (!domain->configured) {
4287 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4288 		return -EINVAL;
4289 	}
4290 
4291 	if (domain->started) {
4292 		resp->status = DLB2_ST_DOMAIN_STARTED;
4293 		return -EINVAL;
4294 	}
4295 
4296 	if (args->cos_id >= DLB2_NUM_COS_DOMAINS) {
4297 		resp->status = DLB2_ST_INVALID_COS_ID;
4298 		return -EINVAL;
4299 	}
4300 
4301 	if (args->cos_strict) {
4302 		if (dlb2_list_empty(&domain->avail_ldb_ports[args->cos_id])) {
4303 			resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
4304 			return -EINVAL;
4305 		}
4306 	} else {
4307 		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
4308 			if (!dlb2_list_empty(&domain->avail_ldb_ports[i]))
4309 				break;
4310 		}
4311 
4312 		if (i == DLB2_NUM_COS_DOMAINS) {
4313 			resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
4314 			return -EINVAL;
4315 		}
4316 	}
4317 
4318 	/* Check cache-line alignment */
4319 	if ((cq_dma_base & 0x3F) != 0) {
4320 		resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4321 		return -EINVAL;
4322 	}
4323 
4324 	if (args->cq_depth != 1 &&
4325 	    args->cq_depth != 2 &&
4326 	    args->cq_depth != 4 &&
4327 	    args->cq_depth != 8 &&
4328 	    args->cq_depth != 16 &&
4329 	    args->cq_depth != 32 &&
4330 	    args->cq_depth != 64 &&
4331 	    args->cq_depth != 128 &&
4332 	    args->cq_depth != 256 &&
4333 	    args->cq_depth != 512 &&
4334 	    args->cq_depth != 1024) {
4335 		resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4336 		return -EINVAL;
4337 	}
4338 
4339 	/* The history list size must be >= 1 */
4340 	if (!args->cq_history_list_size) {
4341 		resp->status = DLB2_ST_INVALID_HIST_LIST_DEPTH;
4342 		return -EINVAL;
4343 	}
4344 
4345 	if (args->cq_history_list_size > domain->avail_hist_list_entries) {
4346 		resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
4347 		return -EINVAL;
4348 	}
4349 
4350 	return 0;
4351 }
4352 
4353 
4354 /**
4355  * dlb2_hw_create_ldb_port() - Allocate and initialize a load-balanced port and
4356  *	its resources.
4357  * @hw:	Contains the current state of the DLB2 hardware.
4358  * @domain_id: Domain ID
4359  * @args: User-provided arguments.
4360  * @cq_dma_base: Base DMA address for consumer queue memory
4361  * @resp: Response to user.
4362  * @vdev_req: Request came from a virtual device.
4363  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
4364  *
4365  * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
4366  * satisfy a request, resp->status will be set accordingly.
4367  */
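/*
 * Illustrative call sequence (a minimal sketch, not taken from the driver;
 * cq_base is assumed to be the IOVA of 64B-aligned, DMA-able memory sized
 * for the requested CQ depth):
 *
 *	struct dlb2_create_ldb_port_args args = { 0 };
 *	struct dlb2_cmd_response resp = { 0 };
 *
 *	args.cq_depth = 64;
 *	args.cq_history_list_size = 64;
 *	args.cos_id = 0;
 *	args.cos_strict = 0;	// allow fallback to any class of service
 *
 *	if (dlb2_hw_create_ldb_port(hw, domain_id, &args, cq_base,
 *				    &resp, false, 0) == 0)
 *		ldb_port_id = resp.id;
 */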
4368 int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,
4369 			    u32 domain_id,
4370 			    struct dlb2_create_ldb_port_args *args,
4371 			    uintptr_t cq_dma_base,
4372 			    struct dlb2_cmd_response *resp,
4373 			    bool vdev_req,
4374 			    unsigned int vdev_id)
4375 {
4376 	struct dlb2_hw_domain *domain;
4377 	struct dlb2_ldb_port *port;
4378 	int ret, cos_id, i;
4379 
4380 	dlb2_log_create_ldb_port_args(hw,
4381 				      domain_id,
4382 				      cq_dma_base,
4383 				      args,
4384 				      vdev_req,
4385 				      vdev_id);
4386 
4387 	/*
4388 	 * Verify that hardware resources are available before attempting to
4389 	 * satisfy the request. This simplifies the error unwinding code.
4390 	 */
4391 	ret = dlb2_verify_create_ldb_port_args(hw,
4392 					       domain_id,
4393 					       cq_dma_base,
4394 					       args,
4395 					       resp,
4396 					       vdev_req,
4397 					       vdev_id);
4398 	if (ret)
4399 		return ret;
4400 
4401 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4402 	if (domain == NULL) {
4403 		DLB2_HW_ERR(hw,
4404 			    "[%s():%d] Internal error: domain not found\n",
4405 			    __func__, __LINE__);
4406 		return -EFAULT;
4407 	}
4408 
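	/*
	 * Pick a class of service: with cos_strict set, only the requested
	 * class is considered; otherwise start at the requested class and
	 * fall back to the remaining classes in round-robin order.
	 */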
4409 	if (args->cos_strict) {
4410 		cos_id = args->cos_id;
4411 
4412 		port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[cos_id],
4413 					  typeof(*port));
4414 	} else {
4415 		int idx;
4416 
4417 		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
4418 			idx = (args->cos_id + i) % DLB2_NUM_COS_DOMAINS;
4419 
4420 			port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[idx],
4421 						  typeof(*port));
4422 			if (port)
4423 				break;
4424 		}
4425 
4426 		cos_id = idx;
4427 	}
4428 
4429 	if (port == NULL) {
4430 		DLB2_HW_ERR(hw,
4431 			    "[%s():%d] Internal error: no available ldb ports\n",
4432 			    __func__, __LINE__);
4433 		return -EFAULT;
4434 	}
4435 
4436 	if (port->configured) {
4437 		DLB2_HW_ERR(hw,
4438 			    "[%s()] Internal error: avail_ldb_ports contains configured ports.\n",
4439 			    __func__);
4440 		return -EFAULT;
4441 	}
4442 
4443 	ret = dlb2_configure_ldb_port(hw,
4444 				      domain,
4445 				      port,
4446 				      cq_dma_base,
4447 				      args,
4448 				      vdev_req,
4449 				      vdev_id);
4450 	if (ret < 0)
4451 		return ret;
4452 
4453 	/*
4454 	 * Configuration succeeded, so move the resource from the 'avail' to
4455 	 * the 'used' list.
4456 	 */
4457 	dlb2_list_del(&domain->avail_ldb_ports[cos_id], &port->domain_list);
4458 
4459 	dlb2_list_add(&domain->used_ldb_ports[cos_id], &port->domain_list);
4460 
4461 	resp->status = 0;
4462 	resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4463 
4464 	return 0;
4465 }
4466 
4467 static void
4468 dlb2_log_create_dir_port_args(struct dlb2_hw *hw,
4469 			      u32 domain_id,
4470 			      uintptr_t cq_dma_base,
4471 			      struct dlb2_create_dir_port_args *args,
4472 			      bool vdev_req,
4473 			      unsigned int vdev_id)
4474 {
4475 	DLB2_HW_DBG(hw, "DLB2 create directed port arguments:\n");
4476 	if (vdev_req)
4477 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4478 	DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
4479 		    domain_id);
4480 	DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
4481 		    args->cq_depth);
4482 	DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
4483 		    cq_dma_base);
4484 }
4485 
4486 static struct dlb2_dir_pq_pair *
4487 dlb2_get_domain_used_dir_pq(u32 id,
4488 			    bool vdev_req,
4489 			    struct dlb2_hw_domain *domain)
4490 {
4491 	struct dlb2_list_entry *iter;
4492 	struct dlb2_dir_pq_pair *port;
4493 	RTE_SET_USED(iter);
4494 
4495 	if (id >= DLB2_MAX_NUM_DIR_PORTS)
4496 		return NULL;
4497 
4498 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
4499 		if ((!vdev_req && port->id.phys_id == id) ||
4500 		    (vdev_req && port->id.virt_id == id))
4501 			return port;
4502 
4503 	return NULL;
4504 }
4505 
4506 static int
4507 dlb2_verify_create_dir_port_args(struct dlb2_hw *hw,
4508 				 u32 domain_id,
4509 				 uintptr_t cq_dma_base,
4510 				 struct dlb2_create_dir_port_args *args,
4511 				 struct dlb2_cmd_response *resp,
4512 				 bool vdev_req,
4513 				 unsigned int vdev_id)
4514 {
4515 	struct dlb2_hw_domain *domain;
4516 
4517 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4518 
4519 	if (domain == NULL) {
4520 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4521 		return -EINVAL;
4522 	}
4523 
4524 	if (!domain->configured) {
4525 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4526 		return -EINVAL;
4527 	}
4528 
4529 	if (domain->started) {
4530 		resp->status = DLB2_ST_DOMAIN_STARTED;
4531 		return -EINVAL;
4532 	}
4533 
4534 	/*
4535 	 * If the user claims the queue is already configured, validate
4536 	 * the queue ID, its domain, and whether the queue is configured.
4537 	 */
4538 	if (args->queue_id != -1) {
4539 		struct dlb2_dir_pq_pair *queue;
4540 
4541 		queue = dlb2_get_domain_used_dir_pq(args->queue_id,
4542 						    vdev_req,
4543 						    domain);
4544 
4545 		if (queue == NULL || queue->domain_id.phys_id !=
4546 				domain->id.phys_id ||
4547 				!queue->queue_configured) {
4548 			resp->status = DLB2_ST_INVALID_DIR_QUEUE_ID;
4549 			return -EINVAL;
4550 		}
4551 	}
4552 
4553 	/*
4554 	 * If the port's queue is not configured, validate that a free
4555 	 * port-queue pair is available.
4556 	 */
4557 	if (args->queue_id == -1 &&
4558 	    dlb2_list_empty(&domain->avail_dir_pq_pairs)) {
4559 		resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
4560 		return -EINVAL;
4561 	}
4562 
4563 	/* Check cache-line alignment */
4564 	if ((cq_dma_base & 0x3F) != 0) {
4565 		resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4566 		return -EINVAL;
4567 	}
4568 
4569 	if (args->cq_depth != 1 &&
4570 	    args->cq_depth != 2 &&
4571 	    args->cq_depth != 4 &&
4572 	    args->cq_depth != 8 &&
4573 	    args->cq_depth != 16 &&
4574 	    args->cq_depth != 32 &&
4575 	    args->cq_depth != 64 &&
4576 	    args->cq_depth != 128 &&
4577 	    args->cq_depth != 256 &&
4578 	    args->cq_depth != 512 &&
4579 	    args->cq_depth != 1024) {
4580 		resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4581 		return -EINVAL;
4582 	}
4583 
4584 	return 0;
4585 }
4586 
4587 static void dlb2_dir_port_configure_pp(struct dlb2_hw *hw,
4588 				       struct dlb2_hw_domain *domain,
4589 				       struct dlb2_dir_pq_pair *port,
4590 				       bool vdev_req,
4591 				       unsigned int vdev_id)
4592 {
4593 	union dlb2_sys_dir_pp2vas r0 = { {0} };
4594 	union dlb2_sys_dir_pp_v r4 = { {0} };
4595 
4596 	r0.field.vas = domain->id.phys_id;
4597 
4598 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VAS(port->id.phys_id), r0.val);
4599 
4600 	if (vdev_req) {
4601 		union dlb2_sys_vf_dir_vpp2pp r1 = { {0} };
4602 		union dlb2_sys_dir_pp2vdev r2 = { {0} };
4603 		union dlb2_sys_vf_dir_vpp_v r3 = { {0} };
4604 		unsigned int offs;
4605 		u32 virt_id;
4606 
4607 		/*
4608 		 * DLB uses producer port address bits 17:12 to determine the
4609 		 * producer port ID. In Scalable IOV mode, PP accesses come
4610 		 * through the PF MMIO window for the physical producer port,
4611 		 * so for translation purposes the virtual and physical port
4612 		 * IDs are equal.
4613 		 */
4614 		if (hw->virt_mode == DLB2_VIRT_SRIOV)
4615 			virt_id = port->id.virt_id;
4616 		else
4617 			virt_id = port->id.phys_id;
4618 
4619 		r1.field.pp = port->id.phys_id;
4620 
4621 		offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
4622 
4623 		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), r1.val);
4624 
4625 		r2.field.vdev = vdev_id;
4626 
4627 		DLB2_CSR_WR(hw,
4628 			    DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
4629 			    r2.val);
4630 
4631 		r3.field.vpp_v = 1;
4632 
4633 		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r3.val);
4634 	}
4635 
4636 	r4.field.pp_v = 1;
4637 
4638 	DLB2_CSR_WR(hw,
4639 		    DLB2_SYS_DIR_PP_V(port->id.phys_id),
4640 		    r4.val);
4641 }
4642 
4643 static int dlb2_dir_port_configure_cq(struct dlb2_hw *hw,
4644 				      struct dlb2_hw_domain *domain,
4645 				      struct dlb2_dir_pq_pair *port,
4646 				      uintptr_t cq_dma_base,
4647 				      struct dlb2_create_dir_port_args *args,
4648 				      bool vdev_req,
4649 				      unsigned int vdev_id)
4650 {
4651 	union dlb2_sys_dir_cq_addr_l r0 = { {0} };
4652 	union dlb2_sys_dir_cq_addr_u r1 = { {0} };
4653 	union dlb2_sys_dir_cq2vf_pf_ro r2 = { {0} };
4654 	union dlb2_chp_dir_cq_tkn_depth_sel r3 = { {0} };
4655 	union dlb2_lsp_cq_dir_tkn_depth_sel_dsi r4 = { {0} };
4656 	union dlb2_sys_dir_cq_fmt r9 = { {0} };
4657 	union dlb2_sys_dir_cq_at r10 = { {0} };
4658 	union dlb2_sys_dir_cq_pasid r11 = { {0} };
4659 	union dlb2_chp_dir_cq2vas r12 = { {0} };
4660 
4661 	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4662 	r0.field.addr_l = cq_dma_base >> 6;
4663 
4664 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id), r0.val);
4665 
4666 	r1.field.addr_u = cq_dma_base >> 32;
4667 
4668 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id), r1.val);
4669 
4670 	/*
4671 	 * 'ro' == relaxed ordering. This setting allows DLB2 to write
4672 	 * cache lines out-of-order (but QEs within a cache line are always
4673 	 * updated in-order).
4674 	 */
4675 	r2.field.vf = vdev_id;
4676 	r2.field.is_pf = !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV);
4677 	r2.field.ro = 1;
4678 
4679 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id), r2.val);
4680 
4681 	if (args->cq_depth <= 8) {
4682 		r3.field.token_depth_select = 1;
4683 	} else if (args->cq_depth == 16) {
4684 		r3.field.token_depth_select = 2;
4685 	} else if (args->cq_depth == 32) {
4686 		r3.field.token_depth_select = 3;
4687 	} else if (args->cq_depth == 64) {
4688 		r3.field.token_depth_select = 4;
4689 	} else if (args->cq_depth == 128) {
4690 		r3.field.token_depth_select = 5;
4691 	} else if (args->cq_depth == 256) {
4692 		r3.field.token_depth_select = 6;
4693 	} else if (args->cq_depth == 512) {
4694 		r3.field.token_depth_select = 7;
4695 	} else if (args->cq_depth == 1024) {
4696 		r3.field.token_depth_select = 8;
4697 	} else {
4698 		DLB2_HW_ERR(hw,
4699 			    "[%s():%d] Internal error: invalid CQ depth\n",
4700 			    __func__, __LINE__);
4701 		return -EFAULT;
4702 	}
4703 
4704 	DLB2_CSR_WR(hw,
4705 		    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id.phys_id),
4706 		    r3.val);
4707 
4708 	/*
4709 	 * To support CQs with depth less than 8, program the token count
4710 	 * register with a non-zero initial value. Operations such as domain
4711 	 * reset must take this initial value into account when quiescing the
4712 	 * CQ.
4713 	 */
4714 	port->init_tkn_cnt = 0;
4715 
4716 	if (args->cq_depth < 8) {
4717 		union dlb2_lsp_cq_dir_tkn_cnt r13 = { {0} };
4718 
4719 		port->init_tkn_cnt = 8 - args->cq_depth;
4720 
4721 		r13.field.count = port->init_tkn_cnt;
4722 
4723 		DLB2_CSR_WR(hw,
4724 			    DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),
4725 			    r13.val);
4726 	} else {
4727 		DLB2_CSR_WR(hw,
4728 			    DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),
4729 			    DLB2_LSP_CQ_DIR_TKN_CNT_RST);
4730 	}
4731 
4732 	r4.field.token_depth_select = r3.field.token_depth_select;
4733 	r4.field.disable_wb_opt = 0;
4734 	r4.field.ignore_depth = 0;
4735 
4736 	DLB2_CSR_WR(hw,
4737 		    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id.phys_id),
4738 		    r4.val);
4739 
4740 	/* Reset the CQ write pointer */
4741 	DLB2_CSR_WR(hw,
4742 		    DLB2_CHP_DIR_CQ_WPTR(port->id.phys_id),
4743 		    DLB2_CHP_DIR_CQ_WPTR_RST);
4744 
4745 	/* Virtualize the PPID */
4746 	r9.field.keep_pf_ppid = 0;
4747 
4748 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_FMT(port->id.phys_id), r9.val);
4749 
4750 	/*
4751 	 * Address translation (AT) settings: 0: untranslated, 2: translated
4752 	 * (see ATS spec regarding Address Type field for more details)
4753 	 */
4754 	r10.field.cq_at = 0;
4755 
4756 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_AT(port->id.phys_id), r10.val);
4757 
4758 	if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4759 		r11.field.pasid = hw->pasid[vdev_id];
4760 		r11.field.fmt2 = 1;
4761 	}
4762 
4763 	DLB2_CSR_WR(hw,
4764 		    DLB2_SYS_DIR_CQ_PASID(port->id.phys_id),
4765 		    r11.val);
4766 
4767 	r12.field.cq2vas = domain->id.phys_id;
4768 
4769 	DLB2_CSR_WR(hw, DLB2_CHP_DIR_CQ2VAS(port->id.phys_id), r12.val);
4770 
4771 	return 0;
4772 }
4773 
4774 static int dlb2_configure_dir_port(struct dlb2_hw *hw,
4775 				   struct dlb2_hw_domain *domain,
4776 				   struct dlb2_dir_pq_pair *port,
4777 				   uintptr_t cq_dma_base,
4778 				   struct dlb2_create_dir_port_args *args,
4779 				   bool vdev_req,
4780 				   unsigned int vdev_id)
4781 {
4782 	int ret;
4783 
4784 	ret = dlb2_dir_port_configure_cq(hw,
4785 					 domain,
4786 					 port,
4787 					 cq_dma_base,
4788 					 args,
4789 					 vdev_req,
4790 					 vdev_id);
4791 
4792 	if (ret < 0)
4793 		return ret;
4794 
4795 	dlb2_dir_port_configure_pp(hw,
4796 				   domain,
4797 				   port,
4798 				   vdev_req,
4799 				   vdev_id);
4800 
4801 	dlb2_dir_port_cq_enable(hw, port);
4802 
4803 	port->enabled = true;
4804 
4805 	port->port_configured = true;
4806 
4807 	return 0;
4808 }
4809 
4810 /**
4811  * dlb2_hw_create_dir_port() - Allocate and initialize a DLB directed port
4812  *	and queue. The port and its queue share the same ID and name.
4813  * @hw:	Contains the current state of the DLB2 hardware.
4814  * @domain_id: Domain ID
4815  * @args: User-provided arguments.
4816  * @cq_dma_base: Base DMA address for consumer queue memory
4817  * @resp: Response to user.
4818  * @vdev_req: Request came from a virtual device.
4819  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
4820  *
4821  * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
4822  * satisfy a request, resp->status will be set accordingly.
4823  */
4824 int dlb2_hw_create_dir_port(struct dlb2_hw *hw,
4825 			    u32 domain_id,
4826 			    struct dlb2_create_dir_port_args *args,
4827 			    uintptr_t cq_dma_base,
4828 			    struct dlb2_cmd_response *resp,
4829 			    bool vdev_req,
4830 			    unsigned int vdev_id)
4831 {
4832 	struct dlb2_dir_pq_pair *port;
4833 	struct dlb2_hw_domain *domain;
4834 	int ret;
4835 
4836 	dlb2_log_create_dir_port_args(hw,
4837 				      domain_id,
4838 				      cq_dma_base,
4839 				      args,
4840 				      vdev_req,
4841 				      vdev_id);
4842 
4843 	/*
4844 	 * Verify that hardware resources are available before attempting to
4845 	 * satisfy the request. This simplifies the error unwinding code.
4846 	 */
4847 	ret = dlb2_verify_create_dir_port_args(hw,
4848 					       domain_id,
4849 					       cq_dma_base,
4850 					       args,
4851 					       resp,
4852 					       vdev_req,
4853 					       vdev_id);
4854 	if (ret)
4855 		return ret;
4856 
4857 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4858 
4859 	if (args->queue_id != -1)
4860 		port = dlb2_get_domain_used_dir_pq(args->queue_id,
4861 						   vdev_req,
4862 						   domain);
4863 	else
4864 		port = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
4865 					  typeof(*port));
4866 	if (port == NULL) {
4867 		DLB2_HW_ERR(hw,
4868 			    "[%s():%d] Internal error: no available dir ports\n",
4869 			    __func__, __LINE__);
4870 		return -EFAULT;
4871 	}
4872 
4873 	ret = dlb2_configure_dir_port(hw,
4874 				      domain,
4875 				      port,
4876 				      cq_dma_base,
4877 				      args,
4878 				      vdev_req,
4879 				      vdev_id);
4880 	if (ret < 0)
4881 		return ret;
4882 
4883 	/*
4884 	 * Configuration succeeded, so move the resource from the 'avail' to
4885 	 * the 'used' list (if it's not already there).
4886 	 */
4887 	if (args->queue_id == -1) {
4888 		dlb2_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
4889 
4890 		dlb2_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
4891 	}
4892 
4893 	resp->status = 0;
4894 	resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4895 
4896 	return 0;
4897 }
4898 
4899 static void dlb2_configure_dir_queue(struct dlb2_hw *hw,
4900 				     struct dlb2_hw_domain *domain,
4901 				     struct dlb2_dir_pq_pair *queue,
4902 				     struct dlb2_create_dir_queue_args *args,
4903 				     bool vdev_req,
4904 				     unsigned int vdev_id)
4905 {
4906 	union dlb2_sys_dir_vasqid_v r0 = { {0} };
4907 	union dlb2_sys_dir_qid_its r1 = { {0} };
4908 	union dlb2_lsp_qid_dir_depth_thrsh r2 = { {0} };
4909 	union dlb2_sys_dir_qid_v r5 = { {0} };
4910 
4911 	unsigned int offs;
4912 
4913 	/* QID write permissions are turned on when the domain is started */
4914 	r0.field.vasqid_v = 0;
4915 
4916 	offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES +
4917 		queue->id.phys_id;
4918 
4919 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
4920 
4921 	/* Don't timestamp QEs that pass through this queue */
4922 	r1.field.qid_its = 0;
4923 
4924 	DLB2_CSR_WR(hw,
4925 		    DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
4926 		    r1.val);
4927 
4928 	r2.field.thresh = args->depth_threshold;
4929 
4930 	DLB2_CSR_WR(hw,
4931 		    DLB2_LSP_QID_DIR_DEPTH_THRSH(queue->id.phys_id),
4932 		    r2.val);
4933 
4934 	if (vdev_req) {
4935 		union dlb2_sys_vf_dir_vqid_v r3 = { {0} };
4936 		union dlb2_sys_vf_dir_vqid2qid r4 = { {0} };
4937 
4938 		offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES + queue->id.virt_id;
4939 
4940 		r3.field.vqid_v = 1;
4941 
4942 		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(offs), r3.val);
4943 
4944 		r4.field.qid = queue->id.phys_id;
4945 
4946 		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(offs), r4.val);
4947 	}
4948 
4949 	r5.field.qid_v = 1;
4950 
4951 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_V(queue->id.phys_id), r5.val);
4952 
4953 	queue->queue_configured = true;
4954 }
4955 
4956 static void
4957 dlb2_log_create_dir_queue_args(struct dlb2_hw *hw,
4958 			       u32 domain_id,
4959 			       struct dlb2_create_dir_queue_args *args,
4960 			       bool vdev_req,
4961 			       unsigned int vdev_id)
4962 {
4963 	DLB2_HW_DBG(hw, "DLB2 create directed queue arguments:\n");
4964 	if (vdev_req)
4965 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4966 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
4967 	DLB2_HW_DBG(hw, "\tPort ID:   %d\n", args->port_id);
4968 }
4969 
4970 static int
4971 dlb2_verify_create_dir_queue_args(struct dlb2_hw *hw,
4972 				  u32 domain_id,
4973 				  struct dlb2_create_dir_queue_args *args,
4974 				  struct dlb2_cmd_response *resp,
4975 				  bool vdev_req,
4976 				  unsigned int vdev_id)
4977 {
4978 	struct dlb2_hw_domain *domain;
4979 
4980 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4981 
4982 	if (domain == NULL) {
4983 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4984 		return -EINVAL;
4985 	}
4986 
4987 	if (!domain->configured) {
4988 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4989 		return -EINVAL;
4990 	}
4991 
4992 	if (domain->started) {
4993 		resp->status = DLB2_ST_DOMAIN_STARTED;
4994 		return -EINVAL;
4995 	}
4996 
4997 	/*
4998 	 * If the user claims the port is already configured, validate the port
4999 	 * ID, its domain, and whether the port is configured.
5000 	 */
5001 	if (args->port_id != -1) {
5002 		struct dlb2_dir_pq_pair *port;
5003 
5004 		port = dlb2_get_domain_used_dir_pq(args->port_id,
5005 						   vdev_req,
5006 						   domain);
5007 
5008 		if (port == NULL || port->domain_id.phys_id !=
5009 				domain->id.phys_id || !port->port_configured) {
5010 			resp->status = DLB2_ST_INVALID_PORT_ID;
5011 			return -EINVAL;
5012 		}
5013 	}
5014 
5015 	/*
5016 	 * If the queue's port is not configured, validate that a free
5017 	 * port-queue pair is available.
5018 	 */
5019 	if (args->port_id == -1 &&
5020 	    dlb2_list_empty(&domain->avail_dir_pq_pairs)) {
5021 		resp->status = DLB2_ST_DIR_QUEUES_UNAVAILABLE;
5022 		return -EINVAL;
5023 	}
5024 
5025 	return 0;
5026 }
5027 
5028 /**
5029  * dlb2_hw_create_dir_queue() - Allocate and initialize a DLB DIR queue.
5030  * @hw:	Contains the current state of the DLB2 hardware.
5031  * @domain_id: Domain ID
5032  * @args: User-provided arguments.
5033  * @resp: Response to user.
5034  * @vdev_req: Request came from a virtual device.
5035  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
5036  *
5037  * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
5038  * satisfy a request, resp->status will be set accordingly.
5039  */
5040 int dlb2_hw_create_dir_queue(struct dlb2_hw *hw,
5041 			     u32 domain_id,
5042 			     struct dlb2_create_dir_queue_args *args,
5043 			     struct dlb2_cmd_response *resp,
5044 			     bool vdev_req,
5045 			     unsigned int vdev_id)
5046 {
5047 	struct dlb2_dir_pq_pair *queue;
5048 	struct dlb2_hw_domain *domain;
5049 	int ret;
5050 
5051 	dlb2_log_create_dir_queue_args(hw, domain_id, args, vdev_req, vdev_id);
5052 
5053 	/*
5054 	 * Verify that hardware resources are available before attempting to
5055 	 * satisfy the request. This simplifies the error unwinding code.
5056 	 */
5057 	ret = dlb2_verify_create_dir_queue_args(hw,
5058 						domain_id,
5059 						args,
5060 						resp,
5061 						vdev_req,
5062 						vdev_id);
5063 	if (ret)
5064 		return ret;
5065 
5066 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5067 	if (domain == NULL) {
5068 		DLB2_HW_ERR(hw,
5069 			    "[%s():%d] Internal error: domain not found\n",
5070 			    __func__, __LINE__);
5071 		return -EFAULT;
5072 	}
5073 
5074 	if (args->port_id != -1)
5075 		queue = dlb2_get_domain_used_dir_pq(args->port_id,
5076 						    vdev_req,
5077 						    domain);
5078 	else
5079 		queue = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
5080 					   typeof(*queue));
5081 	if (queue == NULL) {
5082 		DLB2_HW_ERR(hw,
5083 			    "[%s():%d] Internal error: no available dir queues\n",
5084 			    __func__, __LINE__);
5085 		return -EFAULT;
5086 	}
5087 
5088 	dlb2_configure_dir_queue(hw, domain, queue, args, vdev_req, vdev_id);
5089 
5090 	/*
5091 	 * Configuration succeeded, so move the resource from the 'avail' to
5092 	 * the 'used' list (if it's not already there).
5093 	 */
5094 	if (args->port_id == -1) {
5095 		dlb2_list_del(&domain->avail_dir_pq_pairs,
5096 			      &queue->domain_list);
5097 
5098 		dlb2_list_add(&domain->used_dir_pq_pairs,
5099 			      &queue->domain_list);
5100 	}
5101 
5102 	resp->status = 0;
5103 
5104 	resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
5105 
5106 	return 0;
5107 }
5108 
5109 static bool
5110 dlb2_port_find_slot_with_pending_map_queue(struct dlb2_ldb_port *port,
5111 					   struct dlb2_ldb_queue *queue,
5112 					   int *slot)
5113 {
5114 	int i;
5115 
5116 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
5117 		struct dlb2_ldb_port_qid_map *map = &port->qid_map[i];
5118 
5119 		if (map->state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP &&
5120 		    map->pending_qid == queue->id.phys_id)
5121 			break;
5122 	}
5123 
5124 	*slot = i;
5125 
5126 	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
5127 }
5128 
5129 static void dlb2_ldb_port_change_qid_priority(struct dlb2_hw *hw,
5130 					      struct dlb2_ldb_port *port,
5131 					      int slot,
5132 					      struct dlb2_map_qid_args *args)
5133 {
5134 	union dlb2_lsp_cq2priov r0;
5135 
5136 	/* Read-modify-write the priority and valid bit register */
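	/*
	 * CQ2PRIOV holds a valid bit per QID slot and a 3-bit priority per
	 * slot, hence the shifts by 'slot' and 'slot * 3' below.
	 */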
5137 	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id));
5138 
5139 	r0.field.v |= 1 << slot;
5140 	r0.field.prio |= (args->priority & 0x7) << slot * 3;
5141 
5142 	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id), r0.val);
5143 
5144 	dlb2_flush_csr(hw);
5145 
5146 	port->qid_map[slot].priority = args->priority;
5147 }
5148 
5149 static int dlb2_verify_map_qid_slot_available(struct dlb2_ldb_port *port,
5150 					      struct dlb2_ldb_queue *queue,
5151 					      struct dlb2_cmd_response *resp)
5152 {
5153 	enum dlb2_qid_map_state state;
5154 	int i;
5155 
5156 	/* Unused slot available? */
5157 	if (port->num_mappings < DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
5158 		return 0;
5159 
5160 	/*
5161 	 * If the queue is already mapped (from the application's perspective),
5162 	 * this is simply a priority update.
5163 	 */
5164 	state = DLB2_QUEUE_MAPPED;
5165 	if (dlb2_port_find_slot_queue(port, state, queue, &i))
5166 		return 0;
5167 
5168 	state = DLB2_QUEUE_MAP_IN_PROG;
5169 	if (dlb2_port_find_slot_queue(port, state, queue, &i))
5170 		return 0;
5171 
5172 	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i))
5173 		return 0;
5174 
5175 	/*
5176 	 * If the slot contains an unmap in progress, it's considered
5177 	 * available.
5178 	 */
5179 	state = DLB2_QUEUE_UNMAP_IN_PROG;
5180 	if (dlb2_port_find_slot(port, state, &i))
5181 		return 0;
5182 
5183 	state = DLB2_QUEUE_UNMAPPED;
5184 	if (dlb2_port_find_slot(port, state, &i))
5185 		return 0;
5186 
5187 	resp->status = DLB2_ST_NO_QID_SLOTS_AVAILABLE;
5188 	return -EINVAL;
5189 }
5190 
5191 static struct dlb2_ldb_queue *
5192 dlb2_get_domain_ldb_queue(u32 id,
5193 			  bool vdev_req,
5194 			  struct dlb2_hw_domain *domain)
5195 {
5196 	struct dlb2_list_entry *iter;
5197 	struct dlb2_ldb_queue *queue;
5198 	RTE_SET_USED(iter);
5199 
5200 	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
5201 		return NULL;
5202 
5203 	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter)
5204 		if ((!vdev_req && queue->id.phys_id == id) ||
5205 		    (vdev_req && queue->id.virt_id == id))
5206 			return queue;
5207 
5208 	return NULL;
5209 }
5210 
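/*
 * Look up a load-balanced port owned by the domain by physical or virtual
 * ID. Note that both the used and available per-CoS port lists are searched.
 */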
5211 static struct dlb2_ldb_port *
5212 dlb2_get_domain_used_ldb_port(u32 id,
5213 			      bool vdev_req,
5214 			      struct dlb2_hw_domain *domain)
5215 {
5216 	struct dlb2_list_entry *iter;
5217 	struct dlb2_ldb_port *port;
5218 	int i;
5219 	RTE_SET_USED(iter);
5220 
5221 	if (id >= DLB2_MAX_NUM_LDB_PORTS)
5222 		return NULL;
5223 
5224 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
5225 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
5226 			if ((!vdev_req && port->id.phys_id == id) ||
5227 			    (vdev_req && port->id.virt_id == id))
5228 				return port;
5229 
5230 		DLB2_DOM_LIST_FOR(domain->avail_ldb_ports[i], port, iter)
5231 			if ((!vdev_req && port->id.phys_id == id) ||
5232 			    (vdev_req && port->id.virt_id == id))
5233 				return port;
5234 	}
5235 
5236 	return NULL;
5237 }
5238 
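/*
 * Validate a map request: the domain must be configured, the port and queue
 * must be configured members of that domain, and the priority must be less
 * than DLB2_QID_PRIORITIES.
 */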
5239 static int dlb2_verify_map_qid_args(struct dlb2_hw *hw,
5240 				    u32 domain_id,
5241 				    struct dlb2_map_qid_args *args,
5242 				    struct dlb2_cmd_response *resp,
5243 				    bool vdev_req,
5244 				    unsigned int vdev_id)
5245 {
5246 	struct dlb2_hw_domain *domain;
5247 	struct dlb2_ldb_port *port;
5248 	struct dlb2_ldb_queue *queue;
5249 	int id;
5250 
5251 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5252 
5253 	if (domain == NULL) {
5254 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5255 		return -EINVAL;
5256 	}
5257 
5258 	if (!domain->configured) {
5259 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5260 		return -EINVAL;
5261 	}
5262 
5263 	id = args->port_id;
5264 
5265 	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
5266 
5267 	if (port == NULL || !port->configured) {
5268 		resp->status = DLB2_ST_INVALID_PORT_ID;
5269 		return -EINVAL;
5270 	}
5271 
5272 	if (args->priority >= DLB2_QID_PRIORITIES) {
5273 		resp->status = DLB2_ST_INVALID_PRIORITY;
5274 		return -EINVAL;
5275 	}
5276 
5277 	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
5278 
5279 	if (queue == NULL || !queue->configured) {
5280 		resp->status = DLB2_ST_INVALID_QID;
5281 		return -EINVAL;
5282 	}
5283 
5284 	if (queue->domain_id.phys_id != domain->id.phys_id) {
5285 		resp->status = DLB2_ST_INVALID_QID;
5286 		return -EINVAL;
5287 	}
5288 
5289 	if (port->domain_id.phys_id != domain->id.phys_id) {
5290 		resp->status = DLB2_ST_INVALID_PORT_ID;
5291 		return -EINVAL;
5292 	}
5293 
5294 	return 0;
5295 }
5296 
5297 static void dlb2_log_map_qid(struct dlb2_hw *hw,
5298 			     u32 domain_id,
5299 			     struct dlb2_map_qid_args *args,
5300 			     bool vdev_req,
5301 			     unsigned int vdev_id)
5302 {
5303 	DLB2_HW_DBG(hw, "DLB2 map QID arguments:\n");
5304 	if (vdev_req)
5305 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5306 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
5307 		    domain_id);
5308 	DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
5309 		    args->port_id);
5310 	DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
5311 		    args->qid);
5312 	DLB2_HW_DBG(hw, "\tPriority:  %d\n",
5313 		    args->priority);
5314 }
5315 
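/**
 * dlb2_hw_map_qid() - Map a load-balanced queue to a load-balanced port
 * @hw: Contains the current state of the DLB2 hardware.
 * @domain_id: Domain ID
 * @args: Map QID arguments (port ID, queue ID, and priority).
 * @resp: Response to user.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Note: the map may not be complete when this function returns. If the only
 * free slot is pending an unmap, or the domain has already started, the map
 * is finished asynchronously.
 *
 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
 * satisfy a request, resp->status will be set accordingly.
 */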
5316 int dlb2_hw_map_qid(struct dlb2_hw *hw,
5317 		    u32 domain_id,
5318 		    struct dlb2_map_qid_args *args,
5319 		    struct dlb2_cmd_response *resp,
5320 		    bool vdev_req,
5321 		    unsigned int vdev_id)
5322 {
5323 	struct dlb2_hw_domain *domain;
5324 	struct dlb2_ldb_queue *queue;
5325 	enum dlb2_qid_map_state st;
5326 	struct dlb2_ldb_port *port;
5327 	int ret, i, id;
5328 	u8 prio;
5329 
5330 	dlb2_log_map_qid(hw, domain_id, args, vdev_req, vdev_id);
5331 
5332 	/*
5333 	 * Verify that hardware resources are available before attempting to
5334 	 * satisfy the request. This simplifies the error unwinding code.
5335 	 */
5336 	ret = dlb2_verify_map_qid_args(hw,
5337 				       domain_id,
5338 				       args,
5339 				       resp,
5340 				       vdev_req,
5341 				       vdev_id);
5342 	if (ret)
5343 		return ret;
5344 
5345 	prio = args->priority;
5346 
5347 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5348 	if (domain == NULL) {
5349 		DLB2_HW_ERR(hw,
5350 			    "[%s():%d] Internal error: domain not found\n",
5351 			    __func__, __LINE__);
5352 		return -EFAULT;
5353 	}
5354 
5355 	id = args->port_id;
5356 
5357 	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
5358 	if (port == NULL) {
5359 		DLB2_HW_ERR(hw,
5360 			    "[%s():%d] Internal error: port not found\n",
5361 			    __func__, __LINE__);
5362 		return -EFAULT;
5363 	}
5364 
5365 	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
5366 	if (queue == NULL) {
5367 		DLB2_HW_ERR(hw,
5368 			    "[%s():%d] Internal error: queue not found\n",
5369 			    __func__, __LINE__);
5370 		return -EFAULT;
5371 	}
5372 
5373 	/*
5374 	 * If there are any outstanding detach operations for this port,
5375 	 * attempt to complete them. This may be necessary to free up a QID
5376 	 * slot for this requested mapping.
5377 	 */
5378 	if (port->num_pending_removals)
5379 		dlb2_domain_finish_unmap_port(hw, domain, port);
5380 
5381 	ret = dlb2_verify_map_qid_slot_available(port, queue, resp);
5382 	if (ret)
5383 		return ret;
5384 
5385 	/* Hardware requires disabling the CQ before mapping QIDs. */
5386 	if (port->enabled)
5387 		dlb2_ldb_port_cq_disable(hw, port);
5388 
5389 	/*
5390 	 * If this is only a priority change, don't perform the full QID->CQ
5391 	 * mapping procedure.
5392 	 */
5393 	st = DLB2_QUEUE_MAPPED;
5394 	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5395 		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
5396 			DLB2_HW_ERR(hw,
5397 				    "[%s():%d] Internal error: port slot tracking failed\n",
5398 				    __func__, __LINE__);
5399 			return -EFAULT;
5400 		}
5401 
5402 		if (prio != port->qid_map[i].priority) {
5403 			dlb2_ldb_port_change_qid_priority(hw, port, i, args);
5404 			DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
5405 		}
5406 
5407 		st = DLB2_QUEUE_MAPPED;
5408 		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5409 		if (ret)
5410 			return ret;
5411 
5412 		goto map_qid_done;
5413 	}
5414 
5415 	st = DLB2_QUEUE_UNMAP_IN_PROG;
5416 	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5417 		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
5418 			DLB2_HW_ERR(hw,
5419 				    "[%s():%d] Internal error: port slot tracking failed\n",
5420 				    __func__, __LINE__);
5421 			return -EFAULT;
5422 		}
5423 
5424 		if (prio != port->qid_map[i].priority) {
5425 			dlb2_ldb_port_change_qid_priority(hw, port, i, args);
5426 			DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
5427 		}
5428 
5429 		st = DLB2_QUEUE_MAPPED;
5430 		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5431 		if (ret)
5432 			return ret;
5433 
5434 		goto map_qid_done;
5435 	}
5436 
5437 	/*
5438 	 * If this is a priority change on an in-progress mapping, don't
5439 	 * perform the full QID->CQ mapping procedure.
5440 	 */
5441 	st = DLB2_QUEUE_MAP_IN_PROG;
5442 	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5443 		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
5444 			DLB2_HW_ERR(hw,
5445 				    "[%s():%d] Internal error: port slot tracking failed\n",
5446 				    __func__, __LINE__);
5447 			return -EFAULT;
5448 		}
5449 
5450 		port->qid_map[i].priority = prio;
5451 
5452 		DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
5453 
5454 		goto map_qid_done;
5455 	}
5456 
5457 	/*
5458 	 * If this is a priority change on a pending mapping, update the
5459 	 * pending priority.
5460 	 */
5461 	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
5462 		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
5463 			DLB2_HW_ERR(hw,
5464 				    "[%s():%d] Internal error: port slot tracking failed\n",
5465 				    __func__, __LINE__);
5466 			return -EFAULT;
5467 		}
5468 
5469 		port->qid_map[i].pending_priority = prio;
5470 
5471 		DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
5472 
5473 		goto map_qid_done;
5474 	}
5475 
5476 	/*
5477 	 * If all the CQ's slots are in use, then there's an unmap in progress
5478 	 * (guaranteed by dlb2_verify_map_qid_slot_available()), so add this
5479 	 * mapping to pending_map and return. When the removal is completed for
5480 	 * the slot's current occupant, this mapping will be performed.
5481 	 */
5482 	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &i)) {
5483 		if (dlb2_port_find_slot(port, DLB2_QUEUE_UNMAP_IN_PROG, &i)) {
5484 			enum dlb2_qid_map_state st;
5485 
5486 			if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
5487 				DLB2_HW_ERR(hw,
5488 					    "[%s():%d] Internal error: port slot tracking failed\n",
5489 					    __func__, __LINE__);
5490 				return -EFAULT;
5491 			}
5492 
5493 			port->qid_map[i].pending_qid = queue->id.phys_id;
5494 			port->qid_map[i].pending_priority = prio;
5495 
5496 			st = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
5497 
5498 			ret = dlb2_port_slot_state_transition(hw, port, queue,
5499 							      i, st);
5500 			if (ret)
5501 				return ret;
5502 
5503 			DLB2_HW_DBG(hw, "DLB2 map: map pending removal\n");
5504 
5505 			goto map_qid_done;
5506 		}
5507 	}
5508 
5509 	/*
5510 	 * If the domain has started, a special "dynamic" CQ->queue mapping
5511 	 * procedure is required in order to safely update the CQ<->QID tables.
5512 	 * The "static" procedure cannot be used when traffic is flowing,
5513 	 * because the CQ<->QID tables cannot be updated atomically and the
5514 	 * scheduler won't see the new mapping unless the queue's if_status
5515 	 * changes, which isn't guaranteed.
5516 	 */
5517 	ret = dlb2_ldb_port_map_qid(hw, domain, port, queue, prio);
5518 
5519 	/* If ret is less than zero, it's due to an internal error */
5520 	if (ret < 0)
5521 		return ret;
5522 
5523 map_qid_done:
5524 	if (port->enabled)
5525 		dlb2_ldb_port_cq_enable(hw, port);
5526 
5527 	resp->status = 0;
5528 
5529 	return 0;
5530 }
5531 
5532 static void dlb2_log_unmap_qid(struct dlb2_hw *hw,
5533 			       u32 domain_id,
5534 			       struct dlb2_unmap_qid_args *args,
5535 			       bool vdev_req,
5536 			       unsigned int vdev_id)
5537 {
5538 	DLB2_HW_DBG(hw, "DLB2 unmap QID arguments:\n");
5539 	if (vdev_req)
5540 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5541 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
5542 		    domain_id);
5543 	DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
5544 		    args->port_id);
5545 	DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
5546 		    args->qid);
5547 	if (args->qid < DLB2_MAX_NUM_LDB_QUEUES)
5548 		DLB2_HW_DBG(hw, "\tQueue's num mappings:  %d\n",
5549 			    hw->rsrcs.ldb_queues[args->qid].num_mappings);
5550 }
5551 
5552 static int dlb2_verify_unmap_qid_args(struct dlb2_hw *hw,
5553 				      u32 domain_id,
5554 				      struct dlb2_unmap_qid_args *args,
5555 				      struct dlb2_cmd_response *resp,
5556 				      bool vdev_req,
5557 				      unsigned int vdev_id)
5558 {
5559 	enum dlb2_qid_map_state state;
5560 	struct dlb2_hw_domain *domain;
5561 	struct dlb2_ldb_queue *queue;
5562 	struct dlb2_ldb_port *port;
5563 	int slot;
5564 	int id;
5565 
5566 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5567 
5568 	if (domain == NULL) {
5569 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5570 		return -EINVAL;
5571 	}
5572 
5573 	if (!domain->configured) {
5574 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5575 		return -EINVAL;
5576 	}
5577 
5578 	id = args->port_id;
5579 
5580 	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
5581 
5582 	if (port == NULL || !port->configured) {
5583 		resp->status = DLB2_ST_INVALID_PORT_ID;
5584 		return -EINVAL;
5585 	}
5586 
5587 	if (port->domain_id.phys_id != domain->id.phys_id) {
5588 		resp->status = DLB2_ST_INVALID_PORT_ID;
5589 		return -EINVAL;
5590 	}
5591 
5592 	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
5593 
5594 	if (queue == NULL || !queue->configured) {
5595 		DLB2_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
5596 			    __func__, args->qid);
5597 		resp->status = DLB2_ST_INVALID_QID;
5598 		return -EINVAL;
5599 	}
5600 
5601 	/*
5602 	 * Verify that the port has the queue mapped. From the application's
5603 	 * perspective a queue is mapped if it is actually mapped, the map is
5604 	 * in progress, or the map is blocked pending an unmap.
5605 	 */
5606 	state = DLB2_QUEUE_MAPPED;
5607 	if (dlb2_port_find_slot_queue(port, state, queue, &slot))
5608 		return 0;
5609 
5610 	state = DLB2_QUEUE_MAP_IN_PROG;
5611 	if (dlb2_port_find_slot_queue(port, state, queue, &slot))
5612 		return 0;
5613 
5614 	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &slot))
5615 		return 0;
5616 
5617 	resp->status = DLB2_ST_INVALID_QID;
5618 	return -EINVAL;
5619 }
5620 
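/**
 * dlb2_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port
 * @hw: Contains the current state of the DLB2 hardware.
 * @domain_id: Domain ID
 * @args: Unmap QID arguments (port ID and queue ID).
 * @resp: Response to user.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Note: the unmap is asynchronous and may not be complete when this function
 * returns; dlb2_hw_pending_port_unmaps() reports how many unmaps are still
 * outstanding for the port.
 *
 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
 * satisfy a request, resp->status will be set accordingly.
 */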
5621 int dlb2_hw_unmap_qid(struct dlb2_hw *hw,
5622 		      u32 domain_id,
5623 		      struct dlb2_unmap_qid_args *args,
5624 		      struct dlb2_cmd_response *resp,
5625 		      bool vdev_req,
5626 		      unsigned int vdev_id)
5627 {
5628 	struct dlb2_hw_domain *domain;
5629 	struct dlb2_ldb_queue *queue;
5630 	enum dlb2_qid_map_state st;
5631 	struct dlb2_ldb_port *port;
5632 	bool unmap_complete;
5633 	int i, ret, id;
5634 
5635 	dlb2_log_unmap_qid(hw, domain_id, args, vdev_req, vdev_id);
5636 
5637 	/*
5638 	 * Verify that hardware resources are available before attempting to
5639 	 * satisfy the request. This simplifies the error unwinding code.
5640 	 */
5641 	ret = dlb2_verify_unmap_qid_args(hw,
5642 					 domain_id,
5643 					 args,
5644 					 resp,
5645 					 vdev_req,
5646 					 vdev_id);
5647 	if (ret)
5648 		return ret;
5649 
5650 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5651 	if (domain == NULL) {
5652 		DLB2_HW_ERR(hw,
5653 			    "[%s():%d] Internal error: domain not found\n",
5654 			    __func__, __LINE__);
5655 		return -EFAULT;
5656 	}
5657 
5658 	id = args->port_id;
5659 
5660 	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
5661 	if (port == NULL) {
5662 		DLB2_HW_ERR(hw,
5663 			    "[%s():%d] Internal error: port not found\n",
5664 			    __func__, __LINE__);
5665 		return -EFAULT;
5666 	}
5667 
5668 	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
5669 	if (queue == NULL) {
5670 		DLB2_HW_ERR(hw,
5671 			    "[%s():%d] Internal error: queue not found\n",
5672 			    __func__, __LINE__);
5673 		return -EFAULT;
5674 	}
5675 
5676 	/*
5677 	 * If the queue hasn't been mapped yet, we need to update the slot's
5678 	 * state and re-enable the queue's inflights.
5679 	 */
5680 	st = DLB2_QUEUE_MAP_IN_PROG;
5681 	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5682 		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
5683 			DLB2_HW_ERR(hw,
5684 				    "[%s():%d] Internal error: port slot tracking failed\n",
5685 				    __func__, __LINE__);
5686 			return -EFAULT;
5687 		}
5688 
5689 		/*
5690 		 * Since the in-progress map was aborted, re-enable the QID's
5691 		 * inflights.
5692 		 */
5693 		if (queue->num_pending_additions == 0)
5694 			dlb2_ldb_queue_set_inflight_limit(hw, queue);
5695 
5696 		st = DLB2_QUEUE_UNMAPPED;
5697 		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5698 		if (ret)
5699 			return ret;
5700 
5701 		goto unmap_qid_done;
5702 	}
5703 
5704 	/*
5705 	 * If the queue mapping is on hold pending an unmap, we simply need to
5706 	 * update the slot's state.
5707 	 */
5708 	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
5709 		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
5710 			DLB2_HW_ERR(hw,
5711 				    "[%s():%d] Internal error: port slot tracking failed\n",
5712 				    __func__, __LINE__);
5713 			return -EFAULT;
5714 		}
5715 
5716 		st = DLB2_QUEUE_UNMAP_IN_PROG;
5717 		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5718 		if (ret)
5719 			return ret;
5720 
5721 		goto unmap_qid_done;
5722 	}
5723 
5724 	st = DLB2_QUEUE_MAPPED;
5725 	if (!dlb2_port_find_slot_queue(port, st, queue, &i)) {
5726 		DLB2_HW_ERR(hw,
5727 			    "[%s()] Internal error: no available CQ slots\n",
5728 			    __func__);
5729 		return -EFAULT;
5730 	}
5731 
5732 	if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
5733 		DLB2_HW_ERR(hw,
5734 			    "[%s():%d] Internal error: port slot tracking failed\n",
5735 			    __func__, __LINE__);
5736 		return -EFAULT;
5737 	}
5738 
5739 	/*
5740 	 * QID->CQ mapping removal is an asynchronous procedure. It requires
5741 	 * stopping the DLB2 from scheduling this CQ, draining all inflights
5742 	 * from the CQ, then unmapping the queue from the CQ. This function
5743 	 * simply marks the port as needing the queue unmapped, and (if
5744 	 * necessary) starts the unmapping worker thread.
5745 	 */
5746 	dlb2_ldb_port_cq_disable(hw, port);
5747 
5748 	st = DLB2_QUEUE_UNMAP_IN_PROG;
5749 	ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5750 	if (ret)
5751 		return ret;
5752 
5753 	/*
5754 	 * Attempt to finish the unmapping now, in case the port has no
5755 	 * outstanding inflights. If that's not the case, this will fail and
5756 	 * the unmapping will be completed at a later time.
5757 	 */
5758 	unmap_complete = dlb2_domain_finish_unmap_port(hw, domain, port);
5759 
5760 	/*
5761 	 * If the unmapping couldn't complete immediately, launch the worker
5762 	 * thread (if it isn't already launched) to finish it later.
5763 	 */
5764 	if (!unmap_complete && !os_worker_active(hw))
5765 		os_schedule_work(hw);
5766 
5767 unmap_qid_done:
5768 	resp->status = 0;
5769 
5770 	return 0;
5771 }
5772 
5773 static void
5774 dlb2_log_pending_port_unmaps_args(struct dlb2_hw *hw,
5775 				  struct dlb2_pending_port_unmaps_args *args,
5776 				  bool vdev_req,
5777 				  unsigned int vdev_id)
5778 {
5779 	DLB2_HW_DBG(hw, "DLB unmaps in progress arguments:\n");
5780 	if (vdev_req)
5781 		DLB2_HW_DBG(hw, "(Request from VF %d)\n", vdev_id);
5782 	DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
5783 }
5784 
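/**
 * dlb2_hw_pending_port_unmaps() - Query a port's in-progress unmap count
 * @hw: Contains the current state of the DLB2 hardware.
 * @domain_id: Domain ID
 * @args: Pending port unmaps arguments (port ID).
 * @resp: Response to user. On success, resp->id holds the number of unmap
 *	operations still in progress for the port.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
 * satisfy a request, resp->status will be set accordingly.
 */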
5785 int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
5786 				u32 domain_id,
5787 				struct dlb2_pending_port_unmaps_args *args,
5788 				struct dlb2_cmd_response *resp,
5789 				bool vdev_req,
5790 				unsigned int vdev_id)
5791 {
5792 	struct dlb2_hw_domain *domain;
5793 	struct dlb2_ldb_port *port;
5794 
5795 	dlb2_log_pending_port_unmaps_args(hw, args, vdev_req, vdev_id);
5796 
5797 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5798 
5799 	if (domain == NULL) {
5800 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5801 		return -EINVAL;
5802 	}
5803 
5804 	port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
5805 	if (port == NULL || !port->configured) {
5806 		resp->status = DLB2_ST_INVALID_PORT_ID;
5807 		return -EINVAL;
5808 	}
5809 
5810 	resp->id = port->num_pending_removals;
5811 
5812 	return 0;
5813 }
5814 
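/*
 * A domain can be started only if it has been configured and has not
 * already been started.
 */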
5815 static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
5816 					 u32 domain_id,
5817 					 struct dlb2_cmd_response *resp,
5818 					 bool vdev_req,
5819 					 unsigned int vdev_id)
5820 {
5821 	struct dlb2_hw_domain *domain;
5822 
5823 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5824 
5825 	if (domain == NULL) {
5826 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5827 		return -EINVAL;
5828 	}
5829 
5830 	if (!domain->configured) {
5831 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5832 		return -EINVAL;
5833 	}
5834 
5835 	if (domain->started) {
5836 		resp->status = DLB2_ST_DOMAIN_STARTED;
5837 		return -EINVAL;
5838 	}
5839 
5840 	return 0;
5841 }
5842 
5843 static void dlb2_log_start_domain(struct dlb2_hw *hw,
5844 				  u32 domain_id,
5845 				  bool vdev_req,
5846 				  unsigned int vdev_id)
5847 {
5848 	DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
5849 	if (vdev_req)
5850 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5851 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5852 }
5853 
5854 /**
5855  * dlb2_hw_start_domain() - Lock the domain configuration
5856  * @hw:	Contains the current state of the DLB2 hardware.
5857  * @domain_id: Domain ID
5858  * @arg: User-provided arguments (unused, here for ioctl callback template).
5859  * @resp: Response to user.
5860  * @vdev_req: Request came from a virtual device.
5861  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
5862  *
5863  * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
5864  * satisfy a request, resp->status will be set accordingly.
5865  */
5866 int
5867 dlb2_hw_start_domain(struct dlb2_hw *hw,
5868 		     u32 domain_id,
5869 		     __attribute((unused)) struct dlb2_start_domain_args *arg,
5870 		     struct dlb2_cmd_response *resp,
5871 		     bool vdev_req,
5872 		     unsigned int vdev_id)
5873 {
5874 	struct dlb2_list_entry *iter;
5875 	struct dlb2_dir_pq_pair *dir_queue;
5876 	struct dlb2_ldb_queue *ldb_queue;
5877 	struct dlb2_hw_domain *domain;
5878 	int ret;
5879 	RTE_SET_USED(arg);
5880 	RTE_SET_USED(iter);
5881 
5882 	dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);
5883 
5884 	ret = dlb2_verify_start_domain_args(hw,
5885 					    domain_id,
5886 					    resp,
5887 					    vdev_req,
5888 					    vdev_id);
5889 	if (ret)
5890 		return ret;
5891 
5892 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5893 	if (domain == NULL) {
5894 		DLB2_HW_ERR(hw,
5895 			    "[%s():%d] Internal error: domain not found\n",
5896 			    __func__, __LINE__);
5897 		return -EFAULT;
5898 	}
5899 
5900 	/*
5901 	 * Enable load-balanced and directed queue write permissions for the
5902 	 * queues this domain owns. Without this, the DLB2 will drop all
5903 	 * incoming traffic to those queues.
5904 	 */
5905 	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
5906 		union dlb2_sys_ldb_vasqid_v r0 = { {0} };
5907 		unsigned int offs;
5908 
5909 		r0.field.vasqid_v = 1;
5910 
5911 		offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
5912 			ldb_queue->id.phys_id;
5913 
5914 		DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), r0.val);
5915 	}
5916 
5917 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
5918 		union dlb2_sys_dir_vasqid_v r0 = { {0} };
5919 		unsigned int offs;
5920 
5921 		r0.field.vasqid_v = 1;
5922 
5923 		offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS +
5924 			dir_queue->id.phys_id;
5925 
5926 		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
5927 	}
5928 
5929 	dlb2_flush_csr(hw);
5930 
5931 	domain->started = true;
5932 
5933 	resp->status = 0;
5934 
5935 	return 0;
5936 }
5937 
5938 static void dlb2_log_get_dir_queue_depth(struct dlb2_hw *hw,
5939 					 u32 domain_id,
5940 					 u32 queue_id,
5941 					 bool vdev_req,
5942 					 unsigned int vf_id)
5943 {
5944 	DLB2_HW_DBG(hw, "DLB get directed queue depth:\n");
5945 	if (vdev_req)
5946 		DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
5947 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5948 	DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
5949 }
5950 
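/**
 * dlb2_hw_get_dir_queue_depth() - Query a directed queue's depth
 * @hw: Contains the current state of the DLB2 hardware.
 * @domain_id: Domain ID
 * @args: Queue depth arguments (queue ID).
 * @resp: Response to user. On success, resp->id holds the queue depth.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
 * satisfy a request, resp->status will be set accordingly.
 */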
5951 int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,
5952 				u32 domain_id,
5953 				struct dlb2_get_dir_queue_depth_args *args,
5954 				struct dlb2_cmd_response *resp,
5955 				bool vdev_req,
5956 				unsigned int vdev_id)
5957 {
5958 	struct dlb2_dir_pq_pair *queue;
5959 	struct dlb2_hw_domain *domain;
5960 	int id;
5961 
5962 	id = domain_id;
5963 
5964 	dlb2_log_get_dir_queue_depth(hw, domain_id, args->queue_id,
5965 				     vdev_req, vdev_id);
5966 
5967 	domain = dlb2_get_domain_from_id(hw, id, vdev_req, vdev_id);
5968 	if (domain == NULL) {
5969 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5970 		return -EINVAL;
5971 	}
5972 
5973 	id = args->queue_id;
5974 
5975 	queue = dlb2_get_domain_used_dir_pq(id, vdev_req, domain);
5976 	if (queue == NULL) {
5977 		resp->status = DLB2_ST_INVALID_QID;
5978 		return -EINVAL;
5979 	}
5980 
5981 	resp->id = dlb2_dir_queue_depth(hw, queue);
5982 
5983 	return 0;
5984 }
5985 
5986 static void dlb2_log_get_ldb_queue_depth(struct dlb2_hw *hw,
5987 					 u32 domain_id,
5988 					 u32 queue_id,
5989 					 bool vdev_req,
5990 					 unsigned int vf_id)
5991 {
5992 	DLB2_HW_DBG(hw, "DLB get load-balanced queue depth:\n");
5993 	if (vdev_req)
5994 		DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
5995 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5996 	DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
5997 }
5998 
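/**
 * dlb2_hw_get_ldb_queue_depth() - Query a load-balanced queue's depth
 * @hw: Contains the current state of the DLB2 hardware.
 * @domain_id: Domain ID
 * @args: Queue depth arguments (queue ID).
 * @resp: Response to user. On success, resp->id holds the queue depth.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
 * satisfy a request, resp->status will be set accordingly.
 */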
5999 int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,
6000 				u32 domain_id,
6001 				struct dlb2_get_ldb_queue_depth_args *args,
6002 				struct dlb2_cmd_response *resp,
6003 				bool vdev_req,
6004 				unsigned int vdev_id)
6005 {
6006 	struct dlb2_hw_domain *domain;
6007 	struct dlb2_ldb_queue *queue;
6008 
6009 	dlb2_log_get_ldb_queue_depth(hw, domain_id, args->queue_id,
6010 				     vdev_req, vdev_id);
6011 
6012 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
6013 	if (domain == NULL) {
6014 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
6015 		return -EINVAL;
6016 	}
6017 
6018 	queue = dlb2_get_domain_ldb_queue(args->queue_id, vdev_req, domain);
6019 	if (queue == NULL) {
6020 		resp->status = DLB2_ST_INVALID_QID;
6021 		return -EINVAL;
6022 	}
6023 
6024 	resp->id = dlb2_ldb_queue_depth(hw, queue);
6025 
6026 	return 0;
6027 }
6028