/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_alarm.h>
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "ulp_fc_mgr.h"
#include "ulp_flow_db.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "tf_tbl.h"

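/*
 * Rough usage sketch (informational only; the callers live elsewhere in the
 * bnxt PMD): ulp_fc_mgr_init() is called once per ULP context, each flow
 * with a counter registers via ulp_fc_mgr_cntr_set(), periodic accumulation
 * is armed with ulp_fc_mgr_thread_start(), rte_flow query requests are
 * served from ulp_fc_mgr_query_count_get(), and everything is torn down
 * with ulp_fc_mgr_cntr_reset()/ulp_fc_mgr_deinit().
 */
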
static int
ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
{
	/* Allocate memory */
	if (!parms)
		return -EINVAL;

	parms->mem_va = rte_zmalloc("ulp_fc_info",
				    RTE_CACHE_LINE_ROUNDUP(size),
				    4096);
	if (!parms->mem_va) {
		BNXT_TF_DBG(ERR, "Failed to allocate mem_va\n");
		return -ENOMEM;
	}

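	/*
	 * Lock the page backing the buffer so the virtual-to-physical
	 * mapping obtained below stays valid while the HW DMAs counter
	 * data into it.
	 */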
	rte_mem_lock_page(parms->mem_va);

	parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
	if (parms->mem_pa == (void *)(uintptr_t)RTE_BAD_IOVA) {
		BNXT_TF_DBG(ERR, "Failed to get mem_pa\n");
		return -ENOMEM;
	}

	return 0;
}

static void
ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
{
	rte_free(parms->mem_va);
}

/*
 * Allocate and Initialize all Flow Counter Manager resources for this ulp
 * context.
 *
 * ctxt [in] The ulp context for the Flow Counter manager.
 *
 */
int32_t
ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_device_params *dparms;
	uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	int i, rc;

	if (!ctxt) {
		BNXT_TF_DBG(DEBUG, "Invalid ULP CTXT\n");
		return -EINVAL;
	}

	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
		return -EINVAL;
	}

	ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
	if (!ulp_fc_info)
		goto error;

	rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
		goto error;
	}

	/* Add the FC info tbl to the ulp context. */
	bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);

	sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
				dparms->flow_count_db_entries;

	for (i = 0; i < TF_DIR_MAX; i++) {
		ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
							 sw_acc_cntr_tbl_sz, 0);
		if (!ulp_fc_info->sw_acc_tbl[i])
			goto error;
	}

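	/*
	 * Each raw HW counter entry is read as a single 64-bit word
	 * (TF_TBL_TYPE_ACT_STATS_64); per the TODOs below, the entry size
	 * should eventually come from the template instead.
	 */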
	hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;

	for (i = 0; i < TF_DIR_MAX; i++) {
		rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
						 hw_fc_mem_info_sz);
		if (rc)
			goto error;
	}

	return 0;

error:
	ulp_fc_mgr_deinit(ctxt);
	BNXT_TF_DBG(DEBUG, "Failed to allocate memory for fc mgr\n");

	return -ENOMEM;
}

/*
 * Release all resources in the Flow Counter Manager for this ulp context
 *
 * ctxt [in] The ulp context for the Flow Counter manager
 *
 */
int32_t
ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	int i;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	if (!ulp_fc_info)
		return -EINVAL;

	ulp_fc_mgr_thread_cancel(ctxt);

	pthread_mutex_destroy(&ulp_fc_info->fc_lock);

	for (i = 0; i < TF_DIR_MAX; i++)
		rte_free(ulp_fc_info->sw_acc_tbl[i]);

	for (i = 0; i < TF_DIR_MAX; i++)
		ulp_fc_mgr_shadow_mem_free(&ulp_fc_info->shadow_hw_tbl[i]);

	rte_free(ulp_fc_info);

	/* Safe to ignore on deinit */
	(void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);

	return 0;
}

/*
 * Check if the alarm thread that walks through the flows is started
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);
}

/*
 * Setup the Flow counter timer thread that will fetch/accumulate raw counter
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
int32_t
ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

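	/*
	 * There is no dedicated thread; "starting" just arms a one-shot EAL
	 * alarm. The callback re-arms itself every ULP_FC_TIMER seconds
	 * until ulp_fc_mgr_thread_cancel() clears the flag.
	 */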
	if (!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
		rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
				  ulp_fc_mgr_alarm_cb,
				  (void *)ctxt);
		ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
	}

	return 0;
}

/*
 * Cancel the alarm handler
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return;

	ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
	rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, (void *)ctxt);
}

/*
 * DMA-in the raw counter data from the HW and accumulate in the
 * local accumulator table using the TF-Core API
 *
 * tfp [in] The TF-Core context
 *
 * fc_info [in] The ULP Flow counter info ptr
 *
 * dir [in] The direction of the flow
 *
 * dparms [in] The device parameters for the flow counters
 *
 */
__rte_unused static int32_t
ulp_bulk_get_flow_stats(struct tf *tfp,
			struct bnxt_ulp_fc_info *fc_info,
			enum tf_dir dir,
			struct bnxt_ulp_device_params *dparms)
/* MARK AS UNUSED FOR NOW TO AVOID COMPILATION ERRORS TILL API IS RESOLVED */
{
	int rc = 0;
	struct tf_tbl_get_bulk_parms parms = { 0 };
	enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64; /* TBD: Template? */
	struct sw_acc_counter *sw_acc_tbl_entry = NULL;
	uint64_t *stats = NULL;
	uint16_t i = 0;

	parms.dir = dir;
	parms.type = stype;
	parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
	parms.num_entries = dparms->flow_count_db_entries / 2; /* per direction */
	/*
	 * TODO:
	 * Size of an entry needs to be obtained from template
	 */
	parms.entry_sz_in_bytes = sizeof(uint64_t);
	stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
	parms.physical_mem_addr = (uintptr_t)fc_info->shadow_hw_tbl[dir].mem_pa;

	if (!stats) {
		PMD_DRV_LOG(ERR,
			    "BULK: Memory not initialized id:0x%x dir:%d\n",
			    parms.starting_idx, dir);
		return -EINVAL;
	}

	rc = tf_tbl_bulk_get(tfp, &parms);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "BULK: Get failed for id:0x%x rc:%d\n",
			    parms.starting_idx, rc);
		return rc;
	}

	for (i = 0; i < parms.num_entries; i++) {
		/* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
		sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
		if (!sw_acc_tbl_entry->valid)
			continue;
		sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i], dparms);
		sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i], dparms);
	}

	return rc;
}

static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
				    struct tf *tfp,
				    struct bnxt_ulp_fc_info *fc_info,
				    enum tf_dir dir,
				    uint32_t hw_cntr_id,
				    struct bnxt_ulp_device_params *dparms)
{
	int rc = 0;
	struct tf_get_tbl_entry_parms parms = { 0 };
	enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64; /* TBD: Template? */
	struct sw_acc_counter *sw_acc_tbl_entry = NULL, *t_sw;
	uint64_t stats = 0;
	uint32_t sw_cntr_indx = 0;

	parms.dir = dir;
	parms.type = stype;
	parms.idx = hw_cntr_id;
	/*
	 * TODO:
	 * Size of an entry needs to be obtained from template
	 */
	parms.data_sz_in_bytes = sizeof(uint64_t);
	parms.data = (uint8_t *)&stats;
	rc = tf_get_tbl_entry(tfp, &parms);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Get failed for id:0x%x rc:%d\n",
			    parms.idx, rc);
		return rc;
	}

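	/*
	 * The 64-bit stats word packs both the packet and byte counts;
	 * FLOW_CNTR_PKTS()/FLOW_CNTR_BYTES() extract them using the
	 * shift/mask values carried in the device parameters (presumably
	 * along the lines of (stats >> shift) & mask).
	 */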
	/* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
	sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
	sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
	sw_acc_tbl_entry->pkt_count = FLOW_CNTR_PKTS(stats, dparms);
	sw_acc_tbl_entry->byte_count = FLOW_CNTR_BYTES(stats, dparms);

	/* Update the parent counters if this is a child flow */
	if (sw_acc_tbl_entry->parent_flow_id) {
		t_sw = sw_acc_tbl_entry;
		if (ulp_flow_db_parent_flow_count_update(ctxt,
							 t_sw->parent_flow_id,
							 t_sw->pkt_count,
							 t_sw->byte_count)) {
			PMD_DRV_LOG(ERR, "Error updating parent counters\n");
		}
	}

	return rc;
}

/*
 * Alarm handler that will issue the TF-Core API to fetch
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void
ulp_fc_mgr_alarm_cb(void *arg)
{
	int rc = 0;
	unsigned int j;
	enum tf_dir i;
	struct bnxt_ulp_context *ctxt = arg;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	struct bnxt_ulp_device_params *dparms;
	struct tf *tfp;
	uint32_t dev_id, hw_cntr_id = 0, num_entries = 0;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return;

	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
		return;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
		return;
	}

	tfp = bnxt_ulp_cntxt_tfp_get(ctxt);
	if (!tfp) {
		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
		return;
	}

	/*
	 * Take the fc_lock to ensure no flow is destroyed
	 * during the bulk get
	 */
	if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
		goto out;

	if (!ulp_fc_info->num_entries) {
		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
		ulp_fc_mgr_thread_cancel(ctxt);
		return;
	}
	/*
	 * Commented out for now till GET_BULK is resolved; fetch the stats
	 * one flow at a time instead:
	 *
	 * for (i = 0; i < TF_DIR_MAX; i++) {
	 *	rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i,
	 *				     dparms->flow_count_db_entries);
	 *	if (rc)
	 *		break;
	 * }
	 */

	/* Reset the parent accumulation counters before accumulation if any */
	ulp_flow_db_parent_flow_count_reset(ctxt);

	num_entries = dparms->flow_count_db_entries / 2;
	for (i = 0; i < TF_DIR_MAX; i++) {
		for (j = 0; j < num_entries; j++) {
			if (!ulp_fc_info->sw_acc_tbl[i][j].valid)
				continue;
			hw_cntr_id = ulp_fc_info->sw_acc_tbl[i][j].hw_cntr_id;
			rc = ulp_get_single_flow_stat(ctxt, tfp, ulp_fc_info, i,
						      hw_cntr_id, dparms);
			if (rc)
				break;
		}
	}

	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	/*
	 * If the cmd fails once, there is no need to
	 * invoke it again every second
	 */
	if (rc) {
		ulp_fc_mgr_thread_cancel(ctxt);
		return;
	}
out:
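	/* EAL alarms are one-shot; re-arm here for the next poll interval. */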
	rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
			  ulp_fc_mgr_alarm_cb,
			  (void *)ctxt);
}

/*
 * Check whether the starting index, i.e. the first HW flow counter ID,
 * has been set for this direction
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 */
bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set;
}

/*
 * Set the starting index that indicates the first HW flow
 * counter ID
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * start_idx [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
				 uint32_t start_idx)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	if (!ulp_fc_info)
		return -EIO;

	if (!ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set) {
		ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;
		ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set = true;
	}

	return 0;
}

/*
 * Set the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID. Also, keep track of the number of active
 * counter-enabled flows.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
			    uint32_t hw_cntr_id)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	uint32_t sw_cntr_idx;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -EIO;

	pthread_mutex_lock(&ulp_fc_info->fc_lock);
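	/*
	 * The SW table is indexed by the offset from the first HW counter
	 * ID for this direction, e.g. a start_idx of 0x1000 and an
	 * hw_cntr_id of 0x1003 land in slot 3 (hypothetical values).
	 */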
	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
	ulp_fc_info->num_entries++;
	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	return 0;
}

/*
 * Reset the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
			      uint32_t hw_cntr_id)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	uint32_t sw_cntr_idx;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -EIO;

	pthread_mutex_lock(&ulp_fc_info->fc_lock);
	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
	ulp_fc_info->num_entries--;
	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	return 0;
}

/*
 * Fill the rte_flow_query_count 'data' argument passed
 * in the rte_flow_query() with the values obtained and
 * accumulated locally.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * flow_id [in] The HW flow ID
 *
 * count [out] The rte_flow_query_count 'data' that is set
 *
 */
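/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	struct rte_flow_query_count count = { .reset = 1 };
 *
 *	if (!ulp_fc_mgr_query_count_get(ulp_ctx, flow_id, &count) &&
 *	    count.hits_set)
 *		report(count.hits, count.bytes);
 */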
int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
			       uint32_t flow_id,
			       struct rte_flow_query_count *count)
{
	int rc = 0;
	uint32_t nxt_resource_index = 0;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	struct ulp_flow_db_res_params params;
	enum tf_dir dir;
	uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
	struct sw_acc_counter *sw_acc_tbl_entry;
	bool found_cntr_resource = false;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -ENODEV;

	if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
		return -EIO;

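	/*
	 * Walk the flow's resources in the flow DB until an index-table
	 * resource of a counter sub-type is found; that resource handle is
	 * the HW counter ID used below.
	 */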
	do {
		rc = ulp_flow_db_resource_get(ctxt,
					      BNXT_ULP_FDB_TYPE_REGULAR,
					      flow_id,
					      &nxt_resource_index,
					      &params);
		if (params.resource_func ==
		    BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
		    (params.resource_sub_type ==
		     BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT ||
		     params.resource_sub_type ==
		     BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_EXT_COUNT ||
		     params.resource_sub_type ==
		     BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT_ACC)) {
			found_cntr_resource = true;
			break;
		}
	} while (!rc && nxt_resource_index);

	bnxt_ulp_cntxt_release_fdb_lock(ctxt);

	if (rc || !found_cntr_resource)
		return rc;

	dir = params.direction;
	hw_cntr_id = params.resource_hndl;
	if (params.resource_sub_type ==
	    BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT) {
		pthread_mutex_lock(&ulp_fc_info->fc_lock);
		sw_cntr_idx = hw_cntr_id -
			ulp_fc_info->shadow_hw_tbl[dir].start_idx;
		sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
		if (sw_acc_tbl_entry->pkt_count) {
			count->hits_set = 1;
			count->bytes_set = 1;
			count->hits = sw_acc_tbl_entry->pkt_count;
			count->bytes = sw_acc_tbl_entry->byte_count;
		}
		if (count->reset) {
			sw_acc_tbl_entry->pkt_count = 0;
			sw_acc_tbl_entry->byte_count = 0;
		}
		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
	} else if (params.resource_sub_type ==
		   BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT_ACC) {
		/* Get stats from the parent child table */
		ulp_flow_db_parent_flow_count_get(ctxt, flow_id,
						  &count->hits, &count->bytes,
						  count->reset);
		count->hits_set = 1;
		count->bytes_set = 1;
	} else {
		/* TBD: Handle external counters */
		rc = -EINVAL;
	}

	return rc;
}

/*
 * Set the parent flow if it is a SW accumulation counter entry.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 * fid [in] parent flow id
 *
 */
int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
					enum tf_dir dir,
					uint32_t hw_cntr_id,
					uint32_t fid)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	uint32_t sw_cntr_idx;
	int32_t rc = 0;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -EIO;

	pthread_mutex_lock(&ulp_fc_info->fc_lock);
	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
	if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
		ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].parent_flow_id = fid;
	} else {
		BNXT_TF_DBG(ERR, "Failed to set parent flow id %x:%x\n",
			    hw_cntr_id, fid);
		rc = -ENOENT;
	}
	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	return rc;
}