/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#ifndef _RTE_EMPTY_POLL_H
#define _RTE_EMPTY_POLL_H

/**
 * @file
 * RTE Power Management
 */
#include <stdint.h>
#include <stdbool.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_power.h>
#include <rte_timer.h>

#ifdef __cplusplus
extern "C" {
#endif

#define NUM_FREQS RTE_MAX_LCORE_FREQS

#define BINS_AV 4 /* Must be a power of 2 */

#define DROP (NUM_DIRECTIONS * NUM_DEVICES)

#define NUM_PRIORITIES 2

#define NUM_NODES 256 /* Max number of cores */

/* Processor Power State */
enum freq_val {
	LOW,
	MED,
	HGH,
	NUM_FREQ = NUM_FREQS
};

/* Queue Polling State */
enum queue_state {
	TRAINING, /* NO TRAFFIC */
	MED_NORMAL, /* MED */
	HGH_BUSY, /* HIGH */
	LOW_PURGE, /* LOW */
};

/* Queue Stats */
struct freq_threshold {
	uint64_t base_edpi;
	bool trained;
	uint32_t threshold_percent;
	uint32_t cur_train_iter;
};

/* Per worker thread empty poll stats */
struct priority_worker {
	/*
	 * Current dequeue and throughput counts.
	 * These two are written to by the worker threads,
	 * so keep them on their own cache line.
	 */
	uint64_t empty_dequeues;
	uint64_t num_dequeue_pkts;

	enum queue_state queue_state;

	uint64_t empty_dequeues_prev;
	uint64_t num_dequeue_pkts_prev;

	/* Used for training only */
	struct freq_threshold thresh[NUM_FREQ];
	enum freq_val cur_freq;

	/* Bucket arrays used to calculate the averages */

	/* edpi: empty poll counter difference per interval */
	uint64_t edpi_av[BINS_AV];
	/* empty poll counter */
	uint32_t ec;

	/* ppi: valid poll counter per interval */
	uint64_t ppi_av[BINS_AV];
	/* valid poll counter */
	uint32_t pc;

	uint32_t lcore_id;
	uint32_t iter_counter;
	uint32_t threshold_ctr;
	uint32_t display_ctr;
	uint8_t dev_id;

} __rte_cache_aligned;

struct stats_data {
	struct priority_worker wrk_stats[NUM_NODES];

	/* Flag to stop rx threads processing packets until training is over */
	bool start_rx;
};

/* Empty Poll Parameters */
struct ep_params {
	/* Timer related fields */
	uint64_t interval_ticks;
	uint32_t max_train_iter;

	struct rte_timer timer0;
	struct stats_data wrk_data;
};

/* Sample App Init information */
struct ep_policy {
	uint64_t med_base_edpi;
	uint64_t hgh_base_edpi;

	enum queue_state state;
};

/**
 * Initialize the power management system.
 *
 * @param eptr
 *   The empty poll configuration structure.
 * @param freq_tlb
 *   The power state/frequency mapping table.
 * @param policy
 *   The initialization policy from the sample app.
 *
 * @return
 *   - 0 on success.
 *   - Negative on error.
 */
__rte_experimental
int
rte_power_empty_poll_stat_init(struct ep_params **eptr, uint8_t *freq_tlb,
		struct ep_policy *policy);

/**
 * Free the resources held by the power management system.
 */
__rte_experimental
void
rte_power_empty_poll_stat_free(void);

/**
 * Update the empty poll counter of the specified lcore.
 * Not thread safe.
 *
 * @param lcore_id
 *   lcore id.
 *
 * @return
 *   - 0 on success.
 *   - Negative on error.
 */
__rte_experimental
int
rte_power_empty_poll_stat_update(unsigned int lcore_id);

/**
 * Update the valid poll counter of the specified lcore.
 * Not thread safe.
 *
 * @param lcore_id
 *   lcore id.
 * @param nb_pkt
 *   Number of packets received by the valid poll.
 *
 * @return
 *   - 0 on success.
 *   - Negative on error.
 */
__rte_experimental
int
rte_power_poll_stat_update(unsigned int lcore_id, uint8_t nb_pkt);

/**
 * Fetch the empty poll counter of the specified lcore.
 *
 * @param lcore_id
 *   lcore id.
 *
 * @return
 *   Current lcore empty poll counter value.
 */
__rte_experimental
uint64_t
rte_power_empty_poll_stat_fetch(unsigned int lcore_id);

/**
 * Fetch the valid poll counter of the specified lcore.
 *
 * @param lcore_id
 *   lcore id.
 *
 * @return
 *   Current lcore valid poll counter value.
 */
__rte_experimental
uint64_t
rte_power_poll_stat_fetch(unsigned int lcore_id);

/**
 * Empty poll state change detection function.
 *
 * @param tim
 *   The timer structure.
 * @param arg
 *   The user-supplied callback argument.
 */
__rte_experimental
void
rte_empty_poll_detection(struct rte_timer *tim, void *arg);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_EMPTY_POLL_H */
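
/*
 * Illustrative usage sketch (not part of this header): the flow below shows
 * how a polling application could wire these calls together, roughly along
 * the lines of the l3fwd-power sample. The burst size, timer period, error
 * handling, and variables such as port_id/queue_id are placeholder choices,
 * and passing a NULL freq_tlb is assumed to fall back to the library's
 * built-in low/med/high frequency mapping.
 *
 *	struct ep_params *ep_params;
 *	struct ep_policy policy = { .state = TRAINING };
 *	struct rte_mbuf *pkts[32];
 *	uint16_t port_id = 0, queue_id = 0;
 *	uint16_t nb_rx;
 *
 *	// One-time setup, before the worker lcores start polling
 *	if (rte_power_empty_poll_stat_init(&ep_params, NULL, &policy) < 0)
 *		rte_exit(EXIT_FAILURE, "empty-poll init failed\n");
 *
 *	// Run the state-change detection periodically from the main lcore
 *	// (that lcore must also call rte_timer_manage() in its loop)
 *	rte_timer_init(&ep_params->timer0);
 *	rte_timer_reset(&ep_params->timer0, rte_get_timer_hz() / 10,
 *			PERIODICAL, rte_lcore_id(),
 *			rte_empty_poll_detection, ep_params);
 *
 *	// Inside each worker's polling loop: feed the per-lcore counters
 *	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *	if (nb_rx == 0)
 *		rte_power_empty_poll_stat_update(rte_lcore_id());
 *	else
 *		rte_power_poll_stat_update(rte_lcore_id(), nb_rx);
 *
 *	// On shutdown
 *	rte_power_empty_poll_stat_free();
 */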