1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 */
4
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_lpm.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memory.h>

#include "rte_table_lpm.h"
17
/* Maximum number of distinct next-hop entries the Next Hop Table (NHT) can
 * hold; also the range of the 24-bit next-hop index stored in the low-level
 * LPM table. Overridable at build time.
 */
#ifndef RTE_TABLE_LPM_MAX_NEXT_HOPS
#define RTE_TABLE_LPM_MAX_NEXT_HOPS 65536
#endif

/* Statistics counters compile to no-ops unless RTE_TABLE_STATS_COLLECT is
 * defined, so the lookup fast path pays no cost by default.
 */
#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_LPM_STATS_PKTS_IN_ADD(table, val) \
	table->stats.n_pkts_in += val
#define RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(table, val) \
	table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_LPM_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(table, val)

#endif
35
/* Table state: fixed-size header followed by the variable-size Next Hop
 * Table (NHT), allocated in one contiguous rte_zmalloc_socket() region.
 */
struct rte_table_lpm {
	struct rte_table_stats stats;	/* Packet in/lookup-miss counters */

	/* Input parameters */
	uint32_t entry_size;		/* NHT entry size, 8-byte aligned at create */
	uint32_t entry_unique_size;	/* Prefix of entry compared for dedup */
	uint32_t n_rules;		/* Max rules in the low-level LPM table */
	uint32_t offset;		/* Byte offset of IPv4 key in mbuf metadata */

	/* Handle to low-level LPM table */
	struct rte_lpm *lpm;

	/* Next Hop Table (NHT): nht_users[i] is the number of LPM rules that
	 * reference NHT entry i; entry i is free when its count is zero.
	 */
	uint32_t nht_users[RTE_TABLE_LPM_MAX_NEXT_HOPS];
	/* Trailing variable-size array (zero-length idiom, per DPDK style);
	 * holds RTE_TABLE_LPM_MAX_NEXT_HOPS entries of entry_size bytes each.
	 */
	uint8_t nht[0] __rte_cache_aligned;
};
52
53 static void *
rte_table_lpm_create(void * params,int socket_id,uint32_t entry_size)54 rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
55 {
56 struct rte_table_lpm_params *p = params;
57 struct rte_table_lpm *lpm;
58 struct rte_lpm_config lpm_config;
59
60 uint32_t total_size, nht_size;
61
62 /* Check input parameters */
63 if (p == NULL) {
64 RTE_LOG(ERR, TABLE, "%s: NULL input parameters\n", __func__);
65 return NULL;
66 }
67 if (p->n_rules == 0) {
68 RTE_LOG(ERR, TABLE, "%s: Invalid n_rules\n", __func__);
69 return NULL;
70 }
71 if (p->number_tbl8s == 0) {
72 RTE_LOG(ERR, TABLE, "%s: Invalid number_tbl8s\n", __func__);
73 return NULL;
74 }
75 if (p->entry_unique_size == 0) {
76 RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
77 __func__);
78 return NULL;
79 }
80 if (p->entry_unique_size > entry_size) {
81 RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
82 __func__);
83 return NULL;
84 }
85 if (p->name == NULL) {
86 RTE_LOG(ERR, TABLE, "%s: Table name is NULL\n",
87 __func__);
88 return NULL;
89 }
90 entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));
91
92 /* Memory allocation */
93 nht_size = RTE_TABLE_LPM_MAX_NEXT_HOPS * entry_size;
94 total_size = sizeof(struct rte_table_lpm) + nht_size;
95 lpm = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
96 socket_id);
97 if (lpm == NULL) {
98 RTE_LOG(ERR, TABLE,
99 "%s: Cannot allocate %u bytes for LPM table\n",
100 __func__, total_size);
101 return NULL;
102 }
103
104 /* LPM low-level table creation */
105 lpm_config.max_rules = p->n_rules;
106 lpm_config.number_tbl8s = p->number_tbl8s;
107 lpm_config.flags = p->flags;
108 lpm->lpm = rte_lpm_create(p->name, socket_id, &lpm_config);
109
110 if (lpm->lpm == NULL) {
111 rte_free(lpm);
112 RTE_LOG(ERR, TABLE, "Unable to create low-level LPM table\n");
113 return NULL;
114 }
115
116 /* Memory initialization */
117 lpm->entry_size = entry_size;
118 lpm->entry_unique_size = p->entry_unique_size;
119 lpm->n_rules = p->n_rules;
120 lpm->offset = p->offset;
121
122 return lpm;
123 }
124
125 static int
rte_table_lpm_free(void * table)126 rte_table_lpm_free(void *table)
127 {
128 struct rte_table_lpm *lpm = table;
129
130 /* Check input parameters */
131 if (lpm == NULL) {
132 RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
133 return -EINVAL;
134 }
135
136 /* Free previously allocated resources */
137 rte_lpm_free(lpm->lpm);
138 rte_free(lpm);
139
140 return 0;
141 }
142
143 static int
nht_find_free(struct rte_table_lpm * lpm,uint32_t * pos)144 nht_find_free(struct rte_table_lpm *lpm, uint32_t *pos)
145 {
146 uint32_t i;
147
148 for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
149 if (lpm->nht_users[i] == 0) {
150 *pos = i;
151 return 1;
152 }
153 }
154
155 return 0;
156 }
157
158 static int
nht_find_existing(struct rte_table_lpm * lpm,void * entry,uint32_t * pos)159 nht_find_existing(struct rte_table_lpm *lpm, void *entry, uint32_t *pos)
160 {
161 uint32_t i;
162
163 for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
164 uint8_t *nht_entry = &lpm->nht[i * lpm->entry_size];
165
166 if ((lpm->nht_users[i] > 0) && (memcmp(nht_entry, entry,
167 lpm->entry_unique_size) == 0)) {
168 *pos = i;
169 return 1;
170 }
171 }
172
173 return 0;
174 }
175
/**
 * Add (or update) a rule mapping an IPv4 prefix to a next-hop entry.
 *
 * The entry payload is deduplicated: identical entries (compared over the
 * first entry_unique_size bytes) share one NHT slot, reference-counted via
 * nht_users[]. On update, the old slot's count is decremented.
 *
 * @param table		Table handle
 * @param key		struct rte_table_lpm_key (ip, depth)
 * @param entry		Entry payload, at least entry_unique_size bytes
 * @param key_found	Out: 1 if the prefix was already present (update), 0 if new
 * @param entry_ptr	Out: pointer to the entry's NHT slot inside the table
 * @return		0 on success, -EINVAL on bad parameters, -1 on NHT-full
 *			or low-level LPM failure
 */
static int
rte_table_lpm_entry_add(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_lpm *lpm = table;
	struct rte_table_lpm_key *ip_prefix = key;
	uint32_t nht_pos, nht_pos0_valid;
	int status;
	uint32_t nht_pos0 = 0;

	/* Check input parameters */
	if (lpm == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}
	if (ip_prefix == NULL) {
		RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
			__func__);
		return -EINVAL;
	}
	if (entry == NULL) {
		RTE_LOG(ERR, TABLE, "%s: entry parameter is NULL\n", __func__);
		return -EINVAL;
	}

	/* IPv4 prefix depth is 1..32 */
	if ((ip_prefix->depth == 0) || (ip_prefix->depth > 32)) {
		RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n",
			__func__, ip_prefix->depth);
		return -EINVAL;
	}

	/* Check if rule is already present in the table; if so, nht_pos0 is
	 * the NHT slot the old rule points at (its refcount drops below).
	 */
	status = rte_lpm_is_rule_present(lpm->lpm, ip_prefix->ip,
		ip_prefix->depth, &nht_pos0);
	nht_pos0_valid = status > 0;

	/* Find existing or free NHT entry. Writing the payload into a free
	 * slot before rte_lpm_add() is safe: the slot's refcount stays 0
	 * until the commit below, so a failed add leaves it free.
	 */
	if (nht_find_existing(lpm, entry, &nht_pos) == 0) {
		uint8_t *nht_entry;

		if (nht_find_free(lpm, &nht_pos) == 0) {
			RTE_LOG(ERR, TABLE, "%s: NHT full\n", __func__);
			return -1;
		}

		nht_entry = &lpm->nht[nht_pos * lpm->entry_size];
		memcpy(nht_entry, entry, lpm->entry_size);
	}

	/* Add rule to low level LPM table */
	if (rte_lpm_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth, nht_pos) < 0) {
		RTE_LOG(ERR, TABLE, "%s: LPM rule add failed\n", __func__);
		return -1;
	}

	/* Commit NHT changes: take a reference on the new slot, then drop
	 * the old rule's reference (branch-free: subtracts 0 or 1).
	 */
	lpm->nht_users[nht_pos]++;
	lpm->nht_users[nht_pos0] -= nht_pos0_valid;

	*key_found = nht_pos0_valid;
	*entry_ptr = (void *) &lpm->nht[nht_pos * lpm->entry_size];
	return 0;
}
243
/**
 * Delete the rule for an IPv4 prefix, dropping its reference on the shared
 * NHT slot (the slot becomes free when its count reaches zero).
 *
 * @param table		Table handle
 * @param key		struct rte_table_lpm_key (ip, depth)
 * @param key_found	Out: 1 if the rule existed and was deleted, 0 otherwise
 * @param entry		Optional out: if non-NULL, receives a copy of the
 *			deleted rule's NHT entry (entry_size bytes)
 * @return		0 on success (including rule-not-found), -EINVAL on bad
 *			parameters, -1 on low-level LPM failure
 */
static int
rte_table_lpm_entry_delete(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_lpm *lpm = table;
	struct rte_table_lpm_key *ip_prefix = key;
	uint32_t nht_pos;
	int status;

	/* Check input parameters */
	if (lpm == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}
	if (ip_prefix == NULL) {
		RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
			__func__);
		return -EINVAL;
	}
	if ((ip_prefix->depth == 0) || (ip_prefix->depth > 32)) {
		RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n", __func__,
			ip_prefix->depth);
		return -EINVAL;
	}

	/* Return if rule is not present in the table; on hit, nht_pos is the
	 * NHT slot whose refcount must be dropped after a successful delete.
	 */
	status = rte_lpm_is_rule_present(lpm->lpm, ip_prefix->ip,
		ip_prefix->depth, &nht_pos);
	if (status < 0) {
		RTE_LOG(ERR, TABLE, "%s: LPM algorithmic error\n", __func__);
		return -1;
	}
	if (status == 0) {
		*key_found = 0;
		return 0;
	}

	/* Delete rule from the low-level LPM table */
	status = rte_lpm_delete(lpm->lpm, ip_prefix->ip, ip_prefix->depth);
	if (status) {
		RTE_LOG(ERR, TABLE, "%s: LPM rule delete failed\n", __func__);
		return -1;
	}

	/* Commit NHT changes only after the LPM delete succeeded */
	lpm->nht_users[nht_pos]--;

	*key_found = 1;
	/* Copy the entry out before the slot can be reused by a later add */
	if (entry)
		memcpy(entry, &lpm->nht[nht_pos * lpm->entry_size],
			lpm->entry_size);

	return 0;
}
301
302 static int
rte_table_lpm_lookup(void * table,struct rte_mbuf ** pkts,uint64_t pkts_mask,uint64_t * lookup_hit_mask,void ** entries)303 rte_table_lpm_lookup(
304 void *table,
305 struct rte_mbuf **pkts,
306 uint64_t pkts_mask,
307 uint64_t *lookup_hit_mask,
308 void **entries)
309 {
310 struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
311 uint64_t pkts_out_mask = 0;
312 uint32_t i;
313
314 __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
315 RTE_TABLE_LPM_STATS_PKTS_IN_ADD(lpm, n_pkts_in);
316
317 pkts_out_mask = 0;
318 for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
319 __builtin_clzll(pkts_mask)); i++) {
320 uint64_t pkt_mask = 1LLU << i;
321
322 if (pkt_mask & pkts_mask) {
323 struct rte_mbuf *pkt = pkts[i];
324 uint32_t ip = rte_bswap32(
325 RTE_MBUF_METADATA_UINT32(pkt, lpm->offset));
326 int status;
327 uint32_t nht_pos;
328
329 status = rte_lpm_lookup(lpm->lpm, ip, &nht_pos);
330 if (status == 0) {
331 pkts_out_mask |= pkt_mask;
332 entries[i] = (void *) &lpm->nht[nht_pos *
333 lpm->entry_size];
334 }
335 }
336 }
337
338 *lookup_hit_mask = pkts_out_mask;
339 RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(lpm, n_pkts_in - __builtin_popcountll(pkts_out_mask));
340 return 0;
341 }
342
343 static int
rte_table_lpm_stats_read(void * table,struct rte_table_stats * stats,int clear)344 rte_table_lpm_stats_read(void *table, struct rte_table_stats *stats, int clear)
345 {
346 struct rte_table_lpm *t = table;
347
348 if (stats != NULL)
349 memcpy(stats, &t->stats, sizeof(t->stats));
350
351 if (clear)
352 memset(&t->stats, 0, sizeof(t->stats));
353
354 return 0;
355 }
356
/* Operations vector exported to the pipeline layer; bulk add/delete are
 * intentionally not implemented for this table type.
 */
struct rte_table_ops rte_table_lpm_ops = {
	.f_create = rte_table_lpm_create,
	.f_free = rte_table_lpm_free,
	.f_add = rte_table_lpm_entry_add,
	.f_delete = rte_table_lpm_entry_delete,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_lpm_lookup,
	.f_stats = rte_table_lpm_stats_read,
};
367