/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdio.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_lpm.h>

#include "rte_table_lpm.h"

#ifndef RTE_TABLE_LPM_MAX_NEXT_HOPS
#define RTE_TABLE_LPM_MAX_NEXT_HOPS 65536
#endif

#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_LPM_STATS_PKTS_IN_ADD(table, val) \
	table->stats.n_pkts_in += val
#define RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(table, val) \
	table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_LPM_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(table, val)

#endif

struct rte_table_lpm {
	struct rte_table_stats stats;

	/* Input parameters */
	uint32_t entry_size;
	uint32_t entry_unique_size;
	uint32_t n_rules;
	uint32_t offset;

	/* Handle to low-level LPM table */
	struct rte_lpm *lpm;

	/* Next Hop Table (NHT) */
	uint32_t nht_users[RTE_TABLE_LPM_MAX_NEXT_HOPS];
	uint8_t nht[0] __rte_cache_aligned;
};
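
/*
 * The NHT is a flexible array member placed immediately after the table
 * structure: entry i occupies entry_size bytes starting at
 * &nht[i * entry_size]. nht_users[i] is a reference count of the LPM rules
 * that currently point to entry i; a count of zero marks the slot as free.
 */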

static void *
rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
{
	struct rte_table_lpm_params *p = params;
	struct rte_table_lpm *lpm;
	struct rte_lpm_config lpm_config;

	uint32_t total_size, nht_size;

	/* Check input parameters */
	if (p == NULL) {
		RTE_LOG(ERR, TABLE, "%s: NULL input parameters\n", __func__);
		return NULL;
	}
	if (p->n_rules == 0) {
		RTE_LOG(ERR, TABLE, "%s: Invalid n_rules\n", __func__);
		return NULL;
	}
	if (p->number_tbl8s == 0) {
		RTE_LOG(ERR, TABLE, "%s: Invalid number_tbl8s\n", __func__);
		return NULL;
	}
	if (p->entry_unique_size == 0) {
		RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
			__func__);
		return NULL;
	}
	if (p->entry_unique_size > entry_size) {
		RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
			__func__);
		return NULL;
	}
	if (p->name == NULL) {
		RTE_LOG(ERR, TABLE, "%s: Table name is NULL\n",
			__func__);
		return NULL;
	}
	entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));

	/* Memory allocation */
	nht_size = RTE_TABLE_LPM_MAX_NEXT_HOPS * entry_size;
	total_size = sizeof(struct rte_table_lpm) + nht_size;
	lpm = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
		socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %u bytes for LPM table\n",
			__func__, total_size);
		return NULL;
	}

	/* LPM low-level table creation */
	lpm_config.max_rules = p->n_rules;
	lpm_config.number_tbl8s = p->number_tbl8s;
	lpm_config.flags = p->flags;
	lpm->lpm = rte_lpm_create(p->name, socket_id, &lpm_config);

	if (lpm->lpm == NULL) {
		rte_free(lpm);
		RTE_LOG(ERR, TABLE, "Unable to create low-level LPM table\n");
		return NULL;
	}

	/* Memory initialization */
	lpm->entry_size = entry_size;
	lpm->entry_unique_size = p->entry_unique_size;
	lpm->n_rules = p->n_rules;
	lpm->offset = p->offset;

	return lpm;
}
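
/*
 * Usage sketch (illustrative values only): an application typically fills in
 * struct rte_table_lpm_params and calls the create operation directly or
 * through rte_table_lpm_ops.f_create, e.g.:
 *
 *	struct rte_table_lpm_params params = {
 *		.name = "LPM",                 // example table name
 *		.n_rules = 1 << 10,            // example rule capacity
 *		.number_tbl8s = 1 << 8,        // example tbl8 group count
 *		.flags = 0,
 *		.entry_unique_size = sizeof(uint32_t),
 *		.offset = APP_METADATA_OFFSET, // hypothetical metadata offset
 *	};
 *	void *table = rte_table_lpm_ops.f_create(&params, rte_socket_id(),
 *		sizeof(uint32_t));
 *
 * entry_unique_size must not exceed the entry_size passed to f_create, and
 * offset must point at the 32-bit value that rte_table_lpm_lookup() reads
 * from the packet metadata (see below).
 */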

static int
rte_table_lpm_free(void *table)
{
	struct rte_table_lpm *lpm = table;

	/* Check input parameters */
	if (lpm == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}

	/* Free previously allocated resources */
	rte_lpm_free(lpm->lpm);
	rte_free(lpm);

	return 0;
}

/* Find the first unused NHT slot (reference count of zero) */
static int
nht_find_free(struct rte_table_lpm *lpm, uint32_t *pos)
{
	uint32_t i;

	for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
		if (lpm->nht_users[i] == 0) {
			*pos = i;
			return 1;
		}
	}

	return 0;
}

/* Find an in-use NHT slot whose first entry_unique_size bytes match *entry */
static int
nht_find_existing(struct rte_table_lpm *lpm, void *entry, uint32_t *pos)
{
	uint32_t i;

	for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
		uint8_t *nht_entry = &lpm->nht[i * lpm->entry_size];

		if ((lpm->nht_users[i] > 0) && (memcmp(nht_entry, entry,
			lpm->entry_unique_size) == 0)) {
			*pos = i;
			return 1;
		}
	}

	return 0;
}

static int
rte_table_lpm_entry_add(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_lpm *lpm = table;
	struct rte_table_lpm_key *ip_prefix = key;
	uint32_t nht_pos, nht_pos0_valid;
	int status;
	uint32_t nht_pos0 = 0;

	/* Check input parameters */
	if (lpm == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}
	if (ip_prefix == NULL) {
		RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
			__func__);
		return -EINVAL;
	}
	if (entry == NULL) {
		RTE_LOG(ERR, TABLE, "%s: entry parameter is NULL\n", __func__);
		return -EINVAL;
	}

	if ((ip_prefix->depth == 0) || (ip_prefix->depth > 32)) {
		RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n",
			__func__, ip_prefix->depth);
		return -EINVAL;
	}

	/* Check if rule is already present in the table */
	status = rte_lpm_is_rule_present(lpm->lpm, ip_prefix->ip,
		ip_prefix->depth, &nht_pos0);
	nht_pos0_valid = status > 0;

	/* Find existing or free NHT entry */
	if (nht_find_existing(lpm, entry, &nht_pos) == 0) {
		uint8_t *nht_entry;

		if (nht_find_free(lpm, &nht_pos) == 0) {
			RTE_LOG(ERR, TABLE, "%s: NHT full\n", __func__);
			return -1;
		}

		nht_entry = &lpm->nht[nht_pos * lpm->entry_size];
		memcpy(nht_entry, entry, lpm->entry_size);
	}

	/* Add rule to low level LPM table */
	if (rte_lpm_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth,
		nht_pos) < 0) {
		RTE_LOG(ERR, TABLE, "%s: LPM rule add failed\n", __func__);
		return -1;
	}

	/* Commit NHT changes */
	lpm->nht_users[nht_pos]++;
	lpm->nht_users[nht_pos0] -= nht_pos0_valid;

	*key_found = nht_pos0_valid;
	*entry_ptr = (void *) &lpm->nht[nht_pos * lpm->entry_size];
	return 0;
}
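
/*
 * Usage sketch (illustrative values only): adding a /24 route whose entry
 * payload is a 32-bit next-hop id. The entry buffer must be at least
 * entry_size bytes; only the first entry_unique_size bytes are compared when
 * de-duplicating NHT slots.
 *
 *	struct rte_table_lpm_key key = {
 *		.ip = 0x0a000000,	// 10.0.0.0, in the byte order
 *					// expected by rte_lpm_add()
 *		.depth = 24,
 *	};
 *	uint32_t next_hop_id = 5;	// example payload
 *	int key_found;
 *	void *entry_ptr;
 *
 *	rte_table_lpm_ops.f_add(table, &key, &next_hop_id, &key_found,
 *		&entry_ptr);
 */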

static int
rte_table_lpm_entry_delete(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_lpm *lpm = table;
	struct rte_table_lpm_key *ip_prefix = key;
	uint32_t nht_pos;
	int status;

	/* Check input parameters */
	if (lpm == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}
	if (ip_prefix == NULL) {
		RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
			__func__);
		return -EINVAL;
	}
	if ((ip_prefix->depth == 0) || (ip_prefix->depth > 32)) {
		RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n", __func__,
			ip_prefix->depth);
		return -EINVAL;
	}

	/* Return if rule is not present in the table */
	status = rte_lpm_is_rule_present(lpm->lpm, ip_prefix->ip,
		ip_prefix->depth, &nht_pos);
	if (status < 0) {
		RTE_LOG(ERR, TABLE, "%s: LPM algorithmic error\n", __func__);
		return -1;
	}
	if (status == 0) {
		*key_found = 0;
		return 0;
	}

	/* Delete rule from the low-level LPM table */
	status = rte_lpm_delete(lpm->lpm, ip_prefix->ip, ip_prefix->depth);
	if (status) {
		RTE_LOG(ERR, TABLE, "%s: LPM rule delete failed\n", __func__);
		return -1;
	}

	/* Commit NHT changes */
	lpm->nht_users[nht_pos]--;

	*key_found = 1;
	if (entry)
		memcpy(entry, &lpm->nht[nht_pos * lpm->entry_size],
			lpm->entry_size);

	return 0;
}

static int
rte_table_lpm_lookup(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
	uint64_t pkts_out_mask = 0;
	uint32_t i;

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
	RTE_TABLE_LPM_STATS_PKTS_IN_ADD(lpm, n_pkts_in);

	pkts_out_mask = 0;
	for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
		__builtin_clzll(pkts_mask)); i++) {
		uint64_t pkt_mask = 1LLU << i;

		if (pkt_mask & pkts_mask) {
			struct rte_mbuf *pkt = pkts[i];
			uint32_t ip = rte_bswap32(
				RTE_MBUF_METADATA_UINT32(pkt, lpm->offset));
			int status;
			uint32_t nht_pos;

			status = rte_lpm_lookup(lpm->lpm, ip, &nht_pos);
			if (status == 0) {
				pkts_out_mask |= pkt_mask;
				entries[i] = (void *) &lpm->nht[nht_pos *
					lpm->entry_size];
			}
		}
	}

	*lookup_hit_mask = pkts_out_mask;
	RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(lpm, n_pkts_in -
		__builtin_popcountll(pkts_out_mask));
	return 0;
}
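
/*
 * Note on packet metadata: the lookup does not parse packet headers. For each
 * selected packet it reads a 32-bit value from the mbuf metadata at byte
 * offset lpm->offset, byte-swaps it and feeds the result to rte_lpm_lookup().
 * The stage in front of this table is therefore expected to have stored the
 * IPv4 destination address at that offset, in a representation whose byte
 * swap matches the .ip values used with f_add(). A hypothetical writer stage
 * could look roughly like this (APP_METADATA_OFFSET and ip_dst_be are
 * application-defined, not part of this file):
 *
 *	RTE_MBUF_METADATA_UINT32(pkt, APP_METADATA_OFFSET) = ip_dst_be;
 */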

static int
rte_table_lpm_stats_read(void *table, struct rte_table_stats *stats, int clear)
{
	struct rte_table_lpm *t = table;

	if (stats != NULL)
		memcpy(stats, &t->stats, sizeof(t->stats));

	if (clear)
		memset(&t->stats, 0, sizeof(t->stats));

	return 0;
}

struct rte_table_ops rte_table_lpm_ops = {
	.f_create = rte_table_lpm_create,
	.f_free = rte_table_lpm_free,
	.f_add = rte_table_lpm_entry_add,
	.f_delete = rte_table_lpm_entry_delete,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_lpm_lookup,
	.f_stats = rte_table_lpm_stats_read,
};
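
/*
 * Usage sketch (hypothetical packet burst, illustrative only): the ops table
 * above is normally plugged into the DPDK pipeline library, but it can also
 * be driven directly. A burst of up to 64 packets is described by a bit mask;
 * entries[] receives a pointer into the NHT for every packet that hits.
 *
 *	struct rte_mbuf *pkts[64];
 *	void *entries[64];
 *	uint64_t hit_mask;
 *	uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t); // n_pkts <= 64
 *
 *	rte_table_lpm_ops.f_lookup(table, pkts, pkts_mask, &hit_mask, entries);
 */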