xref: /mOS-networking-stack/core/src/addr_pool.c (revision 152f7c19)
1 #include <stdio.h>
2 #include <stdlib.h>
3 #include <pthread.h>
4 #include "addr_pool.h"
5 #include "util.h"
6 #include "debug.h"
7 #include "config.h"
8 
9 #define MIN_PORT (1025)
10 #define MAX_PORT (65535 + 1)
11 
12 /*----------------------------------------------------------------------------*/
struct addr_entry
{
	struct sockaddr_in addr;           /* the (source IP, source port) pair this entry hands out */
	TAILQ_ENTRY(addr_entry) addr_link; /* linkage: entry lives on either free_list or used_list */
};
18 /*----------------------------------------------------------------------------*/
struct addr_map
{
	/* per-port back-pointer into the pool array for one source IP:
	 * addrmap[port] -> its addr_entry, or NULL if that port is not pooled.
	 * Sized MAX_PORT so any 16-bit port value indexes safely. */
	struct addr_entry *addrmap[MAX_PORT];
};
23 /*----------------------------------------------------------------------------*/
struct addr_pool
{
	struct addr_entry *pool;		/* backing array of (ip, port) entries */
	struct addr_map *mapper;		/* per-IP port -> entry lookup table (O(1) free) */

	uint32_t addr_base;				/* first source IP of the pool, in host order */
	int num_addr;					/* number of consecutive source IPs covered */

	int num_entry;					/* total entries actually populated */
	int num_free;					/* entries currently on free_list */
	int num_used;					/* entries currently on used_list */

	pthread_mutex_t lock;			/* guards both lists and the counters above */
	TAILQ_HEAD(, addr_entry) free_list;	/* entries available to hand out */
	TAILQ_HEAD(, addr_entry) used_list;	/* entries currently leased to callers */
};
40 /*----------------------------------------------------------------------------*/
41 addr_pool_t
42 CreateAddressPool(in_addr_t addr_base, int num_addr)
43 {
44 	struct addr_pool *ap;
45 	int num_entry;
46 	int i, j, cnt;
47 	in_addr_t addr;
48 	uint32_t addr_h;
49 
50 	ap = (addr_pool_t)calloc(1, sizeof(struct addr_pool));
51 	if (!ap)
52 		return NULL;
53 
54 	/* initialize address pool */
55 	num_entry = num_addr * (MAX_PORT - MIN_PORT);
56 	ap->pool = (struct addr_entry *)calloc(num_entry, sizeof(struct addr_entry));
57 	if (!ap->pool) {
58 		free(ap);
59 		return NULL;
60 	}
61 
62 	/* initialize address map */
63 	ap->mapper = (struct addr_map *)calloc(num_addr, sizeof(struct addr_map));
64 	if (!ap->mapper) {
65 		free(ap->pool);
66 		free(ap);
67 		return NULL;
68 	}
69 
70 	TAILQ_INIT(&ap->free_list);
71 	TAILQ_INIT(&ap->used_list);
72 
73 	if (pthread_mutex_init(&ap->lock, NULL)) {
74 		free(ap->pool);
75 		free(ap);
76 		return NULL;
77 	}
78 
79 	pthread_mutex_lock(&ap->lock);
80 
81 	ap->addr_base = ntohl(addr_base);
82 	ap->num_addr = num_addr;
83 
84 	cnt = 0;
85 	for (i = 0; i < num_addr; i++) {
86 		addr_h = ap->addr_base + i;
87 		addr = htonl(addr_h);
88 		for (j = MIN_PORT; j < MAX_PORT; j++) {
89 			ap->pool[cnt].addr.sin_addr.s_addr = addr;
90 			ap->pool[cnt].addr.sin_port = htons(j);
91 			ap->mapper[i].addrmap[j] = &ap->pool[cnt];
92 
93 			TAILQ_INSERT_TAIL(&ap->free_list, &ap->pool[cnt], addr_link);
94 
95 			if ((++cnt) >= num_entry)
96 				break;
97 		}
98 	}
99 	ap->num_entry = cnt;
100 	ap->num_free = cnt;
101 	ap->num_used = 0;
102 
103 	pthread_mutex_unlock(&ap->lock);
104 
105 	return ap;
106 }
107 /*----------------------------------------------------------------------------*/
108 addr_pool_t
109 CreateAddressPoolPerCore(int core, int num_queues,
110 		in_addr_t saddr_base, int num_addr, in_addr_t daddr, in_port_t dport)
111 {
112 	struct addr_pool *ap;
113 	int num_entry;
114 	int i, j, cnt;
115 	in_addr_t saddr;
116 	uint32_t saddr_h, daddr_h;
117 	uint16_t sport_h, dport_h;
118 	int rss_core;
119 
120 	ap = (addr_pool_t)calloc(1, sizeof(struct addr_pool));
121 	if (!ap)
122 		return NULL;
123 
124 	/* initialize address pool */
125 	num_entry = (num_addr * (MAX_PORT - MIN_PORT)) / num_queues;
126 	ap->pool = (struct addr_entry *)calloc(num_entry, sizeof(struct addr_entry));
127 	if (!ap->pool) {
128 		free(ap);
129 		return NULL;
130 	}
131 
132 	/* initialize address map */
133 	ap->mapper = (struct addr_map *)calloc(num_addr, sizeof(struct addr_map));
134 	if (!ap->mapper) {
135 		free(ap->pool);
136 		free(ap);
137 		return NULL;
138 	}
139 
140 	TAILQ_INIT(&ap->free_list);
141 	TAILQ_INIT(&ap->used_list);
142 
143 	if (pthread_mutex_init(&ap->lock, NULL)) {
144 		free(ap->pool);
145 		free(ap);
146 		return NULL;
147 	}
148 
149 	pthread_mutex_lock(&ap->lock);
150 
151 	ap->addr_base = ntohl(saddr_base);
152 	ap->num_addr = num_addr;
153 	daddr_h = ntohl(daddr);
154 	dport_h = ntohs(dport);
155 
156 	/* search address space to get RSS-friendly addresses */
157 	cnt = 0;
158 	for (i = 0; i < num_addr; i++) {
159 		saddr_h = ap->addr_base + i;
160 		saddr = htonl(saddr_h);
161 		for (j = MIN_PORT; j < MAX_PORT; j++) {
162 			if (cnt >= num_entry)
163 				break;
164 
165 			sport_h = j;
166 			rss_core = GetRSSCPUCore(daddr_h, saddr_h, dport_h, sport_h, num_queues);
167 			if (rss_core != core)
168 				continue;
169 
170 			ap->pool[cnt].addr.sin_addr.s_addr = saddr;
171 			ap->pool[cnt].addr.sin_port = htons(sport_h);
172 			ap->mapper[i].addrmap[j] = &ap->pool[cnt];
173 			TAILQ_INSERT_TAIL(&ap->free_list, &ap->pool[cnt], addr_link);
174 			cnt++;
175 		}
176 	}
177 
178 	ap->num_entry = cnt;
179 	ap->num_free = cnt;
180 	ap->num_used = 0;
181 	//fprintf(stderr, "CPU %d: Created %d address entries.\n", core, cnt);
182 	if (ap->num_entry < g_config.mos->max_concurrency) {
183 		fprintf(stderr, "[WARINING] Available # addresses (%d) is smaller than"
184 				" the max concurrency (%d).\n",
185 				ap->num_entry, g_config.mos->max_concurrency);
186 	}
187 
188 	pthread_mutex_unlock(&ap->lock);
189 
190 	return ap;
191 }
192 /*----------------------------------------------------------------------------*/
193 void
194 DestroyAddressPool(addr_pool_t ap)
195 {
196 	if (!ap)
197 		return;
198 
199 	if (ap->pool) {
200 		free(ap->pool);
201 		ap->pool = NULL;
202 	}
203 
204 	if (ap->mapper) {
205 		free(ap->mapper);
206 		ap->mapper = NULL;
207 	}
208 
209 	pthread_mutex_destroy(&ap->lock);
210 
211 	free(ap);
212 }
213 /*----------------------------------------------------------------------------*/
214 int
215 FetchAddress(addr_pool_t ap, int core, int num_queues,
216 		const struct sockaddr_in *daddr, struct sockaddr_in *saddr)
217 {
218 	struct addr_entry *walk, *next;
219 	int rss_core;
220 	int ret = -1;
221 
222 	if (!ap || !daddr || !saddr)
223 		return -1;
224 
225 	pthread_mutex_lock(&ap->lock);
226 
227 	walk = TAILQ_FIRST(&ap->free_list);
228 	while (walk) {
229 		next = TAILQ_NEXT(walk, addr_link);
230 
231 		if (saddr->sin_addr.s_addr != INADDR_ANY &&
232 		    walk->addr.sin_addr.s_addr != saddr->sin_addr.s_addr) {
233 			walk = next;
234 			continue;
235 		}
236 
237 		if (saddr->sin_port != INPORT_ANY &&
238 		    walk->addr.sin_port != saddr->sin_port) {
239 			walk = next;
240 			continue;
241 		}
242 
243 		rss_core = GetRSSCPUCore(ntohl(walk->addr.sin_addr.s_addr),
244 				ntohl(daddr->sin_addr.s_addr), ntohs(walk->addr.sin_port),
245 				ntohs(daddr->sin_port), num_queues);
246 
247 		if (core == rss_core)
248 			break;
249 
250 		walk = next;
251 	}
252 
253 	if (walk) {
254 		*saddr = walk->addr;
255 		TAILQ_REMOVE(&ap->free_list, walk, addr_link);
256 		TAILQ_INSERT_TAIL(&ap->used_list, walk, addr_link);
257 		ap->num_free--;
258 		ap->num_used++;
259 		ret = 0;
260 	}
261 
262 	pthread_mutex_unlock(&ap->lock);
263 
264 	return ret;
265 }
266 /*----------------------------------------------------------------------------*/
267 int
268 FreeAddress(addr_pool_t ap, const struct sockaddr_in *addr)
269 {
270 	struct addr_entry *walk, *next;
271 	int ret = -1;
272 
273 	if (!ap || !addr)
274 		return -1;
275 
276 	pthread_mutex_lock(&ap->lock);
277 
278 	if (ap->mapper) {
279 		uint32_t addr_h = ntohl(addr->sin_addr.s_addr);
280 		uint16_t port_h = ntohs(addr->sin_port);
281 		int index = addr_h - ap->addr_base;
282 
283 		if (index >= 0 || index < ap->num_addr) {
284 			walk = ap->mapper[addr_h - ap->addr_base].addrmap[port_h];
285 		} else {
286 			walk = NULL;
287 		}
288 
289 	} else {
290 		walk = TAILQ_FIRST(&ap->used_list);
291 		while (walk) {
292 			next = TAILQ_NEXT(walk, addr_link);
293 			if (addr->sin_port == walk->addr.sin_port &&
294 					addr->sin_addr.s_addr == walk->addr.sin_addr.s_addr) {
295 				break;
296 			}
297 
298 			walk = next;
299 		}
300 
301 	}
302 
303 	if (walk) {
304 		TAILQ_REMOVE(&ap->used_list, walk, addr_link);
305 		TAILQ_INSERT_TAIL(&ap->free_list, walk, addr_link);
306 		ap->num_free++;
307 		ap->num_used--;
308 		ret = 0;
309 	}
310 
311 	pthread_mutex_unlock(&ap->lock);
312 
313 	return ret;
314 }
315 /*----------------------------------------------------------------------------*/
316