xref: /iperf/src/iperf_server_api.c (revision 8ffe72e2)
1 /*
2  * iperf, Copyright (c) 2014-2021 The Regents of the University of
3  * California, through Lawrence Berkeley National Laboratory (subject
4  * to receipt of any required approvals from the U.S. Dept. of
5  * Energy).  All rights reserved.
6  *
7  * If you have questions about your rights to use or distribute this
8  * software, please contact Berkeley Lab's Technology Transfer
9  * Department at [email protected].
10  *
11  * NOTICE.  This software is owned by the U.S. Department of Energy.
12  * As such, the U.S. Government has been granted for itself and others
13  * acting on its behalf a paid-up, nonexclusive, irrevocable,
14  * worldwide license in the Software to reproduce, prepare derivative
15  * works, and perform publicly and display publicly.  Beginning five
16  * (5) years after the date permission to assert copyright is obtained
17  * from the U.S. Department of Energy, and subject to any subsequent
18  * five (5) year renewals, the U.S. Government is granted for itself
19  * and others acting on its behalf a paid-up, nonexclusive,
20  * irrevocable, worldwide license in the Software to reproduce,
21  * prepare derivative works, distribute copies to the public, perform
22  * publicly and display publicly, and to permit others to do so.
23  *
24  * This code is distributed under a BSD style license, see the LICENSE
25  * file for complete information.
26  */
27 /* iperf_server_api.c: Functions to be used by an iperf server
28 */
29 
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <getopt.h>
34 #include <errno.h>
35 #include <unistd.h>
36 #include <assert.h>
37 #include <fcntl.h>
38 #include <sys/socket.h>
39 #include <sys/types.h>
40 #include <netinet/in.h>
41 #include <arpa/inet.h>
42 #include <netdb.h>
43 #ifdef HAVE_STDINT_H
44 #include <stdint.h>
45 #endif
46 #include <sys/time.h>
47 #include <sys/resource.h>
48 #include <sched.h>
49 #include <setjmp.h>
50 
51 #include "iperf.h"
52 #include "iperf_api.h"
53 #include "iperf_udp.h"
54 #include "iperf_tcp.h"
55 #include "iperf_util.h"
56 #include "timer.h"
57 #include "iperf_time.h"
58 #include "net.h"
59 #include "units.h"
61 #include "iperf_locale.h"
62 
63 #if defined(HAVE_TCP_CONGESTION)
64 #if !defined(TCP_CA_NAME_MAX)
65 #define TCP_CA_NAME_MAX 16
66 #endif /* TCP_CA_NAME_MAX */
67 #endif /* HAVE_TCP_CONGESTION */
68 
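/*
 * iperf_server_listen -- create the server's listening control socket.
 *
 * Announces on the configured address/device/port, falling back from
 * IPv6 to IPv4 if the running kernel lacks IPv6 support, prints the
 * "Server listening" banner, and adds the listener to the read set.
 *
 * Returns 0 on success, -1 on error (with i_errno set).
 */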
69 int
70 iperf_server_listen(struct iperf_test *test)
71 {
72     retry:
73     if((test->listener = netannounce(test->settings->domain, Ptcp, test->bind_address, test->bind_dev, test->server_port)) < 0) {
74 	if (errno == EAFNOSUPPORT && (test->settings->domain == AF_INET6 || test->settings->domain == AF_UNSPEC)) {
75 	    /* If we get "Address family not supported by protocol", that
76 	    ** probably means we were compiled with IPv6 but the running
77 	    ** kernel does not actually do IPv6.  This is not too unusual,
78 	    ** v6 support is and perhaps always will be spotty.
79 	    */
80 	    warning("this system does not seem to support IPv6 - trying IPv4");
81 	    test->settings->domain = AF_INET;
82 	    goto retry;
83 	} else {
84 	    i_errno = IELISTEN;
85 	    return -1;
86 	}
87     }
88 
89     if (!test->json_output) {
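        /*
         * A previous run that returned 2 ended in an idle-timeout
         * restart; don't count it as a new test, and skip the banner
         * for it unless debugging.
         */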
90         if (test->server_last_run_rc != 2)
91             test->server_test_number +=1;
92         if (test->debug || test->server_last_run_rc != 2) {
93 	    iperf_printf(test, "-----------------------------------------------------------\n");
94 	    iperf_printf(test, "Server listening on %d (test #%d)\n", test->server_port, test->server_test_number);
95 	    iperf_printf(test, "-----------------------------------------------------------\n");
96 	    if (test->forceflush)
97 	        iflush(test);
98         }
99     }
100 
101     FD_ZERO(&test->read_set);
102     FD_ZERO(&test->write_set);
103     FD_SET(test->listener, &test->read_set);
104     if (test->listener > test->max_fd) test->max_fd = test->listener;
105 
106     return 0;
107 }
108 
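/*
 * iperf_accept -- accept a new connection on the listener socket.
 *
 * If no test is in progress, the connection becomes the control socket:
 * TCP_NODELAY is set, the client's cookie is read, and the parameter
 * exchange is started.  If a test is already running, the new client is
 * sent ACCESS_DENIED and the connection is closed.
 *
 * Returns 0 on success, -1 on error (with i_errno set).
 */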
109 int
110 iperf_accept(struct iperf_test *test)
111 {
112     int s;
113     signed char rbuf = ACCESS_DENIED;
114     socklen_t len;
115     struct sockaddr_storage addr;
116 
117     len = sizeof(addr);
118     if ((s = accept(test->listener, (struct sockaddr *) &addr, &len)) < 0) {
119         i_errno = IEACCEPT;
120         return -1;
121     }
122 
123     if (test->ctrl_sck == -1) {
124         /* Server free, accept new client */
125         test->ctrl_sck = s;
126         // set TCP_NODELAY for lower latency on control messages
127         int flag = 1;
128         if (setsockopt(test->ctrl_sck, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(int))) {
129             i_errno = IESETNODELAY;
130             return -1;
131         }
132 
133         if (Nread(test->ctrl_sck, test->cookie, COOKIE_SIZE, Ptcp) < 0) {
134             i_errno = IERECVCOOKIE;
135             return -1;
136         }
137 	FD_SET(test->ctrl_sck, &test->read_set);
138 	if (test->ctrl_sck > test->max_fd) test->max_fd = test->ctrl_sck;
139 
140 	if (iperf_set_send_state(test, PARAM_EXCHANGE) != 0)
141             return -1;
142         if (iperf_exchange_parameters(test) < 0)
143             return -1;
144 	if (test->server_affinity != -1)
145 	    if (iperf_setaffinity(test, test->server_affinity) != 0)
146 		return -1;
147         if (test->on_connect)
148             test->on_connect(test);
149     } else {
150 	/*
151 	 * Don't try to read from the socket.  It could block an ongoing test.
152 	 * Just send ACCESS_DENIED.
153 	 */
154         if (Nwrite(s, (char*) &rbuf, sizeof(rbuf), Ptcp) < 0) {
155             i_errno = IESENDMESSAGE;
156             return -1;
157         }
158         close(s);
159     }
160 
161     return 0;
162 }
163 
164 
165 /**************************************************************************/
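/*
 * iperf_handle_message_server -- read the next state from the control
 * socket and act on it (run the end-of-test sequence, handle client
 * termination, and so on).
 *
 * Returns 0 on success or when the client closed the connection,
 * -1 on error (with i_errno set).
 */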
166 int
167 iperf_handle_message_server(struct iperf_test *test)
168 {
169     int rval;
170     struct iperf_stream *sp;
171 
172     // XXX: Need to rethink how this behaves to fit API
173     if ((rval = Nread(test->ctrl_sck, (char*) &test->state, sizeof(signed char), Ptcp)) <= 0) {
174         if (rval == 0) {
175 	    iperf_err(test, "the client has unexpectedly closed the connection");
176             i_errno = IECTRLCLOSE;
177             test->state = IPERF_DONE;
178             return 0;
179         } else {
180             i_errno = IERECVMESSAGE;
181             return -1;
182         }
183     }
184 
185     switch(test->state) {
186         case TEST_START:
187             break;
188         case TEST_END:
189 	    test->done = 1;
190             cpu_util(test->cpu_util);
191             test->stats_callback(test);
192             SLIST_FOREACH(sp, &test->streams, streams) {
193                 FD_CLR(sp->socket, &test->read_set);
194                 FD_CLR(sp->socket, &test->write_set);
195                 close(sp->socket);
196             }
197             test->reporter_callback(test);
198 	    if (iperf_set_send_state(test, EXCHANGE_RESULTS) != 0)
199                 return -1;
200             if (iperf_exchange_results(test) < 0)
201                 return -1;
202 	    if (iperf_set_send_state(test, DISPLAY_RESULTS) != 0)
203                 return -1;
204             if (test->on_test_finish)
205                 test->on_test_finish(test);
206             break;
207         case IPERF_DONE:
208             break;
209         case CLIENT_TERMINATE:
210             i_errno = IECLIENTTERM;
211 
212 	    // Temporarily be in DISPLAY_RESULTS phase so we can get
213 	    // ending summary statistics.
214 	    signed char oldstate = test->state;
215 	    cpu_util(test->cpu_util);
216 	    test->state = DISPLAY_RESULTS;
217 	    test->reporter_callback(test);
218 	    test->state = oldstate;
219 
220             // XXX: Remove this line below!
221 	    iperf_err(test, "the client has terminated");
222             SLIST_FOREACH(sp, &test->streams, streams) {
223                 FD_CLR(sp->socket, &test->read_set);
224                 FD_CLR(sp->socket, &test->write_set);
225                 close(sp->socket);
226             }
227             test->state = IPERF_DONE;
228             break;
229         default:
230             i_errno = IEMESSAGE;
231             return -1;
232     }
233 
234     return 0;
235 }
236 
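/*
 * server_timer_proc -- watchdog timer callback.  Fires when a test has
 * run past its expected duration plus the grace period; marks the test
 * done and tears down the streams and the control socket.
 */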
237 static void
238 server_timer_proc(TimerClientData client_data, struct iperf_time *nowP)
239 {
240     struct iperf_test *test = client_data.p;
241     struct iperf_stream *sp;
242 
243     test->timer = NULL;
244     if (test->done)
245         return;
246     test->done = 1;
247     /* Free streams */
248     while (!SLIST_EMPTY(&test->streams)) {
249         sp = SLIST_FIRST(&test->streams);
250         SLIST_REMOVE_HEAD(&test->streams, streams);
251         close(sp->socket);
252         iperf_free_stream(sp);
253     }
254     close(test->ctrl_sck);
255 }
256 
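/*
 * server_stats_timer_proc -- recurring timer callback that gathers
 * interval statistics via the stats callback.
 */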
257 static void
258 server_stats_timer_proc(TimerClientData client_data, struct iperf_time *nowP)
259 {
260     struct iperf_test *test = client_data.p;
261 
262     if (test->done)
263         return;
264     if (test->stats_callback)
265 	test->stats_callback(test);
266 }
267 
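/*
 * server_reporter_timer_proc -- recurring timer callback that prints
 * interval reports via the reporter callback.
 */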
268 static void
269 server_reporter_timer_proc(TimerClientData client_data, struct iperf_time *nowP)
270 {
271     struct iperf_test *test = client_data.p;
272 
273     if (test->done)
274         return;
275     if (test->reporter_callback)
276 	test->reporter_callback(test);
277 }
278 
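/*
 * create_server_timers -- set up the server-side timers: a one-shot
 * watchdog sized to the test duration plus the omit period plus a
 * grace period, and recurring stats/reporter interval timers.
 *
 * Returns 0 on success, -1 on error (with i_errno set).
 */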
279 static int
280 create_server_timers(struct iperf_test * test)
281 {
282     struct iperf_time now;
283     TimerClientData cd;
284     int max_rtt = 4; /* seconds */
285     int state_transitions = 10; /* number of state transitions in iperf3 */
286     int grace_period = max_rtt * state_transitions;
287 
288     if (iperf_time_now(&now) < 0) {
289 	i_errno = IEINITTEST;
290 	return -1;
291     }
292     cd.p = test;
293     test->timer = test->stats_timer = test->reporter_timer = NULL;
294     if (test->duration != 0 ) {
295         test->done = 0;
296         test->timer = tmr_create(&now, server_timer_proc, cd, (test->duration + test->omit + grace_period) * SEC_TO_US, 0);
297         if (test->timer == NULL) {
298             i_errno = IEINITTEST;
299             return -1;
300         }
301     }
302 
303     test->stats_timer = test->reporter_timer = NULL;
304     if (test->stats_interval != 0) {
305         test->stats_timer = tmr_create(&now, server_stats_timer_proc, cd, test->stats_interval * SEC_TO_US, 1);
306         if (test->stats_timer == NULL) {
307             i_errno = IEINITTEST;
308             return -1;
309 	}
310     }
311     if (test->reporter_interval != 0) {
312         test->reporter_timer = tmr_create(&now, server_reporter_timer_proc, cd, test->reporter_interval * SEC_TO_US, 1);
313         if (test->reporter_timer == NULL) {
314             i_errno = IEINITTEST;
315             return -1;
316 	}
317     }
318     return 0;
319 }
320 
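/*
 * server_omit_timer_proc -- one-shot timer callback that ends the omit
 * period: statistics are reset and the stats/reporter timers restarted.
 */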
321 static void
322 server_omit_timer_proc(TimerClientData client_data, struct iperf_time *nowP)
323 {
324     struct iperf_test *test = client_data.p;
325 
326     test->omit_timer = NULL;
327     test->omitting = 0;
328     iperf_reset_stats(test);
329     if (test->verbose && !test->json_output && test->reporter_interval == 0)
330 	iperf_printf(test, "%s", report_omit_done);
331 
332     /* Reset the timers. */
333     if (test->stats_timer != NULL)
334 	tmr_reset(nowP, test->stats_timer);
335     if (test->reporter_timer != NULL)
336 	tmr_reset(nowP, test->reporter_timer);
337 }
338 
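/*
 * create_server_omit_timer -- if an omit period was requested, arm a
 * one-shot timer to end it; otherwise mark the test as not omitting.
 *
 * Returns 0 on success, -1 on error (with i_errno set).
 */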
339 static int
340 create_server_omit_timer(struct iperf_test * test)
341 {
342     struct iperf_time now;
343     TimerClientData cd;
344 
345     if (test->omit == 0) {
346 	test->omit_timer = NULL;
347 	test->omitting = 0;
348     } else {
349 	if (iperf_time_now(&now) < 0) {
350 	    i_errno = IEINITTEST;
351 	    return -1;
352 	}
353 	test->omitting = 1;
354 	cd.p = test;
355 	test->omit_timer = tmr_create(&now, server_omit_timer_proc, cd, test->omit * SEC_TO_US, 0);
356 	if (test->omit_timer == NULL) {
357 	    i_errno = IEINITTEST;
358 	    return -1;
359 	}
360     }
361 
362     return 0;
363 }
364 
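/*
 * cleanup_server -- close any open stream, control and listener
 * sockets, cancel outstanding timers, and free the saved congestion
 * control name so the server can be restarted cleanly.
 */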
365 static void
366 cleanup_server(struct iperf_test *test)
367 {
368     struct iperf_stream *sp;
369 
370     /* Close open streams */
371     SLIST_FOREACH(sp, &test->streams, streams) {
372 	FD_CLR(sp->socket, &test->read_set);
373 	FD_CLR(sp->socket, &test->write_set);
374 	close(sp->socket);
375     }
376 
377     /* Close open test sockets */
378     if (test->ctrl_sck) {
379 	close(test->ctrl_sck);
380     }
381     if (test->listener) {
382 	close(test->listener);
383     }
384     if (test->prot_listener > -1) {     // May remain open if creating the socket failed
385 	close(test->prot_listener);
386     }
387 
388     /* Cancel any remaining timers. */
389     if (test->stats_timer != NULL) {
390 	tmr_cancel(test->stats_timer);
391 	test->stats_timer = NULL;
392     }
393     if (test->reporter_timer != NULL) {
394 	tmr_cancel(test->reporter_timer);
395 	test->reporter_timer = NULL;
396     }
397     if (test->omit_timer != NULL) {
398 	tmr_cancel(test->omit_timer);
399 	test->omit_timer = NULL;
400     }
401     if (test->congestion_used != NULL) {
402         free(test->congestion_used);
403 	test->congestion_used = NULL;
404     }
405     if (test->timer != NULL) {
406         tmr_cancel(test->timer);
407         test->timer = NULL;
408     }
409 }
410 
411 
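/*
 * iperf_run_server -- run one server test cycle.
 *
 * Listens for a client, accepts the control connection and the test
 * streams, drives the test with select(), and cleans up afterwards.
 *
 * Returns 0 on a normal run, 2 when restarting after an idle timeout,
 * and a negative value on error (with i_errno set).
 */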
412 int
413 iperf_run_server(struct iperf_test *test)
414 {
415     int result, s;
416     int send_streams_accepted, rec_streams_accepted;
417     int streams_to_send = 0, streams_to_rec = 0;
418 #if defined(HAVE_TCP_CONGESTION)
419     int saved_errno;
420 #endif /* HAVE_TCP_CONGESTION */
421     fd_set read_set, write_set;
422     struct iperf_stream *sp;
423     struct iperf_time now;
424     struct iperf_time last_receive_time;
425     struct iperf_time diff_time;
426     struct timeval* timeout;
427     struct timeval used_timeout;
428     int flag = -1;      /* sender flag for the next accepted stream; -1 means none pending */
429     int64_t t_usecs;
430     int64_t timeout_us;
431     int64_t rcv_timeout_us;
432 
433     if (test->logfile)
434         if (iperf_open_logfile(test) < 0)
435             return -1;
436 
437     if (test->affinity != -1)
438 	if (iperf_setaffinity(test, test->affinity) != 0)
439 	    return -2;
440 
441     if (test->json_output)
442 	if (iperf_json_start(test) < 0)
443 	    return -2;
444 
445     if (test->json_output) {
446 	cJSON_AddItemToObject(test->json_start, "version", cJSON_CreateString(version));
447 	cJSON_AddItemToObject(test->json_start, "system_info", cJSON_CreateString(get_system_info()));
448     } else if (test->verbose) {
449 	iperf_printf(test, "%s\n", version);
450 	iperf_printf(test, "%s", "");
451 	iperf_printf(test, "%s\n", get_system_info());
452 	iflush(test);
453     }
454 
455     // Open socket and listen
456     if (iperf_server_listen(test) < 0) {
457         return -2;
458     }
459 
460     iperf_time_now(&last_receive_time); // Initialize last time something was received
461 
462     test->state = IPERF_START;
463     send_streams_accepted = 0;
464     rec_streams_accepted = 0;
465     rcv_timeout_us = (test->settings->rcv_timeout.secs * SEC_TO_US) + test->settings->rcv_timeout.usecs;
466 
467     while (test->state != IPERF_DONE) {
468 
469         // Check whether the average transfer rate exceeded the limit (the flag is set in the callback routines)
470 	if (test->bitrate_limit_exceeded) {
471 	    cleanup_server(test);
472             i_errno = IETOTALRATE;
473             return -1;
474 	}
475 
476         memcpy(&read_set, &test->read_set, sizeof(fd_set));
477         memcpy(&write_set, &test->write_set, sizeof(fd_set));
478 
479 	iperf_time_now(&now);
480 	timeout = tmr_timeout(&now);
481 
482         // Ensure select() will time out so that error cases requiring a server restart can be handled
483         if (test->state == IPERF_START) {       // In the idle state the server may need to restart
484             if (timeout == NULL && test->settings->idle_timeout > 0) {
485                 used_timeout.tv_sec = test->settings->idle_timeout;
486                 used_timeout.tv_usec = 0;
487                 timeout = &used_timeout;
488             }
489         } else if (test->mode != SENDER) {     // In a non-reverse active test the server checks that data is still being received
490             timeout_us = -1;
491             if (timeout != NULL) {
492                 used_timeout.tv_sec = timeout->tv_sec;
493                 used_timeout.tv_usec = timeout->tv_usec;
494                 timeout_us = (timeout->tv_sec * SEC_TO_US) + timeout->tv_usec;
495             }
496             if (timeout_us < 0 || timeout_us > rcv_timeout_us) {
497                 used_timeout.tv_sec = test->settings->rcv_timeout.secs;
498                 used_timeout.tv_usec = test->settings->rcv_timeout.usecs;
499             }
500             timeout = &used_timeout;
501         }
502 
503         result = select(test->max_fd + 1, &read_set, &write_set, NULL, timeout);
504         if (result < 0 && errno != EINTR) {
505             cleanup_server(test);
506             i_errno = IESELECT;
507             return -1;
508         } else if (result == 0) {
509             // If nothing was received during the specified (per-state) time,
510             // then probably something got stuck at the client, the server or in the network,
511             // and the test should be forced to end.
512             iperf_time_now(&now);
513             t_usecs = 0;
514             if (iperf_time_diff(&now, &last_receive_time, &diff_time) == 0) {
515                 t_usecs = iperf_time_in_usecs(&diff_time);
516                 if (test->state == IPERF_START) {
517                     if (test->settings->idle_timeout > 0 && t_usecs >= test->settings->idle_timeout * SEC_TO_US) {
518                         test->server_forced_idle_restarts_count += 1;
519                         if (test->debug)
520                             printf("Server restart (#%d) in idle state as no connection request was received for %d sec\n",
521                                 test->server_forced_idle_restarts_count, test->settings->idle_timeout);
522                         cleanup_server(test);
523                         return 2;
524                     }
525                 }
526                 else if (test->mode != SENDER && t_usecs > rcv_timeout_us) {
527                     test->server_forced_no_msg_restarts_count += 1;
528                     i_errno = IENOMSG;
529                     if (iperf_get_verbose(test))
530                         iperf_err(test, "Server restart (#%d) in active test as message receive timed-out",
531                                   test->server_forced_no_msg_restarts_count);
532                     cleanup_server(test);
533                     return -1;
534                 }
535 
536             }
537         }
538 
539 	if (result > 0) {
540             iperf_time_now(&last_receive_time);
541             if (FD_ISSET(test->listener, &read_set)) {
542                 if (test->state != CREATE_STREAMS) {
543                     if (iperf_accept(test) < 0) {
544 			cleanup_server(test);
545                         return -1;
546                     }
547                     FD_CLR(test->listener, &read_set);
548 
549                     // Set the number of streams to send and to receive
550                     if (test->mode == BIDIRECTIONAL) {
551                         streams_to_send = test->num_streams;
552                         streams_to_rec = test->num_streams;
553                     } else if (test->mode == RECEIVER) {
554                         streams_to_rec = test->num_streams;
555                         streams_to_send = 0;
556                     } else {
557                         streams_to_send = test->num_streams;
558                         streams_to_rec = 0;
559                     }
560                 }
561             }
562             if (FD_ISSET(test->ctrl_sck, &read_set)) {
563                 if (iperf_handle_message_server(test) < 0) {
564 		    cleanup_server(test);
565                     return -1;
566 		}
567                 FD_CLR(test->ctrl_sck, &read_set);
568             }
569 
570             if (test->state == CREATE_STREAMS) {
571                 if (FD_ISSET(test->prot_listener, &read_set)) {
572 
573                     if ((s = test->protocol->accept(test)) < 0) {
574 			cleanup_server(test);
575                         return -1;
576 		    }
577 
578 #if defined(HAVE_TCP_CONGESTION)
579 		    if (test->protocol->id == Ptcp) {
580 			if (test->congestion) {
581 			    if (setsockopt(s, IPPROTO_TCP, TCP_CONGESTION, test->congestion, strlen(test->congestion)) < 0) {
582 				/*
583 				 * ENOENT means we tried to set the
584 				 * congestion algorithm but the algorithm
585 				 * specified doesn't exist.  This can happen
586 				 * if the client and server have different
587 				 * congestion algorithms available.  In this
588 				 * case, print a warning, but otherwise
589 				 * continue.
590 				 */
591 				if (errno == ENOENT) {
592 				    warning("TCP congestion control algorithm not supported");
593 				}
594 				else {
595 				    saved_errno = errno;
596 				    close(s);
597 				    cleanup_server(test);
598 				    errno = saved_errno;
599 				    i_errno = IESETCONGESTION;
600 				    return -1;
601 				}
602 			    }
603 			}
604 			{
605 			    socklen_t len = TCP_CA_NAME_MAX;
606 			    char ca[TCP_CA_NAME_MAX + 1];
607 			    if (getsockopt(s, IPPROTO_TCP, TCP_CONGESTION, ca, &len) < 0) {
608 				saved_errno = errno;
609 				close(s);
610 				cleanup_server(test);
611 				errno = saved_errno;
612 				i_errno = IESETCONGESTION;
613 				return -1;
614 			    }
615                             /*
616                              * If not the first connection, discard prior
617                              * congestion algorithm name so we don't leak
618                              * duplicated strings.  We probably don't need
619                              * the old string anyway.
620                              */
621                             if (test->congestion_used != NULL) {
622                                 free(test->congestion_used);
623                             }
624                             test->congestion_used = strdup(ca);
625 			    if (test->debug) {
626 				printf("Congestion algorithm is %s\n", test->congestion_used);
627 			    }
628 			}
629 		    }
630 #endif /* HAVE_TCP_CONGESTION */
631 
632                     if (!is_closed(s)) {
633 
634                         if (rec_streams_accepted != streams_to_rec) {
635                             flag = 0;
636                             ++rec_streams_accepted;
637                         } else if (send_streams_accepted != streams_to_send) {
638                             flag = 1;
639                             ++send_streams_accepted;
640                         }
641 
642                         if (flag != -1) {
643                             sp = iperf_new_stream(test, s, flag);
644                             if (!sp) {
645                                 cleanup_server(test);
646                                 return -1;
647                             }
648 
649                             if (sp->sender)
650                                 FD_SET(s, &test->write_set);
651                             else
652                                 FD_SET(s, &test->read_set);
653 
654                             if (s > test->max_fd) test->max_fd = s;
655 
656                             /*
657                              * If the protocol isn't UDP, or even if it is but
658                              * we're the receiver, set nonblocking sockets.
659                              * We need this to allow a server receiver to
660                              * maintain interactivity with the control channel.
661                              */
662                             if (test->protocol->id != Pudp ||
663                                 !sp->sender) {
664                                 setnonblocking(s, 1);
665                             }
666 
667                             if (test->on_new_stream)
668                                 test->on_new_stream(sp);
669 
670                             flag = -1;
671                         }
672                     }
673                     FD_CLR(test->prot_listener, &read_set);
674                 }
675 
676 
677                 if (rec_streams_accepted == streams_to_rec && send_streams_accepted == streams_to_send) {
678                     if (test->protocol->id != Ptcp) {
679                         FD_CLR(test->prot_listener, &test->read_set);
680                         close(test->prot_listener);
681                     } else {
682                         if (test->no_delay || test->settings->mss || test->settings->socket_bufsize) {
683                             FD_CLR(test->listener, &test->read_set);
684                             close(test->listener);
685 			    test->listener = 0;
686                             if ((s = netannounce(test->settings->domain, Ptcp, test->bind_address, test->bind_dev, test->server_port)) < 0) {
687 				cleanup_server(test);
688                                 i_errno = IELISTEN;
689                                 return -1;
690                             }
691                             test->listener = s;
692                             FD_SET(test->listener, &test->read_set);
693 			    if (test->listener > test->max_fd) test->max_fd = test->listener;
694                         }
695                     }
696                     test->prot_listener = -1;
697 
698 		    /* Ensure that total requested data rate is not above limit */
699 		    iperf_size_t total_requested_rate = test->num_streams * test->settings->rate * (test->mode == BIDIRECTIONAL? 2 : 1);
700 		    if (test->settings->bitrate_limit > 0 && total_requested_rate > test->settings->bitrate_limit) {
701                         if (iperf_get_verbose(test))
702                             iperf_err(test, "Client total requested throughput rate of %" PRIu64 " bps exceeded %" PRIu64 " bps limit",
703                                       total_requested_rate, test->settings->bitrate_limit);
704 			cleanup_server(test);
705 			i_errno = IETOTALRATE;
706 			return -1;
707 		    }
708 
709 		    // Begin calculating CPU utilization
710 		    cpu_util(NULL);
711 
712 		    if (iperf_set_send_state(test, TEST_START) != 0) {
713 			cleanup_server(test);
714                         return -1;
715 		    }
716                     if (iperf_init_test(test) < 0) {
717 			cleanup_server(test);
718                         return -1;
719 		    }
720 		    if (create_server_timers(test) < 0) {
721 			cleanup_server(test);
722                         return -1;
723 		    }
724 		    if (create_server_omit_timer(test) < 0) {
725 			cleanup_server(test);
726                         return -1;
727 		    }
728 		    if (test->mode != RECEIVER)
729 			if (iperf_create_send_timers(test) < 0) {
730 			    cleanup_server(test);
731 			    return -1;
732 			}
733 		    if (iperf_set_send_state(test, TEST_RUNNING) != 0) {
734 			cleanup_server(test);
735                         return -1;
736 		    }
737                 }
738             }
739 
740             if (test->state == TEST_RUNNING) {
741                 if (test->mode == BIDIRECTIONAL) {
742                     if (iperf_recv(test, &read_set) < 0) {
743                         cleanup_server(test);
744                         return -1;
745                     }
746                     if (iperf_send(test, &write_set) < 0) {
747                         cleanup_server(test);
748                         return -1;
749                     }
750                 } else if (test->mode == SENDER) {
751                     // Reverse mode. Server sends.
752                     if (iperf_send(test, &write_set) < 0) {
753 			cleanup_server(test);
754                         return -1;
755 		    }
756                 } else {
757                     // Regular mode. Server receives.
758                     if (iperf_recv(test, &read_set) < 0) {
759 			cleanup_server(test);
760                         return -1;
761 		    }
762                 }
763 	    }
764         }
765 
766 	if (result == 0 ||
767 	    (timeout != NULL && timeout->tv_sec == 0 && timeout->tv_usec == 0)) {
768 	    /* Run the timers. */
769 	    iperf_time_now(&now);
770 	    tmr_run(&now);
771 	}
772     }
773 
774     cleanup_server(test);
775 
776     if (test->json_output) {
777 	if (iperf_json_finish(test) < 0)
778 	    return -1;
779     }
780 
781     iflush(test);
782 
783     if (test->server_affinity != -1)
784 	if (iperf_clearaffinity(test) != 0)
785 	    return -1;
786 
787     return 0;
788 }
789