1 /*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2006-2009 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include <linux/module.h>
38 #include <linux/moduleparam.h>
39 #include <linux/slab.h>
40 #include <linux/err.h>
41 #include <linux/string.h>
42 #include <linux/list.h>
43 #include <linux/in.h>
44 #include <linux/device.h>
45 #include <linux/pci.h>
46 #include <linux/sched.h>
47 #include <linux/wait.h>
48
49 #include <asm/atomic.h>
50
51 #include <rdma/ib_verbs.h>
52 #include <rdma/rdma_cm.h>
53
54 #include "krping.h"
55 #include "getopt.h"
56
57 #define PFX "krping: "
58
59 extern int krping_debug;
60 #define DEBUG_LOG(...) do { if (krping_debug) log(LOG_INFO, __VA_ARGS__); } while (0)
61 #define BIND_INFO 1
62
63 MODULE_AUTHOR("Steve Wise");
64 MODULE_DESCRIPTION("RDMA ping server");
65 MODULE_LICENSE("Dual BSD/GPL");
66 MODULE_VERSION(krping, 1);
67 MODULE_DEPEND(krping, linuxkpi, 1, 1, 1);
68
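/*
 * Read the x86 time-stamp counter.  The wlat/bw tests below only use
 * these values as relative cycle counts for post/poll overhead, so no
 * calibration against wall-clock time is needed.
 */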
69 static __inline uint64_t
70 get_cycles(void)
71 {
72 uint32_t low, high;
73 __asm __volatile("rdtsc" : "=a" (low), "=d" (high));
74 return (low | ((u_int64_t)high << 32));
75 }
76
77 typedef uint64_t cycles_t;
78
79 enum mem_type {
80 DMA = 1,
81 REG = 2,
82 };
83
84 static const struct krping_option krping_opts[] = {
85 {"count", OPT_INT, 'C'},
86 {"size", OPT_INT, 'S'},
87 {"addr", OPT_STRING, 'a'},
88 {"addr6", OPT_STRING, 'A'},
89 {"port", OPT_INT, 'p'},
90 {"verbose", OPT_NOPARAM, 'v'},
91 {"validate", OPT_NOPARAM, 'V'},
92 {"server", OPT_NOPARAM, 's'},
93 {"client", OPT_NOPARAM, 'c'},
94 {"server_inv", OPT_NOPARAM, 'I'},
95 {"wlat", OPT_NOPARAM, 'l'},
96 {"rlat", OPT_NOPARAM, 'L'},
97 {"bw", OPT_NOPARAM, 'B'},
98 {"duplex", OPT_NOPARAM, 'd'},
99 {"tos", OPT_INT, 't'},
100 {"txdepth", OPT_INT, 'T'},
101 {"poll", OPT_NOPARAM, 'P'},
102 {"local_dma_lkey", OPT_NOPARAM, 'Z'},
103 {"read_inv", OPT_NOPARAM, 'R'},
104 {"fr", OPT_NOPARAM, 'f'},
105 {NULL, 0, 0}
106 };
107
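/*
 * The 64-bit byte swap is its own inverse, so ntohll() can safely be
 * defined in terms of cpu_to_be64() as well.
 */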
108 #define htonll(x) cpu_to_be64((x))
109 #define ntohll(x) cpu_to_be64((x))
110
111 static DEFINE_MUTEX(krping_mutex);
112
113 /*
114 * List of running krping threads.
115 */
116 static LIST_HEAD(krping_cbs);
117
118 /*
119 * Invoke like this, one on each side, using the server's address on
120 * the RDMA device (iw%d):
121 *
122 * /bin/echo server,port=9999,addr=192.168.69.142,validate > /proc/krping
123 * /bin/echo client,port=9999,addr=192.168.69.142,validate > /proc/krping
124 * /bin/echo client,port=9999,addr6=2001:db8:0:f101::1,validate > /proc/krping
125 *
126 * krping "ping/pong" loop:
127 * client sends source rkey/addr/len
128  * 	server receives source rkey/addr/len
129 * server rdma reads "ping" data from source
130 * server sends "go ahead" on rdma read completion
131 * client sends sink rkey/addr/len
132 * server receives sink rkey/addr/len
133 * server rdma writes "pong" data to sink
134 * server sends "go ahead" on rdma write completion
135 * <repeat loop>
136 */
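
/*
 * Each rkey/addr/len advertisement above travels in the struct
 * krping_rdma_info defined below, in network byte order, over a plain
 * SEND/RECV exchange.
 */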
137
138 /*
139 * These states are used to signal events between the completion handler
140 * and the main client or server thread.
141 *
142 * Once CONNECTED, they cycle through RDMA_READ_ADV, RDMA_WRITE_ADV,
143 * and RDMA_WRITE_COMPLETE for each ping.
144 */
145 enum test_state {
146 IDLE = 1,
147 CONNECT_REQUEST,
148 ADDR_RESOLVED,
149 ROUTE_RESOLVED,
150 CONNECTED,
151 RDMA_READ_ADV,
152 RDMA_READ_COMPLETE,
153 RDMA_WRITE_ADV,
154 RDMA_WRITE_COMPLETE,
155 ERROR
156 };
157
158 struct krping_rdma_info {
159 uint64_t buf;
160 uint32_t rkey;
161 uint32_t size;
162 };
163
164 /*
165 * Default max buffer size for IO...
166 */
167 #define RPING_BUFSIZE 128*1024
168 #define RPING_SQ_DEPTH 64
169
170 /*
171 * Control block struct.
172 */
173 struct krping_cb {
174 int server; /* 0 iff client */
175 struct ib_cq *cq;
176 struct ib_pd *pd;
177 struct ib_qp *qp;
178
179 struct ib_mr *dma_mr;
180
181 struct ib_fast_reg_page_list *page_list;
182 int page_list_len;
183 struct ib_reg_wr reg_mr_wr;
184 struct ib_send_wr invalidate_wr;
185 struct ib_mr *reg_mr;
186 int server_invalidate;
187 int read_inv;
188 u8 key;
189
190 struct ib_recv_wr rq_wr; /* recv work request record */
191 struct ib_sge recv_sgl; /* recv single SGE */
192 struct krping_rdma_info recv_buf __aligned(16); /* malloc'd buffer */
193 u64 recv_dma_addr;
194 DECLARE_PCI_UNMAP_ADDR(recv_mapping)
195
196 	struct ib_send_wr sq_wr;	/* send work request record */
197 struct ib_sge send_sgl;
198 struct krping_rdma_info send_buf __aligned(16); /* single send buf */
199 u64 send_dma_addr;
200 DECLARE_PCI_UNMAP_ADDR(send_mapping)
201
202 struct ib_rdma_wr rdma_sq_wr; /* rdma work request record */
203 struct ib_sge rdma_sgl; /* rdma single SGE */
204 char *rdma_buf; /* used as rdma sink */
205 u64 rdma_dma_addr;
206 DECLARE_PCI_UNMAP_ADDR(rdma_mapping)
207 struct ib_mr *rdma_mr;
208
209 	uint32_t remote_rkey;		/* remote peer's RKEY */
210 	uint64_t remote_addr;		/* remote peer's TO */
211 	uint32_t remote_len;		/* remote peer's LEN */
212
213 char *start_buf; /* rdma read src */
214 u64 start_dma_addr;
215 DECLARE_PCI_UNMAP_ADDR(start_mapping)
216 struct ib_mr *start_mr;
217
218 enum test_state state; /* used for cond/signalling */
219 wait_queue_head_t sem;
220 struct krping_stats stats;
221
222 uint16_t port; /* dst port in NBO */
223 u8 addr[16] __aligned(8); /* dst addr in NBO */
224 char *addr_str; /* dst addr string */
225 uint8_t addr_type; /* ADDR_FAMILY - IPv4/V6 */
226 int verbose; /* verbose logging */
227 int count; /* ping count */
228 int size; /* ping data size */
229 int validate; /* validate ping data */
230 int wlat; /* run wlat test */
231 int rlat; /* run rlat test */
232 int bw; /* run bw test */
233 int duplex; /* run bw full duplex test */
234 int poll; /* poll or block for rlat test */
235 int txdepth; /* SQ depth */
236 int local_dma_lkey; /* use 0 for lkey */
237 int frtest; /* reg test */
238 int tos; /* type of service */
239
240 /* CM stuff */
241 struct rdma_cm_id *cm_id; /* connection on client side,*/
242 /* listener on server side. */
243 struct rdma_cm_id *child_cm_id; /* connection on server side */
244 struct list_head list;
245 };
246
247 static int krping_cma_event_handler(struct rdma_cm_id *cma_id,
248 struct rdma_cm_event *event)
249 {
250 int ret;
251 struct krping_cb *cb = cma_id->context;
252
253 DEBUG_LOG("cma_event type %d cma_id %p (%s)\n", event->event, cma_id,
254 (cma_id == cb->cm_id) ? "parent" : "child");
255
256 switch (event->event) {
257 case RDMA_CM_EVENT_ADDR_RESOLVED:
258 cb->state = ADDR_RESOLVED;
259 ret = rdma_resolve_route(cma_id, 2000);
260 if (ret) {
261 printk(KERN_ERR PFX "rdma_resolve_route error %d\n",
262 ret);
263 wake_up_interruptible(&cb->sem);
264 }
265 break;
266
267 case RDMA_CM_EVENT_ROUTE_RESOLVED:
268 cb->state = ROUTE_RESOLVED;
269 wake_up_interruptible(&cb->sem);
270 break;
271
272 case RDMA_CM_EVENT_CONNECT_REQUEST:
273 cb->state = CONNECT_REQUEST;
274 cb->child_cm_id = cma_id;
275 DEBUG_LOG("child cma %p\n", cb->child_cm_id);
276 wake_up_interruptible(&cb->sem);
277 break;
278
279 case RDMA_CM_EVENT_ESTABLISHED:
280 DEBUG_LOG("ESTABLISHED\n");
281 if (!cb->server) {
282 cb->state = CONNECTED;
283 }
284 wake_up_interruptible(&cb->sem);
285 break;
286
287 case RDMA_CM_EVENT_ADDR_ERROR:
288 case RDMA_CM_EVENT_ROUTE_ERROR:
289 case RDMA_CM_EVENT_CONNECT_ERROR:
290 case RDMA_CM_EVENT_UNREACHABLE:
291 case RDMA_CM_EVENT_REJECTED:
292 printk(KERN_ERR PFX "cma event %d, error %d\n", event->event,
293 event->status);
294 cb->state = ERROR;
295 wake_up_interruptible(&cb->sem);
296 break;
297
298 case RDMA_CM_EVENT_DISCONNECTED:
299 printk(KERN_ERR PFX "DISCONNECT EVENT...\n");
300 cb->state = ERROR;
301 wake_up_interruptible(&cb->sem);
302 break;
303
304 case RDMA_CM_EVENT_DEVICE_REMOVAL:
305 printk(KERN_ERR PFX "cma detected device removal!!!!\n");
306 cb->state = ERROR;
307 wake_up_interruptible(&cb->sem);
308 break;
309
310 default:
311 printk(KERN_ERR PFX "oof bad type!\n");
312 wake_up_interruptible(&cb->sem);
313 break;
314 }
315 return 0;
316 }
317
318 static int server_recv(struct krping_cb *cb, struct ib_wc *wc)
319 {
320 if (wc->byte_len != sizeof(cb->recv_buf)) {
321 printk(KERN_ERR PFX "Received bogus data, size %d\n",
322 wc->byte_len);
323 return -1;
324 }
325
326 cb->remote_rkey = ntohl(cb->recv_buf.rkey);
327 cb->remote_addr = ntohll(cb->recv_buf.buf);
328 cb->remote_len = ntohl(cb->recv_buf.size);
329 DEBUG_LOG("Received rkey %x addr %llx len %d from peer\n",
330 cb->remote_rkey, (unsigned long long)cb->remote_addr,
331 cb->remote_len);
332
333 if (cb->state <= CONNECTED || cb->state == RDMA_WRITE_COMPLETE)
334 cb->state = RDMA_READ_ADV;
335 else
336 cb->state = RDMA_WRITE_ADV;
337
338 return 0;
339 }
340
341 static int client_recv(struct krping_cb *cb, struct ib_wc *wc)
342 {
343 if (wc->byte_len != sizeof(cb->recv_buf)) {
344 printk(KERN_ERR PFX "Received bogus data, size %d\n",
345 wc->byte_len);
346 return -1;
347 }
348
349 if (cb->state == RDMA_READ_ADV)
350 cb->state = RDMA_WRITE_ADV;
351 else
352 cb->state = RDMA_WRITE_COMPLETE;
353
354 return 0;
355 }
356
357 static void krping_cq_event_handler(struct ib_cq *cq, void *ctx)
358 {
359 struct krping_cb *cb = ctx;
360 struct ib_wc wc;
361 struct ib_recv_wr *bad_wr;
362 int ret;
363
364 BUG_ON(cb->cq != cq);
365 if (cb->frtest) {
366 printk(KERN_ERR PFX "cq completion event in frtest!\n");
367 return;
368 }
369 if (!cb->wlat && !cb->rlat && !cb->bw)
370 ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
371 while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 1) {
372 if (wc.status) {
373 if (wc.status == IB_WC_WR_FLUSH_ERR) {
374 DEBUG_LOG("cq flushed\n");
375 continue;
376 } else {
377 printk(KERN_ERR PFX "cq completion failed with "
378 				       "wr_id %jx status %d opcode %d vendor_err %x\n",
379 (uintmax_t)wc.wr_id, wc.status, wc.opcode, wc.vendor_err);
380 goto error;
381 }
382 }
383 if (cb->state == ERROR) {
384 printk(KERN_ERR PFX "cq completion in ERROR state\n");
385 return;
386 }
387 switch (wc.opcode) {
388 case IB_WC_SEND:
389 DEBUG_LOG("send completion\n");
390 cb->stats.send_bytes += cb->send_sgl.length;
391 cb->stats.send_msgs++;
392 break;
393
394 case IB_WC_RDMA_WRITE:
395 DEBUG_LOG("rdma write completion\n");
396 cb->stats.write_bytes += cb->rdma_sq_wr.wr.sg_list->length;
397 cb->stats.write_msgs++;
398 cb->state = RDMA_WRITE_COMPLETE;
399 wake_up_interruptible(&cb->sem);
400 break;
401
402 case IB_WC_RDMA_READ:
403 DEBUG_LOG("rdma read completion\n");
404 cb->stats.read_bytes += cb->rdma_sq_wr.wr.sg_list->length;
405 cb->stats.read_msgs++;
406 cb->state = RDMA_READ_COMPLETE;
407 wake_up_interruptible(&cb->sem);
408 break;
409
410 case IB_WC_RECV:
411 DEBUG_LOG("recv completion\n");
412 cb->stats.recv_bytes += sizeof(cb->recv_buf);
413 cb->stats.recv_msgs++;
414 if (cb->wlat || cb->rlat || cb->bw)
415 ret = server_recv(cb, &wc);
416 else
417 ret = cb->server ? server_recv(cb, &wc) :
418 client_recv(cb, &wc);
419 if (ret) {
420 printk(KERN_ERR PFX "recv wc error: %d\n", ret);
421 goto error;
422 }
423
424 ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
425 if (ret) {
426 printk(KERN_ERR PFX "post recv error: %d\n",
427 ret);
428 goto error;
429 }
430 wake_up_interruptible(&cb->sem);
431 break;
432
433 default:
434 printk(KERN_ERR PFX
435 "%s:%d Unexpected opcode %d, Shutting down\n",
436 __func__, __LINE__, wc.opcode);
437 goto error;
438 }
439 }
440 if (ret) {
441 printk(KERN_ERR PFX "poll error %d\n", ret);
442 goto error;
443 }
444 return;
445 error:
446 cb->state = ERROR;
447 wake_up_interruptible(&cb->sem);
448 }
449
450 static int krping_accept(struct krping_cb *cb)
451 {
452 struct rdma_conn_param conn_param;
453 int ret;
454
455 DEBUG_LOG("accepting client connection request\n");
456
457 memset(&conn_param, 0, sizeof conn_param);
458 conn_param.responder_resources = 1;
459 conn_param.initiator_depth = 1;
460
461 ret = rdma_accept(cb->child_cm_id, &conn_param);
462 if (ret) {
463 printk(KERN_ERR PFX "rdma_accept error: %d\n", ret);
464 return ret;
465 }
466
467 if (!cb->wlat && !cb->rlat && !cb->bw) {
468 wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
469 if (cb->state == ERROR) {
470 printk(KERN_ERR PFX "wait for CONNECTED state %d\n",
471 cb->state);
472 return -1;
473 }
474 }
475 return 0;
476 }
477
478 static void krping_setup_wr(struct krping_cb *cb)
479 {
480 cb->recv_sgl.addr = cb->recv_dma_addr;
481 cb->recv_sgl.length = sizeof cb->recv_buf;
482 cb->recv_sgl.lkey = cb->pd->local_dma_lkey;
483 cb->rq_wr.sg_list = &cb->recv_sgl;
484 cb->rq_wr.num_sge = 1;
485
486 cb->send_sgl.addr = cb->send_dma_addr;
487 cb->send_sgl.length = sizeof cb->send_buf;
488 cb->send_sgl.lkey = cb->pd->local_dma_lkey;
489
490 cb->sq_wr.opcode = IB_WR_SEND;
491 cb->sq_wr.send_flags = IB_SEND_SIGNALED;
492 cb->sq_wr.sg_list = &cb->send_sgl;
493 cb->sq_wr.num_sge = 1;
494
495 if (cb->server || cb->wlat || cb->rlat || cb->bw) {
496 cb->rdma_sgl.addr = cb->rdma_dma_addr;
497 cb->rdma_sq_wr.wr.send_flags = IB_SEND_SIGNALED;
498 cb->rdma_sq_wr.wr.sg_list = &cb->rdma_sgl;
499 cb->rdma_sq_wr.wr.num_sge = 1;
500 }
501
502 /*
503 	 * A chain of 2 WRs, INVALIDATE_MR + REG_MR,
504 	 * both unsignaled. The client uses them to reregister
505 * the rdma buffers with a new key each iteration.
506 */
507 cb->reg_mr_wr.wr.opcode = IB_WR_REG_MR;
508 cb->reg_mr_wr.mr = cb->reg_mr;
509
510 cb->invalidate_wr.next = &cb->reg_mr_wr.wr;
511 cb->invalidate_wr.opcode = IB_WR_LOCAL_INV;
512 }
513
514 static int krping_setup_buffers(struct krping_cb *cb)
515 {
516 int ret;
517
518 DEBUG_LOG(PFX "krping_setup_buffers called on cb %p\n", cb);
519
520 cb->recv_dma_addr = ib_dma_map_single(cb->pd->device,
521 &cb->recv_buf,
522 sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
523 pci_unmap_addr_set(cb, recv_mapping, cb->recv_dma_addr);
524 cb->send_dma_addr = ib_dma_map_single(cb->pd->device,
525 &cb->send_buf, sizeof(cb->send_buf),
526 DMA_BIDIRECTIONAL);
527 pci_unmap_addr_set(cb, send_mapping, cb->send_dma_addr);
528
529 cb->rdma_buf = ib_dma_alloc_coherent(cb->pd->device, cb->size,
530 &cb->rdma_dma_addr,
531 GFP_KERNEL);
532 if (!cb->rdma_buf) {
533 DEBUG_LOG(PFX "rdma_buf allocation failed\n");
534 ret = -ENOMEM;
535 goto bail;
536 }
537 pci_unmap_addr_set(cb, rdma_mapping, cb->rdma_dma_addr);
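	/* Round the buffer size up to a whole number of pages for the reg MR. */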
538 cb->page_list_len = (((cb->size - 1) & PAGE_MASK) + PAGE_SIZE)
539 >> PAGE_SHIFT;
540 cb->reg_mr = ib_alloc_mr(cb->pd, IB_MR_TYPE_MEM_REG,
541 cb->page_list_len);
542 if (IS_ERR(cb->reg_mr)) {
543 ret = PTR_ERR(cb->reg_mr);
544 DEBUG_LOG(PFX "recv_buf reg_mr failed %d\n", ret);
545 goto bail;
546 }
547 DEBUG_LOG(PFX "reg rkey 0x%x page_list_len %u\n",
548 cb->reg_mr->rkey, cb->page_list_len);
549
550 if (!cb->server || cb->wlat || cb->rlat || cb->bw) {
551
552 cb->start_buf = ib_dma_alloc_coherent(cb->pd->device, cb->size,
553 &cb->start_dma_addr,
554 GFP_KERNEL);
555 if (!cb->start_buf) {
556 DEBUG_LOG(PFX "start_buf malloc failed\n");
557 ret = -ENOMEM;
558 goto bail;
559 }
560 pci_unmap_addr_set(cb, start_mapping, cb->start_dma_addr);
561 }
562
563 krping_setup_wr(cb);
564 DEBUG_LOG(PFX "allocated & registered buffers...\n");
565 return 0;
566 bail:
567 if (cb->reg_mr && !IS_ERR(cb->reg_mr))
568 ib_dereg_mr(cb->reg_mr);
569 if (cb->rdma_mr && !IS_ERR(cb->rdma_mr))
570 ib_dereg_mr(cb->rdma_mr);
571 if (cb->dma_mr && !IS_ERR(cb->dma_mr))
572 ib_dereg_mr(cb->dma_mr);
573 if (cb->rdma_buf) {
574 ib_dma_free_coherent(cb->pd->device, cb->size, cb->rdma_buf,
575 cb->rdma_dma_addr);
576 }
577 if (cb->start_buf) {
578 ib_dma_free_coherent(cb->pd->device, cb->size, cb->start_buf,
579 cb->start_dma_addr);
580 }
581 return ret;
582 }
583
584 static void krping_free_buffers(struct krping_cb *cb)
585 {
586 DEBUG_LOG("krping_free_buffers called on cb %p\n", cb);
587
588 if (cb->dma_mr)
589 ib_dereg_mr(cb->dma_mr);
590 if (cb->rdma_mr)
591 ib_dereg_mr(cb->rdma_mr);
592 if (cb->start_mr)
593 ib_dereg_mr(cb->start_mr);
594 if (cb->reg_mr)
595 ib_dereg_mr(cb->reg_mr);
596
597 dma_unmap_single(cb->pd->device->dma_device,
598 pci_unmap_addr(cb, recv_mapping),
599 sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
600 dma_unmap_single(cb->pd->device->dma_device,
601 pci_unmap_addr(cb, send_mapping),
602 sizeof(cb->send_buf), DMA_BIDIRECTIONAL);
603
604 ib_dma_free_coherent(cb->pd->device, cb->size, cb->rdma_buf,
605 cb->rdma_dma_addr);
606
607 if (cb->start_buf) {
608 ib_dma_free_coherent(cb->pd->device, cb->size, cb->start_buf,
609 cb->start_dma_addr);
610 }
611 }
612
613 static int krping_create_qp(struct krping_cb *cb)
614 {
615 struct ib_qp_init_attr init_attr;
616 int ret;
617
618 memset(&init_attr, 0, sizeof(init_attr));
619 init_attr.cap.max_send_wr = cb->txdepth;
620 init_attr.cap.max_recv_wr = 2;
621
622 /* For flush_qp() */
623 init_attr.cap.max_send_wr++;
624 init_attr.cap.max_recv_wr++;
625
626 init_attr.cap.max_recv_sge = 1;
627 init_attr.cap.max_send_sge = 1;
628 init_attr.qp_type = IB_QPT_RC;
629 init_attr.send_cq = cb->cq;
630 init_attr.recv_cq = cb->cq;
631 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
632
633 if (cb->server) {
634 ret = rdma_create_qp(cb->child_cm_id, cb->pd, &init_attr);
635 if (!ret)
636 cb->qp = cb->child_cm_id->qp;
637 } else {
638 ret = rdma_create_qp(cb->cm_id, cb->pd, &init_attr);
639 if (!ret)
640 cb->qp = cb->cm_id->qp;
641 }
642
643 return ret;
644 }
645
646 static void krping_free_qp(struct krping_cb *cb)
647 {
648 ib_destroy_qp(cb->qp);
649 ib_destroy_cq(cb->cq);
650 ib_dealloc_pd(cb->pd);
651 }
652
653 static int krping_setup_qp(struct krping_cb *cb, struct rdma_cm_id *cm_id)
654 {
655 int ret;
656 struct ib_cq_init_attr attr = {0};
657
658 cb->pd = ib_alloc_pd(cm_id->device, 0);
659 if (IS_ERR(cb->pd)) {
660 printk(KERN_ERR PFX "ib_alloc_pd failed\n");
661 return PTR_ERR(cb->pd);
662 }
663 DEBUG_LOG("created pd %p\n", cb->pd);
664
665 strlcpy(cb->stats.name, cb->pd->device->name, sizeof(cb->stats.name));
666
667 attr.cqe = cb->txdepth * 2;
668 attr.comp_vector = 0;
669 cb->cq = ib_create_cq(cm_id->device, krping_cq_event_handler, NULL,
670 cb, &attr);
671 if (IS_ERR(cb->cq)) {
672 printk(KERN_ERR PFX "ib_create_cq failed\n");
673 ret = PTR_ERR(cb->cq);
674 goto err1;
675 }
676 DEBUG_LOG("created cq %p\n", cb->cq);
677
678 if (!cb->wlat && !cb->rlat && !cb->bw && !cb->frtest) {
679 ret = ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
680 if (ret) {
681 			printk(KERN_ERR PFX "ib_req_notify_cq failed\n");
682 goto err2;
683 }
684 }
685
686 ret = krping_create_qp(cb);
687 if (ret) {
688 printk(KERN_ERR PFX "krping_create_qp failed: %d\n", ret);
689 goto err2;
690 }
691 DEBUG_LOG("created qp %p\n", cb->qp);
692 return 0;
693 err2:
694 ib_destroy_cq(cb->cq);
695 err1:
696 ib_dealloc_pd(cb->pd);
697 return ret;
698 }
699
700 /*
701 * return the (possibly rebound) rkey for the rdma buffer.
702 * REG mode: invalidate and rebind via reg wr.
703 * other modes: just return the mr rkey.
704 */
705 static u32 krping_rdma_rkey(struct krping_cb *cb, u64 buf, int post_inv)
706 {
707 u32 rkey;
708 struct ib_send_wr *bad_wr;
709 int ret;
710 struct scatterlist sg = {0};
711
712 cb->invalidate_wr.ex.invalidate_rkey = cb->reg_mr->rkey;
713
714 /*
715 * Update the reg key.
716 */
717 ib_update_fast_reg_key(cb->reg_mr, ++cb->key);
718 cb->reg_mr_wr.key = cb->reg_mr->rkey;
719
720 /*
721 * Update the reg WR with new buf info.
722 */
723 if (buf == (u64)cb->start_dma_addr)
724 cb->reg_mr_wr.access = IB_ACCESS_REMOTE_READ;
725 else
726 cb->reg_mr_wr.access = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
727 sg_dma_address(&sg) = buf;
728 sg_dma_len(&sg) = cb->size;
729
730 ret = ib_map_mr_sg(cb->reg_mr, &sg, 1, NULL, PAGE_SIZE);
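	/*
	 * ib_map_mr_sg() returns the number of scatterlist entries it
	 * mapped; the single-entry mapping must land within the MR's
	 * page list or the registration is unusable.
	 */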
731 BUG_ON(ret <= 0 || ret > cb->page_list_len);
732
733 DEBUG_LOG(PFX "post_inv = %d, reg_mr new rkey 0x%x pgsz %u len %u"
734 " iova_start %llx\n",
735 post_inv,
736 cb->reg_mr_wr.key,
737 cb->reg_mr->page_size,
738 (unsigned)cb->reg_mr->length,
739 (unsigned long long)cb->reg_mr->iova);
740
741 if (post_inv)
742 ret = ib_post_send(cb->qp, &cb->invalidate_wr, &bad_wr);
743 else
744 ret = ib_post_send(cb->qp, &cb->reg_mr_wr.wr, &bad_wr);
745 if (ret) {
746 printk(KERN_ERR PFX "post send error %d\n", ret);
747 cb->state = ERROR;
748 }
749 rkey = cb->reg_mr->rkey;
750 return rkey;
751 }
752
753 static void krping_format_send(struct krping_cb *cb, u64 buf)
754 {
755 struct krping_rdma_info *info = &cb->send_buf;
756 u32 rkey;
757
758 /*
759 * Client side will do reg or mw bind before
760 * advertising the rdma buffer. Server side
761 * sends have no data.
762 */
763 if (!cb->server || cb->wlat || cb->rlat || cb->bw) {
764 rkey = krping_rdma_rkey(cb, buf, !cb->server_invalidate);
765 info->buf = htonll(buf);
766 info->rkey = htonl(rkey);
767 info->size = htonl(cb->size);
768 DEBUG_LOG("RDMA addr %llx rkey %x len %d\n",
769 (unsigned long long)buf, rkey, cb->size);
770 }
771 }
772
773 static void krping_test_server(struct krping_cb *cb)
774 {
775 struct ib_send_wr *bad_wr, inv;
776 int ret;
777
778 while (1) {
779 /* Wait for client's Start STAG/TO/Len */
780 wait_event_interruptible(cb->sem, cb->state >= RDMA_READ_ADV);
781 if (cb->state != RDMA_READ_ADV) {
782 printk(KERN_ERR PFX "wait for RDMA_READ_ADV state %d\n",
783 cb->state);
784 break;
785 }
786
787 DEBUG_LOG("server received sink adv\n");
788
789 cb->rdma_sq_wr.rkey = cb->remote_rkey;
790 cb->rdma_sq_wr.remote_addr = cb->remote_addr;
791 cb->rdma_sq_wr.wr.sg_list->length = cb->remote_len;
792 cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, !cb->read_inv);
793 cb->rdma_sq_wr.wr.next = NULL;
794
795 /* Issue RDMA Read. */
796 if (cb->read_inv)
797 cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
798 else {
799
800 cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_READ;
801 /*
802 * Immediately follow the read with a
803 * fenced LOCAL_INV.
804 */
805 cb->rdma_sq_wr.wr.next = &inv;
806 memset(&inv, 0, sizeof inv);
807 inv.opcode = IB_WR_LOCAL_INV;
808 inv.ex.invalidate_rkey = cb->reg_mr->rkey;
809 inv.send_flags = IB_SEND_FENCE;
810 }
811
812 ret = ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr);
813 if (ret) {
814 printk(KERN_ERR PFX "post send error %d\n", ret);
815 break;
816 }
817 cb->rdma_sq_wr.wr.next = NULL;
818
819 DEBUG_LOG("server posted rdma read req \n");
820
821 /* Wait for read completion */
822 wait_event_interruptible(cb->sem,
823 cb->state >= RDMA_READ_COMPLETE);
824 if (cb->state != RDMA_READ_COMPLETE) {
825 printk(KERN_ERR PFX
826 "wait for RDMA_READ_COMPLETE state %d\n",
827 cb->state);
828 break;
829 }
830 DEBUG_LOG("server received read complete\n");
831
832 /* Display data in recv buf */
833 if (cb->verbose)
834 printk(KERN_INFO PFX "server ping data: %s\n",
835 cb->rdma_buf);
836
837 /* Tell client to continue */
838 if (cb->server && cb->server_invalidate) {
839 cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
840 cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
841 DEBUG_LOG("send-w-inv rkey 0x%x\n", cb->remote_rkey);
842 }
843 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
844 if (ret) {
845 printk(KERN_ERR PFX "post send error %d\n", ret);
846 break;
847 }
848 DEBUG_LOG("server posted go ahead\n");
849
850 /* Wait for client's RDMA STAG/TO/Len */
851 wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
852 if (cb->state != RDMA_WRITE_ADV) {
853 printk(KERN_ERR PFX
854 "wait for RDMA_WRITE_ADV state %d\n",
855 cb->state);
856 break;
857 }
858 DEBUG_LOG("server received sink adv\n");
859
860 /* RDMA Write echo data */
861 cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
862 cb->rdma_sq_wr.rkey = cb->remote_rkey;
863 cb->rdma_sq_wr.remote_addr = cb->remote_addr;
864 cb->rdma_sq_wr.wr.sg_list->length = strlen(cb->rdma_buf) + 1;
865 if (cb->local_dma_lkey)
866 cb->rdma_sgl.lkey = cb->pd->local_dma_lkey;
867 else
868 cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, 0);
869
870 DEBUG_LOG("rdma write from lkey %x laddr %llx len %d\n",
871 cb->rdma_sq_wr.wr.sg_list->lkey,
872 (unsigned long long)cb->rdma_sq_wr.wr.sg_list->addr,
873 cb->rdma_sq_wr.wr.sg_list->length);
874
875 ret = ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr);
876 if (ret) {
877 printk(KERN_ERR PFX "post send error %d\n", ret);
878 break;
879 }
880
881 /* Wait for completion */
882 ret = wait_event_interruptible(cb->sem, cb->state >=
883 RDMA_WRITE_COMPLETE);
884 if (cb->state != RDMA_WRITE_COMPLETE) {
885 printk(KERN_ERR PFX
886 "wait for RDMA_WRITE_COMPLETE state %d\n",
887 cb->state);
888 break;
889 }
890 DEBUG_LOG("server rdma write complete \n");
891
892 cb->state = CONNECTED;
893
894 /* Tell client to begin again */
895 if (cb->server && cb->server_invalidate) {
896 cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
897 cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
898 DEBUG_LOG("send-w-inv rkey 0x%x\n", cb->remote_rkey);
899 }
900 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
901 if (ret) {
902 printk(KERN_ERR PFX "post send error %d\n", ret);
903 break;
904 }
905 DEBUG_LOG("server posted go ahead\n");
906 }
907 }
908
909 static void rlat_test(struct krping_cb *cb)
910 {
911 int scnt;
912 int iters = cb->count;
913 struct timeval start_tv, stop_tv;
914 int ret;
915 struct ib_wc wc;
916 struct ib_send_wr *bad_wr;
917 int ne;
918
919 scnt = 0;
920 cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_READ;
921 cb->rdma_sq_wr.rkey = cb->remote_rkey;
922 cb->rdma_sq_wr.remote_addr = cb->remote_addr;
923 cb->rdma_sq_wr.wr.sg_list->length = cb->size;
924
925 microtime(&start_tv);
926 if (!cb->poll) {
927 cb->state = RDMA_READ_ADV;
928 ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
929 }
930 while (scnt < iters) {
931
932 cb->state = RDMA_READ_ADV;
933 ret = ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr);
934 if (ret) {
935 printk(KERN_ERR PFX
936 "Couldn't post send: ret=%d scnt %d\n",
937 ret, scnt);
938 return;
939 }
940
941 do {
942 if (!cb->poll) {
943 wait_event_interruptible(cb->sem,
944 cb->state != RDMA_READ_ADV);
945 if (cb->state == RDMA_READ_COMPLETE) {
946 ne = 1;
947 ib_req_notify_cq(cb->cq,
948 IB_CQ_NEXT_COMP);
949 } else {
950 ne = -1;
951 }
952 } else
953 ne = ib_poll_cq(cb->cq, 1, &wc);
954 if (cb->state == ERROR) {
955 printk(KERN_ERR PFX
956 "state == ERROR...bailing scnt %d\n",
957 scnt);
958 return;
959 }
960 } while (ne == 0);
961
962 if (ne < 0) {
963 printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
964 return;
965 }
966 if (cb->poll && wc.status != IB_WC_SUCCESS) {
967 			printk(KERN_ERR PFX "Completion with error at %s:\n",
968 cb->server ? "server" : "client");
969 printk(KERN_ERR PFX "Failed status %d: wr_id %d\n",
970 wc.status, (int) wc.wr_id);
971 return;
972 }
973 ++scnt;
974 }
975 microtime(&stop_tv);
976
977 if (stop_tv.tv_usec < start_tv.tv_usec) {
978 stop_tv.tv_usec += 1000000;
979 stop_tv.tv_sec -= 1;
980 }
981
982 printk(KERN_ERR PFX "delta sec %lu delta usec %lu iter %d size %d\n",
983 (unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
984 (unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
985 scnt, cb->size);
986 }
987
988 static void wlat_test(struct krping_cb *cb)
989 {
990 int ccnt, scnt, rcnt;
991 int iters=cb->count;
992 volatile char *poll_buf = (char *) cb->start_buf;
993 char *buf = (char *)cb->rdma_buf;
994 struct timeval start_tv, stop_tv;
995 cycles_t *post_cycles_start = NULL;
996 cycles_t *post_cycles_stop = NULL;
997 cycles_t *poll_cycles_start = NULL;
998 cycles_t *poll_cycles_stop = NULL;
999 cycles_t *last_poll_cycles_start = NULL;
1000 cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
1001 int i;
1002 int cycle_iters = 1000;
1003
1004 ccnt = 0;
1005 scnt = 0;
1006 rcnt = 0;
1007
1008 post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1009 if (!post_cycles_start) {
1010 printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1011 goto done;
1012 }
1013 post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1014 if (!post_cycles_stop) {
1015 printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1016 goto done;
1017 }
1018 poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1019 if (!poll_cycles_start) {
1020 printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1021 goto done;
1022 }
1023 poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1024 if (!poll_cycles_stop) {
1025 printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1026 goto done;
1027 }
1028 last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
1029 GFP_KERNEL);
1030 if (!last_poll_cycles_start) {
1031 printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1032 goto done;
1033 }
1034 cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
1035 cb->rdma_sq_wr.rkey = cb->remote_rkey;
1036 cb->rdma_sq_wr.remote_addr = cb->remote_addr;
1037 cb->rdma_sq_wr.wr.sg_list->length = cb->size;
1038
1039 if (cycle_iters > iters)
1040 cycle_iters = iters;
1041 microtime(&start_tv);
1042 while (scnt < iters || ccnt < iters || rcnt < iters) {
1043
1044 /* Wait till buffer changes. */
1045 if (rcnt < iters && !(scnt < 1 && !cb->server)) {
1046 ++rcnt;
1047 while (*poll_buf != (char)rcnt) {
1048 if (cb->state == ERROR) {
1049 printk(KERN_ERR PFX
1050 "state = ERROR, bailing\n");
1051 goto done;
1052 }
1053 }
1054 }
1055
1056 if (scnt < iters) {
1057 struct ib_send_wr *bad_wr;
1058
1059 *buf = (char)scnt+1;
1060 if (scnt < cycle_iters)
1061 post_cycles_start[scnt] = get_cycles();
1062 if (ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr)) {
1063 printk(KERN_ERR PFX
1064 "Couldn't post send: scnt=%d\n",
1065 scnt);
1066 goto done;
1067 }
1068 if (scnt < cycle_iters)
1069 post_cycles_stop[scnt] = get_cycles();
1070 scnt++;
1071 }
1072
1073 if (ccnt < iters) {
1074 struct ib_wc wc;
1075 int ne;
1076
1077 if (ccnt < cycle_iters)
1078 poll_cycles_start[ccnt] = get_cycles();
1079 do {
1080 if (ccnt < cycle_iters)
1081 last_poll_cycles_start[ccnt] =
1082 get_cycles();
1083 ne = ib_poll_cq(cb->cq, 1, &wc);
1084 } while (ne == 0);
1085 if (ccnt < cycle_iters)
1086 poll_cycles_stop[ccnt] = get_cycles();
1087 ++ccnt;
1088
1089 if (ne < 0) {
1090 printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
1091 goto done;
1092 }
1093 if (wc.status != IB_WC_SUCCESS) {
1094 printk(KERN_ERR PFX
1095 				       "Completion with error at %s:\n",
1096 cb->server ? "server" : "client");
1097 printk(KERN_ERR PFX
1098 "Failed status %d: wr_id %d\n",
1099 wc.status, (int) wc.wr_id);
1100 printk(KERN_ERR PFX
1101 "scnt=%d, rcnt=%d, ccnt=%d\n",
1102 scnt, rcnt, ccnt);
1103 goto done;
1104 }
1105 }
1106 }
1107 microtime(&stop_tv);
1108
1109 if (stop_tv.tv_usec < start_tv.tv_usec) {
1110 stop_tv.tv_usec += 1000000;
1111 stop_tv.tv_sec -= 1;
1112 }
1113
1114 for (i=0; i < cycle_iters; i++) {
1115 sum_post += post_cycles_stop[i] - post_cycles_start[i];
1116 sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
1117 sum_last_poll += poll_cycles_stop[i]-last_poll_cycles_start[i];
1118 }
1119 printk(KERN_ERR PFX
1120 "delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
1121 " sum_post %llu sum_poll %llu sum_last_poll %llu\n",
1122 (unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
1123 (unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
1124 scnt, cb->size, cycle_iters,
1125 (unsigned long long)sum_post, (unsigned long long)sum_poll,
1126 (unsigned long long)sum_last_poll);
1127 done:
1128 kfree(post_cycles_start);
1129 kfree(post_cycles_stop);
1130 kfree(poll_cycles_start);
1131 kfree(poll_cycles_stop);
1132 kfree(last_poll_cycles_start);
1133 }
1134
1135 static void bw_test(struct krping_cb *cb)
1136 {
1137 int ccnt, scnt, rcnt;
1138 int iters=cb->count;
1139 struct timeval start_tv, stop_tv;
1140 cycles_t *post_cycles_start = NULL;
1141 cycles_t *post_cycles_stop = NULL;
1142 cycles_t *poll_cycles_start = NULL;
1143 cycles_t *poll_cycles_stop = NULL;
1144 cycles_t *last_poll_cycles_start = NULL;
1145 cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
1146 int i;
1147 int cycle_iters = 1000;
1148
1149 ccnt = 0;
1150 scnt = 0;
1151 rcnt = 0;
1152
1153 post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1154 if (!post_cycles_start) {
1155 printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1156 goto done;
1157 }
1158 post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1159 if (!post_cycles_stop) {
1160 printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1161 goto done;
1162 }
1163 poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1164 if (!poll_cycles_start) {
1165 printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1166 goto done;
1167 }
1168 poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1169 if (!poll_cycles_stop) {
1170 printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1171 goto done;
1172 }
1173 last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
1174 GFP_KERNEL);
1175 if (!last_poll_cycles_start) {
1176 printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1177 goto done;
1178 }
1179 cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
1180 cb->rdma_sq_wr.rkey = cb->remote_rkey;
1181 cb->rdma_sq_wr.remote_addr = cb->remote_addr;
1182 cb->rdma_sq_wr.wr.sg_list->length = cb->size;
1183
1184 if (cycle_iters > iters)
1185 cycle_iters = iters;
1186 microtime(&start_tv);
1187 while (scnt < iters || ccnt < iters) {
1188
1189 while (scnt < iters && scnt - ccnt < cb->txdepth) {
1190 struct ib_send_wr *bad_wr;
1191
1192 if (scnt < cycle_iters)
1193 post_cycles_start[scnt] = get_cycles();
1194 if (ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr)) {
1195 printk(KERN_ERR PFX
1196 "Couldn't post send: scnt=%d\n",
1197 scnt);
1198 goto done;
1199 }
1200 if (scnt < cycle_iters)
1201 post_cycles_stop[scnt] = get_cycles();
1202 ++scnt;
1203 }
1204
1205 if (ccnt < iters) {
1206 int ne;
1207 struct ib_wc wc;
1208
1209 if (ccnt < cycle_iters)
1210 poll_cycles_start[ccnt] = get_cycles();
1211 do {
1212 if (ccnt < cycle_iters)
1213 last_poll_cycles_start[ccnt] =
1214 get_cycles();
1215 ne = ib_poll_cq(cb->cq, 1, &wc);
1216 } while (ne == 0);
1217 if (ccnt < cycle_iters)
1218 poll_cycles_stop[ccnt] = get_cycles();
1219 ccnt += 1;
1220
1221 if (ne < 0) {
1222 printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
1223 goto done;
1224 }
1225 if (wc.status != IB_WC_SUCCESS) {
1226 printk(KERN_ERR PFX
1227 				       "Completion with error at %s:\n",
1228 cb->server ? "server" : "client");
1229 printk(KERN_ERR PFX
1230 "Failed status %d: wr_id %d\n",
1231 wc.status, (int) wc.wr_id);
1232 goto done;
1233 }
1234 }
1235 }
1236 microtime(&stop_tv);
1237
1238 if (stop_tv.tv_usec < start_tv.tv_usec) {
1239 stop_tv.tv_usec += 1000000;
1240 stop_tv.tv_sec -= 1;
1241 }
1242
1243 for (i=0; i < cycle_iters; i++) {
1244 sum_post += post_cycles_stop[i] - post_cycles_start[i];
1245 sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
1246 sum_last_poll += poll_cycles_stop[i]-last_poll_cycles_start[i];
1247 }
1248 printk(KERN_ERR PFX
1249 "delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
1250 " sum_post %llu sum_poll %llu sum_last_poll %llu\n",
1251 (unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
1252 (unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
1253 scnt, cb->size, cycle_iters,
1254 (unsigned long long)sum_post, (unsigned long long)sum_poll,
1255 (unsigned long long)sum_last_poll);
1256 done:
1257 kfree(post_cycles_start);
1258 kfree(post_cycles_stop);
1259 kfree(poll_cycles_start);
1260 kfree(poll_cycles_stop);
1261 kfree(last_poll_cycles_start);
1262 }
1263
1264 static void krping_rlat_test_server(struct krping_cb *cb)
1265 {
1266 struct ib_send_wr *bad_wr;
1267 struct ib_wc wc;
1268 int ret;
1269
1270 /* Spin waiting for client's Start STAG/TO/Len */
1271 while (cb->state < RDMA_READ_ADV) {
1272 krping_cq_event_handler(cb->cq, cb);
1273 }
1274
1275 /* Send STAG/TO/Len to client */
1276 krping_format_send(cb, cb->start_dma_addr);
1277 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1278 if (ret) {
1279 printk(KERN_ERR PFX "post send error %d\n", ret);
1280 return;
1281 }
1282
1283 /* Spin waiting for send completion */
1284 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1285 if (ret < 0) {
1286 printk(KERN_ERR PFX "poll error %d\n", ret);
1287 return;
1288 }
1289 if (wc.status) {
1290 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1291 return;
1292 }
1293
1294 wait_event_interruptible(cb->sem, cb->state == ERROR);
1295 }
1296
1297 static void krping_wlat_test_server(struct krping_cb *cb)
1298 {
1299 struct ib_send_wr *bad_wr;
1300 struct ib_wc wc;
1301 int ret;
1302
1303 /* Spin waiting for client's Start STAG/TO/Len */
1304 while (cb->state < RDMA_READ_ADV) {
1305 krping_cq_event_handler(cb->cq, cb);
1306 }
1307
1308 /* Send STAG/TO/Len to client */
1309 krping_format_send(cb, cb->start_dma_addr);
1310 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1311 if (ret) {
1312 printk(KERN_ERR PFX "post send error %d\n", ret);
1313 return;
1314 }
1315
1316 /* Spin waiting for send completion */
1317 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1318 if (ret < 0) {
1319 printk(KERN_ERR PFX "poll error %d\n", ret);
1320 return;
1321 }
1322 if (wc.status) {
1323 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1324 return;
1325 }
1326
1327 wlat_test(cb);
1328 wait_event_interruptible(cb->sem, cb->state == ERROR);
1329 }
1330
1331 static void krping_bw_test_server(struct krping_cb *cb)
1332 {
1333 struct ib_send_wr *bad_wr;
1334 struct ib_wc wc;
1335 int ret;
1336
1337 /* Spin waiting for client's Start STAG/TO/Len */
1338 while (cb->state < RDMA_READ_ADV) {
1339 krping_cq_event_handler(cb->cq, cb);
1340 }
1341
1342 /* Send STAG/TO/Len to client */
1343 krping_format_send(cb, cb->start_dma_addr);
1344 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1345 if (ret) {
1346 printk(KERN_ERR PFX "post send error %d\n", ret);
1347 return;
1348 }
1349
1350 /* Spin waiting for send completion */
1351 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1352 if (ret < 0) {
1353 printk(KERN_ERR PFX "poll error %d\n", ret);
1354 return;
1355 }
1356 if (wc.status) {
1357 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1358 return;
1359 }
1360
1361 if (cb->duplex)
1362 bw_test(cb);
1363 wait_event_interruptible(cb->sem, cb->state == ERROR);
1364 }
1365
1366 static int reg_supported(struct ib_device *dev)
1367 {
1368 u64 needed_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
1369
1370 if ((dev->attrs.device_cap_flags & needed_flags) != needed_flags) {
1371 printk(KERN_ERR PFX
1372 "Fastreg not supported - device_cap_flags 0x%llx\n",
1373 (unsigned long long)dev->attrs.device_cap_flags);
1374 return 0;
1375 }
1376 DEBUG_LOG("Fastreg supported - device_cap_flags 0x%llx\n",
1377 (unsigned long long)dev->attrs.device_cap_flags);
1378 return 1;
1379 }
1380
1381 static void fill_sockaddr(struct sockaddr_storage *sin, struct krping_cb *cb)
1382 {
1383 memset(sin, 0, sizeof(*sin));
1384
1385 if (cb->addr_type == AF_INET) {
1386 struct sockaddr_in *sin4 = (struct sockaddr_in *)sin;
1387 sin4->sin_len = sizeof(*sin4);
1388 sin4->sin_family = AF_INET;
1389 memcpy((void *)&sin4->sin_addr.s_addr, cb->addr, 4);
1390 sin4->sin_port = cb->port;
1391 } else if (cb->addr_type == AF_INET6) {
1392 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sin;
1393 sin6->sin6_len = sizeof(*sin6);
1394 sin6->sin6_family = AF_INET6;
1395 memcpy((void *)&sin6->sin6_addr, cb->addr, 16);
1396 sin6->sin6_port = cb->port;
1397 }
1398 }
1399
1400 static int krping_bind_server(struct krping_cb *cb)
1401 {
1402 struct sockaddr_storage sin;
1403 int ret;
1404
1405
1406 fill_sockaddr(&sin, cb);
1407
1408 ret = rdma_bind_addr(cb->cm_id, (struct sockaddr *)&sin);
1409 if (ret) {
1410 printk(KERN_ERR PFX "rdma_bind_addr error %d\n", ret);
1411 return ret;
1412 }
1413 DEBUG_LOG("rdma_bind_addr successful\n");
1414
1415 DEBUG_LOG("rdma_listen\n");
1416 ret = rdma_listen(cb->cm_id, 3);
1417 if (ret) {
1418 printk(KERN_ERR PFX "rdma_listen failed: %d\n", ret);
1419 return ret;
1420 }
1421
1422 wait_event_interruptible(cb->sem, cb->state >= CONNECT_REQUEST);
1423 if (cb->state != CONNECT_REQUEST) {
1424 printk(KERN_ERR PFX "wait for CONNECT_REQUEST state %d\n",
1425 cb->state);
1426 return -1;
1427 }
1428
1429 if (!reg_supported(cb->child_cm_id->device))
1430 return -EINVAL;
1431
1432 return 0;
1433 }
1434
1435 static void krping_run_server(struct krping_cb *cb)
1436 {
1437 struct ib_recv_wr *bad_wr;
1438 int ret;
1439
1440 ret = krping_bind_server(cb);
1441 if (ret)
1442 return;
1443
1444 ret = krping_setup_qp(cb, cb->child_cm_id);
1445 if (ret) {
1446 printk(KERN_ERR PFX "setup_qp failed: %d\n", ret);
1447 goto err0;
1448 }
1449
1450 ret = krping_setup_buffers(cb);
1451 if (ret) {
1452 printk(KERN_ERR PFX "krping_setup_buffers failed: %d\n", ret);
1453 goto err1;
1454 }
1455
1456 ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
1457 if (ret) {
1458 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
1459 goto err2;
1460 }
1461
1462 ret = krping_accept(cb);
1463 if (ret) {
1464 printk(KERN_ERR PFX "connect error %d\n", ret);
1465 goto err2;
1466 }
1467
1468 if (cb->wlat)
1469 krping_wlat_test_server(cb);
1470 else if (cb->rlat)
1471 krping_rlat_test_server(cb);
1472 else if (cb->bw)
1473 krping_bw_test_server(cb);
1474 else
1475 krping_test_server(cb);
1476 rdma_disconnect(cb->child_cm_id);
1477 err2:
1478 krping_free_buffers(cb);
1479 err1:
1480 krping_free_qp(cb);
1481 err0:
1482 rdma_destroy_id(cb->child_cm_id);
1483 }
1484
1485 static void krping_test_client(struct krping_cb *cb)
1486 {
1487 int ping, start, cc, i, ret;
1488 struct ib_send_wr *bad_wr;
1489 unsigned char c;
1490
1491 start = 65;
1492 for (ping = 0; !cb->count || ping < cb->count; ping++) {
1493 cb->state = RDMA_READ_ADV;
1494
1495 /* Put some ascii text in the buffer. */
1496 cc = sprintf(cb->start_buf, "rdma-ping-%d: ", ping);
1497 for (i = cc, c = start; i < cb->size; i++) {
1498 cb->start_buf[i] = c;
1499 c++;
1500 if (c > 122)
1501 c = 65;
1502 }
1503 start++;
1504 if (start > 122)
1505 start = 65;
1506 cb->start_buf[cb->size - 1] = 0;
1507
1508 krping_format_send(cb, cb->start_dma_addr);
1509 if (cb->state == ERROR) {
1510 printk(KERN_ERR PFX "krping_format_send failed\n");
1511 break;
1512 }
1513 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1514 if (ret) {
1515 printk(KERN_ERR PFX "post send error %d\n", ret);
1516 break;
1517 }
1518
1519 /* Wait for server to ACK */
1520 wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
1521 if (cb->state != RDMA_WRITE_ADV) {
1522 printk(KERN_ERR PFX
1523 "wait for RDMA_WRITE_ADV state %d\n",
1524 cb->state);
1525 break;
1526 }
1527
1528 krping_format_send(cb, cb->rdma_dma_addr);
1529 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1530 if (ret) {
1531 printk(KERN_ERR PFX "post send error %d\n", ret);
1532 break;
1533 }
1534
1535 /* Wait for the server to say the RDMA Write is complete. */
1536 wait_event_interruptible(cb->sem,
1537 cb->state >= RDMA_WRITE_COMPLETE);
1538 if (cb->state != RDMA_WRITE_COMPLETE) {
1539 printk(KERN_ERR PFX
1540 "wait for RDMA_WRITE_COMPLETE state %d\n",
1541 cb->state);
1542 break;
1543 }
1544
1545 if (cb->validate)
1546 if (memcmp(cb->start_buf, cb->rdma_buf, cb->size)) {
1547 printk(KERN_ERR PFX "data mismatch!\n");
1548 break;
1549 }
1550
1551 if (cb->verbose)
1552 printk(KERN_INFO PFX "ping data: %s\n", cb->rdma_buf);
1553 #ifdef SLOW_KRPING
1554 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
1555 #endif
1556 }
1557 }
1558
1559 static void krping_rlat_test_client(struct krping_cb *cb)
1560 {
1561 struct ib_send_wr *bad_wr;
1562 struct ib_wc wc;
1563 int ret;
1564
1565 cb->state = RDMA_READ_ADV;
1566
1567 /* Send STAG/TO/Len to client */
1568 krping_format_send(cb, cb->start_dma_addr);
1569 if (cb->state == ERROR) {
1570 printk(KERN_ERR PFX "krping_format_send failed\n");
1571 return;
1572 }
1573 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1574 if (ret) {
1575 printk(KERN_ERR PFX "post send error %d\n", ret);
1576 return;
1577 }
1578
1579 /* Spin waiting for send completion */
1580 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1581 if (ret < 0) {
1582 printk(KERN_ERR PFX "poll error %d\n", ret);
1583 return;
1584 }
1585 if (wc.status) {
1586 printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1587 return;
1588 }
1589
1590 /* Spin waiting for server's Start STAG/TO/Len */
1591 while (cb->state < RDMA_WRITE_ADV) {
1592 krping_cq_event_handler(cb->cq, cb);
1593 }
1594
1595 #if 0
1596 {
1597 int i;
1598 struct timeval start, stop;
1599 time_t sec;
1600 suseconds_t usec;
1601 unsigned long long elapsed;
1602 struct ib_wc wc;
1603 struct ib_send_wr *bad_wr;
1604 int ne;
1605
1606 cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
1607 cb->rdma_sq_wr.rkey = cb->remote_rkey;
1608 cb->rdma_sq_wr.remote_addr = cb->remote_addr;
1609 cb->rdma_sq_wr.wr.sg_list->length = 0;
1610 cb->rdma_sq_wr.wr.num_sge = 0;
1611
1612 microtime(&start);
1613 for (i=0; i < 100000; i++) {
1614 if (ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr)) {
1615 printk(KERN_ERR PFX "Couldn't post send\n");
1616 return;
1617 }
1618 do {
1619 ne = ib_poll_cq(cb->cq, 1, &wc);
1620 } while (ne == 0);
1621 if (ne < 0) {
1622 printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
1623 return;
1624 }
1625 if (wc.status != IB_WC_SUCCESS) {
1626 			printk(KERN_ERR PFX "Completion with error at %s:\n",
1627 cb->server ? "server" : "client");
1628 printk(KERN_ERR PFX "Failed status %d: wr_id %d\n",
1629 wc.status, (int) wc.wr_id);
1630 return;
1631 }
1632 }
1633 microtime(&stop);
1634
1635 if (stop.tv_usec < start.tv_usec) {
1636 stop.tv_usec += 1000000;
1637 stop.tv_sec -= 1;
1638 }
1639 sec = stop.tv_sec - start.tv_sec;
1640 usec = stop.tv_usec - start.tv_usec;
1641 elapsed = sec * 1000000 + usec;
1642 printk(KERN_ERR PFX "0B-write-lat iters 100000 usec %llu\n", elapsed);
1643 }
1644 #endif
1645
1646 rlat_test(cb);
1647 }
1648
1649 static void krping_wlat_test_client(struct krping_cb *cb)
1650 {
1651 struct ib_send_wr *bad_wr;
1652 struct ib_wc wc;
1653 int ret;
1654
1655 cb->state = RDMA_READ_ADV;
1656
1657 /* Send STAG/TO/Len to client */
1658 krping_format_send(cb, cb->start_dma_addr);
1659 if (cb->state == ERROR) {
1660 printk(KERN_ERR PFX "krping_format_send failed\n");
1661 return;
1662 }
1663 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1664 if (ret) {
1665 printk(KERN_ERR PFX "post send error %d\n", ret);
1666 return;
1667 }
1668
1669 /* Spin waiting for send completion */
1670 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1671 if (ret < 0) {
1672 printk(KERN_ERR PFX "poll error %d\n", ret);
1673 return;
1674 }
1675 if (wc.status) {
1676 printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1677 return;
1678 }
1679
1680 /* Spin waiting for server's Start STAG/TO/Len */
1681 while (cb->state < RDMA_WRITE_ADV) {
1682 krping_cq_event_handler(cb->cq, cb);
1683 }
1684
1685 wlat_test(cb);
1686 }
1687
1688 static void krping_bw_test_client(struct krping_cb *cb)
1689 {
1690 struct ib_send_wr *bad_wr;
1691 struct ib_wc wc;
1692 int ret;
1693
1694 cb->state = RDMA_READ_ADV;
1695
1696 /* Send STAG/TO/Len to client */
1697 krping_format_send(cb, cb->start_dma_addr);
1698 if (cb->state == ERROR) {
1699 printk(KERN_ERR PFX "krping_format_send failed\n");
1700 return;
1701 }
1702 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1703 if (ret) {
1704 printk(KERN_ERR PFX "post send error %d\n", ret);
1705 return;
1706 }
1707
1708 /* Spin waiting for send completion */
1709 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1710 if (ret < 0) {
1711 printk(KERN_ERR PFX "poll error %d\n", ret);
1712 return;
1713 }
1714 if (wc.status) {
1715 printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1716 return;
1717 }
1718
1719 /* Spin waiting for server's Start STAG/TO/Len */
1720 while (cb->state < RDMA_WRITE_ADV) {
1721 krping_cq_event_handler(cb->cq, cb);
1722 }
1723
1724 bw_test(cb);
1725 }
1726
1727 /*
1728 * Manual qp flush test
1729 */
1730 static void flush_qp(struct krping_cb *cb)
1731 {
1732 struct ib_send_wr wr = { 0 }, *bad;
1733 struct ib_recv_wr recv_wr = { 0 }, *recv_bad;
1734 struct ib_wc wc;
1735 int ret;
1736 int flushed = 0;
1737 int ccnt = 0;
1738
1739 rdma_disconnect(cb->cm_id);
1740 DEBUG_LOG("disconnected!\n");
1741
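	/*
	 * Post one dummy SEND and one dummy RECV with sentinel wr_ids; on
	 * the now-disconnected QP both should complete in error (flushed),
	 * which the poll loop below waits for.
	 */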
1742 wr.opcode = IB_WR_SEND;
1743 wr.wr_id = 0xdeadbeefcafebabe;
1744 ret = ib_post_send(cb->qp, &wr, &bad);
1745 if (ret) {
1746 printk(KERN_ERR PFX "%s post_send failed ret %d\n", __func__, ret);
1747 return;
1748 }
1749
1750 recv_wr.wr_id = 0xcafebabedeadbeef;
1751 ret = ib_post_recv(cb->qp, &recv_wr, &recv_bad);
1752 if (ret) {
1753 printk(KERN_ERR PFX "%s post_recv failed ret %d\n", __func__, ret);
1754 return;
1755 }
1756
1757 /* poll until the flush WRs complete */
1758 do {
1759 ret = ib_poll_cq(cb->cq, 1, &wc);
1760 if (ret < 0) {
1761 printk(KERN_ERR PFX "ib_poll_cq failed %d\n", ret);
1762 return;
1763 }
1764 if (ret == 0)
1765 continue;
1766 ccnt++;
1767 if (wc.wr_id == 0xdeadbeefcafebabe ||
1768 wc.wr_id == 0xcafebabedeadbeef)
1769 flushed++;
1770 } while (flushed != 2);
1771 DEBUG_LOG("qp_flushed! ccnt %u\n", ccnt);
1772 }
1773
1774 static void krping_fr_test(struct krping_cb *cb)
1775 {
1776 struct ib_send_wr inv, *bad;
1777 struct ib_reg_wr fr;
1778 struct ib_wc wc;
1779 u8 key = 0;
1780 struct ib_mr *mr;
1781 int ret;
1782 int size = cb->size;
1783 int plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
1784 unsigned long start;
1785 int count = 0;
1786 int scnt = 0;
1787 struct scatterlist sg = {0};
1788
1789 mr = ib_alloc_mr(cb->pd, IB_MR_TYPE_MEM_REG, plen);
1790 if (IS_ERR(mr)) {
1791 printk(KERN_ERR PFX "ib_alloc_mr failed %ld\n", PTR_ERR(mr));
1792 return;
1793 }
1794
1795 sg_dma_address(&sg) = (dma_addr_t)0xcafebabe0000ULL;
1796 sg_dma_len(&sg) = size;
1797 ret = ib_map_mr_sg(mr, &sg, 1, NULL, PAGE_SIZE);
1798 if (ret <= 0) {
1799 		printk(KERN_ERR PFX "ib_map_mr_sg err %d\n", ret);
1800 goto err2;
1801 }
1802
1803 memset(&fr, 0, sizeof fr);
1804 fr.wr.opcode = IB_WR_REG_MR;
1805 fr.access = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
1806 fr.mr = mr;
1807 fr.wr.next = &inv;
1808
1809 memset(&inv, 0, sizeof inv);
1810 inv.opcode = IB_WR_LOCAL_INV;
1811 inv.send_flags = IB_SEND_SIGNALED;
1812
1813 DEBUG_LOG("fr_test: stag index 0x%x plen %u size %u depth %u\n", mr->rkey >> 8, plen, cb->size, cb->txdepth);
1814 start = time_uptime;
1815 while (!cb->count || count <= cb->count) {
1816 if (SIGPENDING(curthread)) {
1817 printk(KERN_ERR PFX "signal!\n");
1818 break;
1819 }
1820 if ((time_uptime - start) >= 9) {
1821 DEBUG_LOG("fr_test: pausing 1 second! count %u latest size %u plen %u\n", count, size, plen);
1822 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
1823 if (cb->state == ERROR)
1824 break;
1825 start = time_uptime;
1826 }
1827 while (scnt < (cb->txdepth>>1)) {
1828 ib_update_fast_reg_key(mr, ++key);
1829 fr.key = mr->rkey;
1830 inv.ex.invalidate_rkey = mr->rkey;
1831
1832 size = arc4random() % cb->size;
1833 if (size == 0)
1834 size = cb->size;
1835 sg_dma_len(&sg) = size;
1836 ret = ib_map_mr_sg(mr, &sg, 1, NULL, PAGE_SIZE);
1837 if (ret <= 0) {
1838 				printk(KERN_ERR PFX "ib_map_mr_sg err %d\n", ret);
1839 goto err2;
1840 }
1841 ret = ib_post_send(cb->qp, &fr.wr, &bad);
1842 if (ret) {
1843 printk(KERN_ERR PFX "ib_post_send failed %d\n", ret);
1844 goto err2;
1845 }
1846 scnt++;
1847 }
1848
1849 ret = ib_poll_cq(cb->cq, 1, &wc);
1850 if (ret < 0) {
1851 printk(KERN_ERR PFX "ib_poll_cq failed %d\n", ret);
1852 goto err2;
1853 }
1854 if (ret == 1) {
1855 if (wc.status) {
1856 printk(KERN_ERR PFX "completion error %u\n", wc.status);
1857 goto err2;
1858 }
1859 count++;
1860 scnt--;
1861 }
1862 }
1863 err2:
1864 flush_qp(cb);
1865 DEBUG_LOG("fr_test: done!\n");
1866 ib_dereg_mr(mr);
1867 }
1868
1869 static int krping_connect_client(struct krping_cb *cb)
1870 {
1871 struct rdma_conn_param conn_param;
1872 int ret;
1873
1874 memset(&conn_param, 0, sizeof conn_param);
1875 conn_param.responder_resources = 1;
1876 conn_param.initiator_depth = 1;
1877 conn_param.retry_count = 10;
1878
1879 ret = rdma_connect(cb->cm_id, &conn_param);
1880 if (ret) {
1881 printk(KERN_ERR PFX "rdma_connect error %d\n", ret);
1882 return ret;
1883 }
1884
1885 wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
1886 if (cb->state == ERROR) {
1887 printk(KERN_ERR PFX "wait for CONNECTED state %d\n", cb->state);
1888 return -1;
1889 }
1890
1891 DEBUG_LOG("rdma_connect successful\n");
1892 return 0;
1893 }
1894
1895 static int krping_bind_client(struct krping_cb *cb)
1896 {
1897 struct sockaddr_storage sin;
1898 int ret;
1899
1900 fill_sockaddr(&sin, cb);
1901
1902 ret = rdma_resolve_addr(cb->cm_id, NULL, (struct sockaddr *)&sin, 2000);
1903 if (ret) {
1904 printk(KERN_ERR PFX "rdma_resolve_addr error %d\n", ret);
1905 return ret;
1906 }
1907
1908 wait_event_interruptible(cb->sem, cb->state >= ROUTE_RESOLVED);
1909 if (cb->state != ROUTE_RESOLVED) {
1910 printk(KERN_ERR PFX
1911 "addr/route resolution did not resolve: state %d\n",
1912 cb->state);
1913 return -EINTR;
1914 }
1915
1916 if (!reg_supported(cb->cm_id->device))
1917 return -EINVAL;
1918
1919 DEBUG_LOG("rdma_resolve_addr - rdma_resolve_route successful\n");
1920 return 0;
1921 }
1922
1923 static void krping_run_client(struct krping_cb *cb)
1924 {
1925 struct ib_recv_wr *bad_wr;
1926 int ret;
1927
1928 /* set type of service, if any */
1929 if (cb->tos != 0)
1930 rdma_set_service_type(cb->cm_id, cb->tos);
1931
1932 ret = krping_bind_client(cb);
1933 if (ret)
1934 return;
1935
1936 ret = krping_setup_qp(cb, cb->cm_id);
1937 if (ret) {
1938 printk(KERN_ERR PFX "setup_qp failed: %d\n", ret);
1939 return;
1940 }
1941
1942 ret = krping_setup_buffers(cb);
1943 if (ret) {
1944 printk(KERN_ERR PFX "krping_setup_buffers failed: %d\n", ret);
1945 goto err1;
1946 }
1947
1948 ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
1949 if (ret) {
1950 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
1951 goto err2;
1952 }
1953
1954 ret = krping_connect_client(cb);
1955 if (ret) {
1956 printk(KERN_ERR PFX "connect error %d\n", ret);
1957 goto err2;
1958 }
1959
1960 if (cb->wlat)
1961 krping_wlat_test_client(cb);
1962 else if (cb->rlat)
1963 krping_rlat_test_client(cb);
1964 else if (cb->bw)
1965 krping_bw_test_client(cb);
1966 else if (cb->frtest)
1967 krping_fr_test(cb);
1968 else
1969 krping_test_client(cb);
1970 rdma_disconnect(cb->cm_id);
1971 err2:
1972 krping_free_buffers(cb);
1973 err1:
1974 krping_free_qp(cb);
1975 }
1976
1977 static uint16_t
1978 krping_get_ipv6_scope_id(char *name)
1979 {
1980 struct ifnet *ifp;
1981 uint16_t retval;
1982
1983 if (name == NULL)
1984 return (0);
1985 CURVNET_SET_QUIET(TD_TO_VNET(curthread));
1986 ifp = ifunit_ref(name);
1987 CURVNET_RESTORE();
1988 if (ifp == NULL)
1989 return (0);
1990 retval = ifp->if_index;
1991 if_rele(ifp);
1992 return (retval);
1993 }
1994
1995 int krping_doit(char *cmd)
1996 {
1997 struct krping_cb *cb;
1998 int op;
1999 int ret = 0;
2000 char *optarg;
2001 char *scope;
2002 unsigned long optint;
2003
2004 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
2005 if (!cb)
2006 return -ENOMEM;
2007
2008 mutex_lock(&krping_mutex);
2009 list_add_tail(&cb->list, &krping_cbs);
2010 mutex_unlock(&krping_mutex);
2011
2012 cb->server = -1;
2013 cb->state = IDLE;
2014 cb->size = 64;
2015 cb->txdepth = RPING_SQ_DEPTH;
2016 init_waitqueue_head(&cb->sem);
2017
2018 while ((op = krping_getopt("krping", &cmd, krping_opts, NULL, &optarg,
2019 &optint)) != 0) {
2020 switch (op) {
2021 case 'a':
2022 cb->addr_str = optarg;
2023 cb->addr_type = AF_INET;
2024 DEBUG_LOG("ipaddr (%s)\n", optarg);
2025 if (inet_pton(AF_INET, optarg, cb->addr) != 1) {
2026 printk(KERN_ERR PFX "bad addr string %s\n",
2027 optarg);
2028 ret = EINVAL;
2029 }
2030 break;
2031 case 'A':
2032 cb->addr_str = optarg;
2033 cb->addr_type = AF_INET6;
2034 DEBUG_LOG("ipv6addr (%s)\n", optarg);
2035 scope = strstr(optarg, "%");
2036 /* extract scope ID, if any */
2037 if (scope != NULL)
2038 *scope++ = 0;
2039 /* extract IPv6 network address */
2040 if (inet_pton(AF_INET6, optarg, cb->addr) != 1) {
2041 printk(KERN_ERR PFX "bad addr string %s\n",
2042 optarg);
2043 ret = EINVAL;
2044 } else if (IN6_IS_SCOPE_LINKLOCAL((struct in6_addr *)cb->addr) ||
2045 IN6_IS_ADDR_MC_INTFACELOCAL((struct in6_addr *)cb->addr)) {
2046 uint16_t scope_id = krping_get_ipv6_scope_id(scope);
2047 DEBUG_LOG("ipv6 scope ID = %d\n", scope_id);
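				/*
				 * Embed the scope ID in bytes 2-3 of the
				 * link-local address (KAME-style embedded
				 * scope), which the kernel IPv6 code expects.
				 */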
2048 cb->addr[2] = scope_id >> 8;
2049 cb->addr[3] = scope_id & 0xFF;
2050 }
2051 break;
2052 case 'p':
2053 cb->port = htons(optint);
2054 DEBUG_LOG("port %d\n", (int)optint);
2055 break;
2056 case 'P':
2057 cb->poll = 1;
2058 			DEBUG_LOG("poll\n");
2059 break;
2060 case 's':
2061 cb->server = 1;
2062 DEBUG_LOG("server\n");
2063 break;
2064 case 'c':
2065 cb->server = 0;
2066 DEBUG_LOG("client\n");
2067 break;
2068 case 'S':
2069 cb->size = optint;
2070 if ((cb->size < 1) ||
2071 (cb->size > RPING_BUFSIZE)) {
2072 printk(KERN_ERR PFX "Invalid size %d "
2073 "(valid range is 1 to %d)\n",
2074 cb->size, RPING_BUFSIZE);
2075 ret = EINVAL;
2076 } else
2077 DEBUG_LOG("size %d\n", (int)optint);
2078 break;
2079 case 'C':
2080 cb->count = optint;
2081 if (cb->count < 0) {
2082 printk(KERN_ERR PFX "Invalid count %d\n",
2083 cb->count);
2084 ret = EINVAL;
2085 } else
2086 DEBUG_LOG("count %d\n", (int) cb->count);
2087 break;
2088 case 'v':
2089 cb->verbose++;
2090 DEBUG_LOG("verbose\n");
2091 break;
2092 case 'V':
2093 cb->validate++;
2094 DEBUG_LOG("validate data\n");
2095 break;
2096 case 'l':
2097 cb->wlat++;
2098 break;
2099 case 'L':
2100 cb->rlat++;
2101 break;
2102 case 'B':
2103 cb->bw++;
2104 break;
2105 case 'd':
2106 cb->duplex++;
2107 break;
2108 case 'I':
2109 cb->server_invalidate = 1;
2110 break;
2111 case 't':
2112 cb->tos = optint;
2113 DEBUG_LOG("type of service, tos=%d\n", (int) cb->tos);
2114 break;
2115 case 'T':
2116 cb->txdepth = optint;
2117 DEBUG_LOG("txdepth %d\n", (int) cb->txdepth);
2118 break;
2119 case 'Z':
2120 cb->local_dma_lkey = 1;
2121 DEBUG_LOG("using local dma lkey\n");
2122 break;
2123 case 'R':
2124 cb->read_inv = 1;
2125 DEBUG_LOG("using read-with-inv\n");
2126 break;
2127 case 'f':
2128 cb->frtest = 1;
2129 DEBUG_LOG("fast-reg test!\n");
2130 break;
2131 default:
2132 printk(KERN_ERR PFX "unknown opt %s\n", optarg);
2133 ret = -EINVAL;
2134 break;
2135 }
2136 }
2137 if (ret)
2138 goto out;
2139
2140 if (cb->server == -1) {
2141 printk(KERN_ERR PFX "must be either client or server\n");
2142 ret = -EINVAL;
2143 goto out;
2144 }
2145
2146 if (cb->server && cb->frtest) {
2147 printk(KERN_ERR PFX "must be client to run frtest\n");
2148 ret = -EINVAL;
2149 goto out;
2150 }
2151
2152 if ((cb->frtest + cb->bw + cb->rlat + cb->wlat) > 1) {
2153 printk(KERN_ERR PFX "Pick only one test: fr, bw, rlat, wlat\n");
2154 ret = -EINVAL;
2155 goto out;
2156 }
2157
2158 if (cb->wlat || cb->rlat || cb->bw) {
2159 printk(KERN_ERR PFX "wlat, rlat, and bw tests only support mem_mode MR - which is no longer supported\n");
2160 ret = -EINVAL;
2161 goto out;
2162 }
2163
2164 cb->cm_id = rdma_create_id(TD_TO_VNET(curthread), krping_cma_event_handler, cb, RDMA_PS_TCP, IB_QPT_RC);
2165 if (IS_ERR(cb->cm_id)) {
2166 ret = PTR_ERR(cb->cm_id);
2167 printk(KERN_ERR PFX "rdma_create_id error %d\n", ret);
2168 goto out;
2169 }
2170 DEBUG_LOG("created cm_id %p\n", cb->cm_id);
2171
2172 if (cb->server)
2173 krping_run_server(cb);
2174 else
2175 krping_run_client(cb);
2176
2177 DEBUG_LOG("destroy cm_id %p\n", cb->cm_id);
2178 rdma_destroy_id(cb->cm_id);
2179 out:
2180 mutex_lock(&krping_mutex);
2181 list_del(&cb->list);
2182 mutex_unlock(&krping_mutex);
2183 kfree(cb);
2184 return ret;
2185 }
2186
2187 void
2188 krping_walk_cb_list(void (*f)(struct krping_stats *, void *), void *arg)
2189 {
2190 struct krping_cb *cb;
2191
2192 mutex_lock(&krping_mutex);
2193 list_for_each_entry(cb, &krping_cbs, list)
2194 (*f)(cb->pd ? &cb->stats : NULL, arg);
2195 mutex_unlock(&krping_mutex);
2196 }
2197
2198 void
2199 krping_cancel_all(void)
2200 {
2201 struct krping_cb *cb;
2202
2203 mutex_lock(&krping_mutex);
2204 list_for_each_entry(cb, &krping_cbs, list) {
2205 cb->state = ERROR;
2206 wake_up_interruptible(&cb->sem);
2207 }
2208 mutex_unlock(&krping_mutex);
2209 }
2210
2211