1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_inet.h"
38
39 #ifdef TCP_OFFLOAD
40 #include <sys/types.h>
41 #include <sys/malloc.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/sockio.h>
45 #include <sys/taskqueue.h>
46 #include <netinet/in.h>
47 #include <net/route.h>
48
49 #include <netinet/in_systm.h>
50 #include <netinet/in_pcb.h>
51 #include <netinet/ip.h>
52 #include <netinet/ip_var.h>
53 #include <netinet/tcp_var.h>
54 #include <netinet/tcp.h>
55 #include <netinet/tcpip.h>
56
57 #include <netinet/toecore.h>
58
59 struct sge_iq;
60 struct rss_header;
61 struct cpl_set_tcb_rpl;
62 #include <linux/types.h>
63 #include "offload.h"
64 #include "tom/t4_tom.h"
65
66 #include "iw_cxgbe.h"
67 #include "user.h"
68
69 static int creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize);
static int max_fr_immd = T4_MAX_FR_IMMD;	/* SYSCTL parameter later... */
71
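/*
 * Reserve 'ird' incoming RDMA read resources from the device-wide pool
 * tracked in dev->avail_ird; fails with -ENOMEM when the adapter's IRD
 * budget is exhausted.  free_ird() returns the reservation.
 */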
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
73 {
74 int ret = 0;
75
76 spin_lock_irq(&dev->lock);
77 if (ird <= dev->avail_ird)
78 dev->avail_ird -= ird;
79 else
80 ret = -ENOMEM;
81 spin_unlock_irq(&dev->lock);
82
83 if (ret)
84 log(LOG_WARNING, "%s: device IRD resources exhausted\n",
85 device_get_nameunit(dev->rdev.adap->dev));
86
87 return ret;
88 }
89
static void free_ird(struct c4iw_dev *dev, int ird)
91 {
92 spin_lock_irq(&dev->lock);
93 dev->avail_ird += ird;
94 spin_unlock_irq(&dev->lock);
95 }
96
static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
98 {
99 unsigned long flag;
100 spin_lock_irqsave(&qhp->lock, flag);
101 qhp->attr.state = state;
102 spin_unlock_irqrestore(&qhp->lock, flag);
103 }
104
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
    struct c4iw_dev_ucontext *uctx)
107 {
108 struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);
109 /*
110 * uP clears EQ contexts when the connection exits rdma mode,
111 * so no need to post a RESET WR for these EQs.
112 */
113 dma_free_coherent(rhp->ibdev.dma_device,
114 wq->rq.memsize, wq->rq.queue,
115 dma_unmap_addr(&wq->rq, mapping));
116 dma_free_coherent(rhp->ibdev.dma_device,
117 wq->sq.memsize, wq->sq.queue,
118 dma_unmap_addr(&wq->sq, mapping));
119 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
120 kfree(wq->rq.sw_rq);
121 kfree(wq->sq.sw_sq);
122 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
123 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
124 return 0;
125 }
126
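/*
 * Allocate SQ/RQ queue IDs, software shadow rings, RQT space and the
 * DMA-coherent queue memory, then post a single FW_RI_RES_WR carrying
 * two fw_ri_res entries so firmware writes both egress queue contexts.
 */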
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
    struct t4_cq *rcq, struct t4_cq *scq,
    struct c4iw_dev_ucontext *uctx)
130 {
131 struct adapter *sc = rdev->adap;
132 struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);
133 int user = (uctx != &rdev->uctx);
134 struct fw_ri_res_wr *res_wr;
135 struct fw_ri_res *res;
136 int wr_len;
137 struct c4iw_wr_wait wr_wait;
138 int ret = 0;
139 int eqsize;
140 struct wrqe *wr;
141 u64 sq_bar2_qoffset = 0, rq_bar2_qoffset = 0;
142
143 wq->sq.qid = c4iw_get_qpid(rdev, uctx);
144 if (!wq->sq.qid)
145 return -ENOMEM;
146
147 wq->rq.qid = c4iw_get_qpid(rdev, uctx);
148 if (!wq->rq.qid) {
149 ret = -ENOMEM;
150 goto free_sq_qid;
151 }
152
153 if (!user) {
154 wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
155 GFP_KERNEL);
156 if (!wq->sq.sw_sq) {
157 ret = -ENOMEM;
158 goto free_rq_qid;
159 }
160
161 wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
162 GFP_KERNEL);
163 if (!wq->rq.sw_rq) {
164 ret = -ENOMEM;
165 goto free_sw_sq;
166 }
167 }
168
169 /*
170 * RQT must be a power of 2 and at least 16 deep.
171 */
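	/* e.g. an RQ of 24 entries gets a 32-entry RQT; an RQ of 10 gets
	 * the 16-entry minimum. */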
172 wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
173 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
174 if (!wq->rq.rqt_hwaddr) {
175 ret = -ENOMEM;
176 goto free_sw_rq;
177 }
178
	/* QP memory: allocate DMA-able memory for the send and receive queues. */
180 wq->sq.queue = dma_alloc_coherent(rhp->ibdev.dma_device, wq->sq.memsize,
181 &(wq->sq.dma_addr), GFP_KERNEL);
182 if (!wq->sq.queue) {
183 ret = -ENOMEM;
184 goto free_hwaddr;
185 }
186 wq->sq.phys_addr = vtophys(wq->sq.queue);
187 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
188 memset(wq->sq.queue, 0, wq->sq.memsize);
189
190 wq->rq.queue = dma_alloc_coherent(rhp->ibdev.dma_device,
191 wq->rq.memsize, &(wq->rq.dma_addr), GFP_KERNEL);
192 if (!wq->rq.queue) {
193 ret = -ENOMEM;
194 goto free_sq_dma;
195 }
196 wq->rq.phys_addr = vtophys(wq->rq.queue);
197 dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
198 memset(wq->rq.queue, 0, wq->rq.memsize);
199
200 CTR5(KTR_IW_CXGBE,
201 "%s QP sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx",
202 __func__,
203 wq->sq.queue, (unsigned long long)wq->sq.phys_addr,
204 wq->rq.queue, (unsigned long long)wq->rq.phys_addr);
205
206 /* Doorbell/WC regions, determine the BAR2 queue offset and qid. */
207 t4_bar2_sge_qregs(rdev->adap, wq->sq.qid, T4_BAR2_QTYPE_EGRESS, user,
208 &sq_bar2_qoffset, &wq->sq.bar2_qid);
209 t4_bar2_sge_qregs(rdev->adap, wq->rq.qid, T4_BAR2_QTYPE_EGRESS, user,
210 &rq_bar2_qoffset, &wq->rq.bar2_qid);
211
212 if (user) {
		/* Compute the page-aligned BAR2 DB/WC physical address for
		 * userspace mapping.
		 */
216 wq->sq.bar2_pa = (rdev->bar2_pa + sq_bar2_qoffset) & PAGE_MASK;
217 wq->rq.bar2_pa = (rdev->bar2_pa + rq_bar2_qoffset) & PAGE_MASK;
218 CTR3(KTR_IW_CXGBE,
219 "%s BAR2 DB/WC sq base pa 0x%llx rq base pa 0x%llx",
220 __func__, (unsigned long long)wq->sq.bar2_pa,
221 (unsigned long long)wq->rq.bar2_pa);
222 } else {
223 /* Compute BAR2 DB/WC virtual address to access in kernel. */
224 wq->sq.bar2_va = (void __iomem *)((u64)rdev->bar2_kva +
225 sq_bar2_qoffset);
226 wq->rq.bar2_va = (void __iomem *)((u64)rdev->bar2_kva +
227 rq_bar2_qoffset);
228 CTR3(KTR_IW_CXGBE, "%s BAR2 DB/WC sq base va %p rq base va %p",
229 __func__, (unsigned long long)wq->sq.bar2_va,
230 (unsigned long long)wq->rq.bar2_va);
231 }
232
233 wq->rdev = rdev;
234 wq->rq.msn = 1;
235
236 /* build fw_ri_res_wr */
237 wr_len = sizeof *res_wr + 2 * sizeof *res;
238
239 wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
240 if (wr == NULL) {
241 ret = -ENOMEM;
242 goto free_rq_dma;
243 }
244 res_wr = wrtod(wr);
245
246 memset(res_wr, 0, wr_len);
247 res_wr->op_nres = cpu_to_be32(
248 V_FW_WR_OP(FW_RI_RES_WR) |
249 V_FW_RI_RES_WR_NRES(2) |
250 F_FW_WR_COMPL);
251 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
252 res_wr->cookie = (unsigned long) &wr_wait;
253 res = res_wr->res;
254 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
255 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
256
257 /* eqsize is the number of 64B entries plus the status page size. */
258 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
259 rdev->hw_queue.t4_eq_status_entries;
260
261 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
262 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
263 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
264 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
265 V_FW_RI_RES_WR_IQID(scq->cqid));
266 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
267 V_FW_RI_RES_WR_DCAEN(0) |
268 V_FW_RI_RES_WR_DCACPU(0) |
269 V_FW_RI_RES_WR_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
270 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
271 V_FW_RI_RES_WR_FBMAX(3) |
272 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
273 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
274 V_FW_RI_RES_WR_EQSIZE(eqsize));
275 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
276 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
277 res++;
278 res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
279 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
280
281 /* eqsize is the number of 64B entries plus the status page size. */
282 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
283 rdev->hw_queue.t4_eq_status_entries;
284 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
285 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
286 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
287 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
288 V_FW_RI_RES_WR_IQID(rcq->cqid));
289 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
290 V_FW_RI_RES_WR_DCAEN(0) |
291 V_FW_RI_RES_WR_DCACPU(0) |
292 V_FW_RI_RES_WR_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
293 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
294 V_FW_RI_RES_WR_FBMAX(3) |
295 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
296 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
297 V_FW_RI_RES_WR_EQSIZE(eqsize));
298 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
299 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
300
301 c4iw_init_wr_wait(&wr_wait);
302
303 t4_wrq_tx(sc, wr);
304 ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid,
305 NULL, __func__);
306 if (ret)
307 goto free_rq_dma;
308
309 CTR5(KTR_IW_CXGBE,
310 "%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx",
311 __func__, wq->sq.qid, wq->rq.qid,
312 (unsigned long long)wq->sq.bar2_va,
313 (unsigned long long)wq->rq.bar2_va);
314
315 return 0;
316 free_rq_dma:
317 dma_free_coherent(rhp->ibdev.dma_device,
318 wq->rq.memsize, wq->rq.queue,
319 dma_unmap_addr(&wq->rq, mapping));
320 free_sq_dma:
321 dma_free_coherent(rhp->ibdev.dma_device,
322 wq->sq.memsize, wq->sq.queue,
323 dma_unmap_addr(&wq->sq, mapping));
324 free_hwaddr:
325 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
326 free_sw_rq:
327 kfree(wq->rq.sw_rq);
328 free_sw_sq:
329 kfree(wq->sq.sw_sq);
330 free_rq_qid:
331 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
332 free_sq_qid:
333 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
334 return ret;
335 }
336
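/*
 * Copy the WR's scatter/gather payload inline into the WQE right after
 * the fw_ri_immd header, wrapping at the end of the SQ ring; fails with
 * -EMSGSIZE if the total length exceeds 'max'.
 */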
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
    const struct ib_send_wr *wr, int max, u32 *plenp)
339 {
340 u8 *dstp, *srcp;
341 u32 plen = 0;
342 int i;
343 int rem, len;
344
345 dstp = (u8 *)immdp->data;
346 for (i = 0; i < wr->num_sge; i++) {
347 if ((plen + wr->sg_list[i].length) > max)
348 return -EMSGSIZE;
349 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
350 plen += wr->sg_list[i].length;
351 rem = wr->sg_list[i].length;
352 while (rem) {
353 if (dstp == (u8 *)&sq->queue[sq->size])
354 dstp = (u8 *)sq->queue;
355 if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
356 len = rem;
357 else
358 len = (u8 *)&sq->queue[sq->size] - dstp;
359 memcpy(dstp, srcp, len);
360 dstp += len;
361 srcp += len;
362 rem -= len;
363 }
364 }
365 len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
366 if (len)
367 memset(dstp, 0, len);
368 immdp->op = FW_RI_DATA_IMMD;
369 immdp->r1 = 0;
370 immdp->r2 = 0;
371 immdp->immdlen = cpu_to_be32(plen);
372 *plenp = plen;
373 return 0;
374 }
375
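/*
 * Build an ISGL (SGE list passed by reference) in the queue, two 64-bit
 * flits per SGE (lkey and length, then address), wrapping at queue_end.
 */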
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
    struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
    int num_sge, u32 *plenp)
379
380 {
381 int i;
382 u32 plen = 0;
383 __be64 *flitp = (__be64 *)isglp->sge;
384
385 for (i = 0; i < num_sge; i++) {
386 if ((plen + sg_list[i].length) < plen)
387 return -EMSGSIZE;
388 plen += sg_list[i].length;
389 *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
390 sg_list[i].length);
391 if (++flitp == queue_end)
392 flitp = queue_start;
393 *flitp = cpu_to_be64(sg_list[i].addr);
394 if (++flitp == queue_end)
395 flitp = queue_start;
396 }
397 *flitp = (__force __be64)0;
398 isglp->op = FW_RI_DATA_ISGL;
399 isglp->r1 = 0;
400 isglp->nsge = cpu_to_be16(num_sge);
401 isglp->r2 = 0;
402 if (plenp)
403 *plenp = plen;
404 return 0;
405 }
406
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
    const struct ib_send_wr *wr, u8 *len16)
409 {
410 u32 plen;
411 int size;
412 int ret;
413
414 if (wr->num_sge > T4_MAX_SEND_SGE)
415 return -EINVAL;
416 switch (wr->opcode) {
417 case IB_WR_SEND:
418 if (wr->send_flags & IB_SEND_SOLICITED)
419 wqe->send.sendop_pkd = cpu_to_be32(
420 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
421 else
422 wqe->send.sendop_pkd = cpu_to_be32(
423 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
424 wqe->send.stag_inv = 0;
425 break;
426 case IB_WR_SEND_WITH_INV:
427 if (wr->send_flags & IB_SEND_SOLICITED)
428 wqe->send.sendop_pkd = cpu_to_be32(
429 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
430 else
431 wqe->send.sendop_pkd = cpu_to_be32(
432 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
433 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
434 break;
435
436 default:
437 return -EINVAL;
438 }
439 wqe->send.r3 = 0;
440 wqe->send.r4 = 0;
441
442 plen = 0;
443 if (wr->num_sge) {
444 if (wr->send_flags & IB_SEND_INLINE) {
445 ret = build_immd(sq, wqe->send.u.immd_src, wr,
446 T4_MAX_SEND_INLINE, &plen);
447 if (ret)
448 return ret;
449 size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
450 plen;
451 } else {
452 ret = build_isgl((__be64 *)sq->queue,
453 (__be64 *)&sq->queue[sq->size],
454 wqe->send.u.isgl_src,
455 wr->sg_list, wr->num_sge, &plen);
456 if (ret)
457 return ret;
458 size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
459 wr->num_sge * sizeof(struct fw_ri_sge);
460 }
461 } else {
462 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
463 wqe->send.u.immd_src[0].r1 = 0;
464 wqe->send.u.immd_src[0].r2 = 0;
465 wqe->send.u.immd_src[0].immdlen = 0;
466 size = sizeof wqe->send + sizeof(struct fw_ri_immd);
467 plen = 0;
468 }
469 *len16 = DIV_ROUND_UP(size, 16);
470 wqe->send.plen = cpu_to_be32(plen);
471 return 0;
472 }
473
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
    const struct ib_send_wr *wr, u8 *len16)
476 {
477 u32 plen;
478 int size;
479 int ret;
480
481 if (wr->num_sge > T4_MAX_SEND_SGE)
482 return -EINVAL;
483 wqe->write.immd_data = 0;
484 wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
485 wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
486 if (wr->num_sge) {
487 if (wr->send_flags & IB_SEND_INLINE) {
488 ret = build_immd(sq, wqe->write.u.immd_src, wr,
489 T4_MAX_WRITE_INLINE, &plen);
490 if (ret)
491 return ret;
492 size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
493 plen;
494 } else {
495 ret = build_isgl((__be64 *)sq->queue,
496 (__be64 *)&sq->queue[sq->size],
497 wqe->write.u.isgl_src,
498 wr->sg_list, wr->num_sge, &plen);
499 if (ret)
500 return ret;
501 size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
502 wr->num_sge * sizeof(struct fw_ri_sge);
503 }
504 } else {
505 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
506 wqe->write.u.immd_src[0].r1 = 0;
507 wqe->write.u.immd_src[0].r2 = 0;
508 wqe->write.u.immd_src[0].immdlen = 0;
509 size = sizeof wqe->write + sizeof(struct fw_ri_immd);
510 plen = 0;
511 }
512 *len16 = DIV_ROUND_UP(size, 16);
513 wqe->write.plen = cpu_to_be32(plen);
514 return 0;
515 }
516
static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16)
518 {
519 if (wr->num_sge > 1)
520 return -EINVAL;
521 if (wr->num_sge && wr->sg_list[0].length) {
522 wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
523 wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
524 >> 32));
525 wqe->read.to_src_lo =
526 cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
527 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
528 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
529 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
530 >> 32));
531 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
532 } else {
533 wqe->read.stag_src = cpu_to_be32(2);
534 wqe->read.to_src_hi = 0;
535 wqe->read.to_src_lo = 0;
536 wqe->read.stag_sink = cpu_to_be32(2);
537 wqe->read.plen = 0;
538 wqe->read.to_sink_hi = 0;
539 wqe->read.to_sink_lo = 0;
540 }
541 wqe->read.r2 = 0;
542 wqe->read.r5 = 0;
543 *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
544 return 0;
545 }
546
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
    const struct ib_recv_wr *wr, u8 *len16)
549 {
550 int ret;
551
552 ret = build_isgl((__be64 *)qhp->wq.rq.queue,
553 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
554 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
555 if (ret)
556 return ret;
557 *len16 = DIV_ROUND_UP(sizeof wqe->recv +
558 wr->num_sge * sizeof(struct fw_ri_sge), 16);
559 return 0;
560 }
561
static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
    u8 *len16)
564 {
565 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
566 wqe->inv.r2 = 0;
567 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
568 return 0;
569 }
570
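/*
 * Final QP teardown is deferred to a workqueue: queue_qp_free() is the
 * kref release callback and only schedules free_qp_work(), which frees
 * the hardware queues, drops the ucontext reference and frees the qhp
 * from a context that may sleep.
 */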
static void free_qp_work(struct work_struct *work)
572 {
573 struct c4iw_ucontext *ucontext;
574 struct c4iw_qp *qhp;
575 struct c4iw_dev *rhp;
576
577 qhp = container_of(work, struct c4iw_qp, free_work);
578 ucontext = qhp->ucontext;
579 rhp = qhp->rhp;
580
581 CTR3(KTR_IW_CXGBE, "%s qhp %p ucontext %p", __func__,
582 qhp, ucontext);
583 destroy_qp(&rhp->rdev, &qhp->wq,
584 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
585
586 if (ucontext)
587 c4iw_put_ucontext(ucontext);
588 kfree(qhp);
589 }
590
static void queue_qp_free(struct kref *kref)
592 {
593 struct c4iw_qp *qhp;
594
595 qhp = container_of(kref, struct c4iw_qp, kref);
596 CTR2(KTR_IW_CXGBE, "%s qhp %p", __func__, qhp);
597 queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
598 }
599
void c4iw_qp_add_ref(struct ib_qp *qp)
601 {
602 CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp);
603 kref_get(&to_c4iw_qp(qp)->kref);
604 }
605
void c4iw_qp_rem_ref(struct ib_qp *qp)
607 {
608 CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp);
609 kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
610 }
611
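/*
 * When the QP is already in error, posted WRs are completed immediately
 * as software CQEs with T4_ERR_SWFLUSH status so drain semantics still
 * work without touching the hardware queues.
 */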
static void complete_sq_drain_wr(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
613 {
614 struct t4_cqe cqe = {};
615 struct c4iw_cq *schp;
616 unsigned long flag;
617 struct t4_cq *cq;
618
619 schp = to_c4iw_cq(qhp->ibqp.send_cq);
620 cq = &schp->cq;
621
622 PDBG("%s drain sq id %u\n", __func__, qhp->wq.sq.qid);
623 cqe.u.drain_cookie = wr->wr_id;
624 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
625 V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
626 V_CQE_TYPE(1) |
627 V_CQE_SWCQE(1) |
628 V_CQE_QPID(qhp->wq.sq.qid));
629
630 spin_lock_irqsave(&schp->lock, flag);
631 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
632 cq->sw_queue[cq->sw_pidx] = cqe;
633 t4_swcq_produce(cq);
634 spin_unlock_irqrestore(&schp->lock, flag);
635
636 spin_lock_irqsave(&schp->comp_handler_lock, flag);
637 (*schp->ibcq.comp_handler)(&schp->ibcq,
638 schp->ibcq.cq_context);
639 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
640 }
641
static void complete_rq_drain_wr(struct c4iw_qp *qhp, const struct ib_recv_wr *wr)
643 {
644 struct t4_cqe cqe = {};
645 struct c4iw_cq *rchp;
646 unsigned long flag;
647 struct t4_cq *cq;
648
649 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
650 cq = &rchp->cq;
651
652 PDBG("%s drain rq id %u\n", __func__, qhp->wq.sq.qid);
653 cqe.u.drain_cookie = wr->wr_id;
654 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
655 V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
656 V_CQE_TYPE(0) |
657 V_CQE_SWCQE(1) |
658 V_CQE_QPID(qhp->wq.sq.qid));
659
660 spin_lock_irqsave(&rchp->lock, flag);
661 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
662 cq->sw_queue[cq->sw_pidx] = cqe;
663 t4_swcq_produce(cq);
664 spin_unlock_irqrestore(&rchp->lock, flag);
665
666 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
667 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
668 rchp->ibcq.cq_context);
669 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
670 }
671
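/*
 * Two fast-register flavors: build_tpte_memreg() writes the TPT entry
 * directly in the WR (FW_RI_FR_NSMR_TPTE_WR), while build_memreg()
 * carries the PBL either as immediate data or, when DSGL is supported
 * and enabled and the PBL is larger than max_fr_immd, as a DSGL that
 * the hardware fetches by DMA.
 */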
static int build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
    const struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16)
674 {
675 __be64 *p = (__be64 *)fr->pbl;
676
677 if (wr->mr->page_size > C4IW_MAX_PAGE_SIZE)
678 return -EINVAL;
679
680 fr->r2 = cpu_to_be32(0);
681 fr->stag = cpu_to_be32(mhp->ibmr.rkey);
682
683 fr->tpte.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
684 V_FW_RI_TPTE_STAGKEY((mhp->ibmr.rkey & M_FW_RI_TPTE_STAGKEY)) |
685 V_FW_RI_TPTE_STAGSTATE(1) |
686 V_FW_RI_TPTE_STAGTYPE(FW_RI_STAG_NSMR) |
687 V_FW_RI_TPTE_PDID(mhp->attr.pdid));
688 fr->tpte.locread_to_qpid = cpu_to_be32(
689 V_FW_RI_TPTE_PERM(c4iw_ib_to_tpt_access(wr->access)) |
690 V_FW_RI_TPTE_ADDRTYPE(FW_RI_VA_BASED_TO) |
691 V_FW_RI_TPTE_PS(ilog2(wr->mr->page_size) - 12));
692 fr->tpte.nosnoop_pbladdr = cpu_to_be32(V_FW_RI_TPTE_PBLADDR(
693 PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
694 fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
695 fr->tpte.len_hi = cpu_to_be32(mhp->ibmr.length >> 32);
696 fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length & 0xffffffff);
697 fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
698 fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
699
700 p[0] = cpu_to_be64((u64)mhp->mpl[0]);
701 p[1] = cpu_to_be64((u64)mhp->mpl[1]);
702
703 *len16 = DIV_ROUND_UP(sizeof(*fr), 16);
704 return 0;
705 }
706
static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
    const struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16,
    bool dsgl_supported)
710 {
711 struct fw_ri_immd *imdp;
712 __be64 *p;
713 int i;
714 int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
715 int rem;
716
717 if (mhp->mpl_len > t4_max_fr_depth(&mhp->rhp->rdev, use_dsgl))
718 return -EINVAL;
719 if (wr->mr->page_size > C4IW_MAX_PAGE_SIZE)
720 return -EINVAL;
721
722 wqe->fr.qpbinde_to_dcacpu = 0;
723 wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
724 wqe->fr.addr_type = FW_RI_VA_BASED_TO;
725 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
726 wqe->fr.len_hi = cpu_to_be32(mhp->ibmr.length >> 32);
727 wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length & 0xffffffff);
728 wqe->fr.stag = cpu_to_be32(wr->key);
729 wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
730 wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
731
732 if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
733 struct fw_ri_dsgl *sglp;
734
735 for (i = 0; i < mhp->mpl_len; i++)
736 mhp->mpl[i] =
737 (__force u64)cpu_to_be64((u64)mhp->mpl[i]);
738
739 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
740 sglp->op = FW_RI_DATA_DSGL;
741 sglp->r1 = 0;
742 sglp->nsge = cpu_to_be16(1);
743 sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
744 sglp->len0 = cpu_to_be32(pbllen);
745
746 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
747 } else {
748 imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
749 imdp->op = FW_RI_DATA_IMMD;
750 imdp->r1 = 0;
751 imdp->r2 = 0;
752 imdp->immdlen = cpu_to_be32(pbllen);
753 p = (__be64 *)(imdp + 1);
754 rem = pbllen;
755 for (i = 0; i < mhp->mpl_len; i++) {
756 *p = cpu_to_be64((u64)mhp->mpl[i]);
757 rem -= sizeof(*p);
758 if (++p == (__be64 *)&sq->queue[sq->size])
759 p = (__be64 *)sq->queue;
760 }
761 BUG_ON(rem < 0);
762 while (rem) {
763 *p = 0;
764 rem -= sizeof(*p);
765 if (++p == (__be64 *)&sq->queue[sq->size])
766 p = (__be64 *)sq->queue;
767 }
768 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
769 + pbllen, 16);
770 }
771
772 return 0;
773 }
774
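/*
 * Post one or more send WRs: translate each ib_send_wr into a T4 WQE
 * under the QP lock, record it in the software SQ, and ring the SQ
 * doorbell once for the whole chain.
 */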
int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
    const struct ib_send_wr **bad_wr)
777 {
778 int err = 0;
779 u8 len16 = 0;
780 enum fw_wr_opcodes fw_opcode = 0;
781 enum fw_ri_wr_flags fw_flags;
782 struct c4iw_qp *qhp;
783 union t4_wr *wqe = NULL;
784 u32 num_wrs;
785 struct t4_swsqe *swsqe;
786 unsigned long flag;
787 u16 idx = 0;
788 struct c4iw_rdev *rdev;
789
790 qhp = to_c4iw_qp(ibqp);
791 rdev = &qhp->rhp->rdev;
792 spin_lock_irqsave(&qhp->lock, flag);
793 if (t4_wq_in_error(&qhp->wq)) {
794 spin_unlock_irqrestore(&qhp->lock, flag);
795 complete_sq_drain_wr(qhp, wr);
796 return err;
797 }
798 num_wrs = t4_sq_avail(&qhp->wq);
799 if (num_wrs == 0) {
800 spin_unlock_irqrestore(&qhp->lock, flag);
801 *bad_wr = wr;
802 return -ENOMEM;
803 }
804 while (wr) {
805 if (num_wrs == 0) {
806 err = -ENOMEM;
807 *bad_wr = wr;
808 break;
809 }
810 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
811 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
812
813 fw_flags = 0;
814 if (wr->send_flags & IB_SEND_SOLICITED)
815 fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
816 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
817 fw_flags |= FW_RI_COMPLETION_FLAG;
818 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
819 switch (wr->opcode) {
820 case IB_WR_SEND_WITH_INV:
821 case IB_WR_SEND:
822 if (wr->send_flags & IB_SEND_FENCE)
823 fw_flags |= FW_RI_READ_FENCE_FLAG;
824 fw_opcode = FW_RI_SEND_WR;
825 if (wr->opcode == IB_WR_SEND)
826 swsqe->opcode = FW_RI_SEND;
827 else
828 swsqe->opcode = FW_RI_SEND_WITH_INV;
829 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
830 break;
831 case IB_WR_RDMA_WRITE:
832 fw_opcode = FW_RI_RDMA_WRITE_WR;
833 swsqe->opcode = FW_RI_RDMA_WRITE;
834 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
835 break;
836 case IB_WR_RDMA_READ:
837 case IB_WR_RDMA_READ_WITH_INV:
838 fw_opcode = FW_RI_RDMA_READ_WR;
839 swsqe->opcode = FW_RI_READ_REQ;
840 if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
841 c4iw_invalidate_mr(qhp->rhp,
842 wr->sg_list[0].lkey);
843 fw_flags = FW_RI_RDMA_READ_INVALIDATE;
844 } else {
845 fw_flags = 0;
846 }
847 err = build_rdma_read(wqe, wr, &len16);
848 if (err)
849 break;
850 swsqe->read_len = wr->sg_list[0].length;
851 if (!qhp->wq.sq.oldest_read)
852 qhp->wq.sq.oldest_read = swsqe;
853 break;
854 case IB_WR_REG_MR: {
855 struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);
856
857 swsqe->opcode = FW_RI_FAST_REGISTER;
858 if (rdev->adap->params.fr_nsmr_tpte_wr_support &&
859 !mhp->attr.state && mhp->mpl_len <= 2) {
860 fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
861 err = build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
862 mhp, &len16);
863 } else {
864 fw_opcode = FW_RI_FR_NSMR_WR;
865 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
866 mhp, &len16,
867 rdev->adap->params.ulptx_memwrite_dsgl);
868 }
869 if (err)
870 break;
871 mhp->attr.state = 1;
872 break;
873 }
874 case IB_WR_LOCAL_INV:
875 if (wr->send_flags & IB_SEND_FENCE)
876 fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
877 fw_opcode = FW_RI_INV_LSTAG_WR;
878 swsqe->opcode = FW_RI_LOCAL_INV;
879 err = build_inv_stag(wqe, wr, &len16);
880 c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
881 break;
882 default:
883 CTR2(KTR_IW_CXGBE, "%s post of type =%d TBD!", __func__,
884 wr->opcode);
885 err = -EINVAL;
886 }
887 if (err) {
888 *bad_wr = wr;
889 break;
890 }
891 swsqe->idx = qhp->wq.sq.pidx;
892 swsqe->complete = 0;
893 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
894 qhp->sq_sig_all;
895 swsqe->flushed = 0;
896 swsqe->wr_id = wr->wr_id;
897
898 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
899
900 CTR5(KTR_IW_CXGBE,
901 "%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u",
902 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
903 swsqe->opcode, swsqe->read_len);
904 wr = wr->next;
905 num_wrs--;
906 t4_sq_produce(&qhp->wq, len16);
907 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
908 }
909
910 t4_ring_sq_db(&qhp->wq, idx, wqe, rdev->adap->iwt.wc_en);
911 spin_unlock_irqrestore(&qhp->lock, flag);
912 return err;
913 }
914
int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
    const struct ib_recv_wr **bad_wr)
917 {
918 int err = 0;
919 struct c4iw_qp *qhp;
920 union t4_recv_wr *wqe = NULL;
921 u32 num_wrs;
922 u8 len16 = 0;
923 unsigned long flag;
924 u16 idx = 0;
925
926 qhp = to_c4iw_qp(ibqp);
927 spin_lock_irqsave(&qhp->lock, flag);
928 if (t4_wq_in_error(&qhp->wq)) {
929 spin_unlock_irqrestore(&qhp->lock, flag);
930 complete_rq_drain_wr(qhp, wr);
931 return err;
932 }
933 num_wrs = t4_rq_avail(&qhp->wq);
934 if (num_wrs == 0) {
935 spin_unlock_irqrestore(&qhp->lock, flag);
936 *bad_wr = wr;
937 return -ENOMEM;
938 }
939 while (wr) {
940 if (wr->num_sge > T4_MAX_RECV_SGE) {
941 err = -EINVAL;
942 *bad_wr = wr;
943 break;
944 }
945 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
946 qhp->wq.rq.wq_pidx *
947 T4_EQ_ENTRY_SIZE);
948 if (num_wrs)
949 err = build_rdma_recv(qhp, wqe, wr, &len16);
950 else
951 err = -ENOMEM;
952 if (err) {
953 *bad_wr = wr;
954 break;
955 }
956
957 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
958
959 wqe->recv.opcode = FW_RI_RECV_WR;
960 wqe->recv.r1 = 0;
961 wqe->recv.wrid = qhp->wq.rq.pidx;
962 wqe->recv.r2[0] = 0;
963 wqe->recv.r2[1] = 0;
964 wqe->recv.r2[2] = 0;
965 wqe->recv.len16 = len16;
966 CTR3(KTR_IW_CXGBE, "%s cookie 0x%llx pidx %u", __func__,
967 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
968 t4_rq_produce(&qhp->wq, len16);
969 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
970 wr = wr->next;
971 num_wrs--;
972 }
973
974 t4_ring_rq_db(&qhp->wq, idx, wqe, qhp->rhp->rdev.adap->iwt.wc_en);
975 spin_unlock_irqrestore(&qhp->lock, flag);
976 return err;
977 }
978
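/*
 * Map a hardware CQE error status to the iWARP TERMINATE message
 * layer/etype and error code (RDMAP, DDP or MPA layer).
 */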
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
    u8 *ecode)
981 {
982 int status;
983 int tagged;
984 int opcode;
985 int rqtype;
986 int send_inv;
987
988 if (!err_cqe) {
989 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
990 *ecode = 0;
991 return;
992 }
993
994 status = CQE_STATUS(err_cqe);
995 opcode = CQE_OPCODE(err_cqe);
996 rqtype = RQ_TYPE(err_cqe);
997 send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
998 (opcode == FW_RI_SEND_WITH_SE_INV);
999 tagged = (opcode == FW_RI_RDMA_WRITE) ||
1000 (rqtype && (opcode == FW_RI_READ_RESP));
1001
1002 switch (status) {
1003 case T4_ERR_STAG:
1004 if (send_inv) {
1005 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1006 *ecode = RDMAP_CANT_INV_STAG;
1007 } else {
1008 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1009 *ecode = RDMAP_INV_STAG;
1010 }
1011 break;
1012 case T4_ERR_PDID:
1013 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1014 if ((opcode == FW_RI_SEND_WITH_INV) ||
1015 (opcode == FW_RI_SEND_WITH_SE_INV))
1016 *ecode = RDMAP_CANT_INV_STAG;
1017 else
1018 *ecode = RDMAP_STAG_NOT_ASSOC;
1019 break;
1020 case T4_ERR_QPID:
1021 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1022 *ecode = RDMAP_STAG_NOT_ASSOC;
1023 break;
1024 case T4_ERR_ACCESS:
1025 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1026 *ecode = RDMAP_ACC_VIOL;
1027 break;
1028 case T4_ERR_WRAP:
1029 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1030 *ecode = RDMAP_TO_WRAP;
1031 break;
1032 case T4_ERR_BOUND:
1033 if (tagged) {
1034 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1035 *ecode = DDPT_BASE_BOUNDS;
1036 } else {
1037 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1038 *ecode = RDMAP_BASE_BOUNDS;
1039 }
1040 break;
1041 case T4_ERR_INVALIDATE_SHARED_MR:
1042 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
1043 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1044 *ecode = RDMAP_CANT_INV_STAG;
1045 break;
1046 case T4_ERR_ECC:
1047 case T4_ERR_ECC_PSTAG:
1048 case T4_ERR_INTERNAL_ERR:
1049 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
1050 *ecode = 0;
1051 break;
1052 case T4_ERR_OUT_OF_RQE:
1053 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1054 *ecode = DDPU_INV_MSN_NOBUF;
1055 break;
1056 case T4_ERR_PBL_ADDR_BOUND:
1057 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1058 *ecode = DDPT_BASE_BOUNDS;
1059 break;
1060 case T4_ERR_CRC:
1061 *layer_type = LAYER_MPA|DDP_LLP;
1062 *ecode = MPA_CRC_ERR;
1063 break;
1064 case T4_ERR_MARKER:
1065 *layer_type = LAYER_MPA|DDP_LLP;
1066 *ecode = MPA_MARKER_ERR;
1067 break;
1068 case T4_ERR_PDU_LEN_ERR:
1069 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1070 *ecode = DDPU_MSG_TOOBIG;
1071 break;
1072 case T4_ERR_DDP_VERSION:
1073 if (tagged) {
1074 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1075 *ecode = DDPT_INV_VERS;
1076 } else {
1077 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1078 *ecode = DDPU_INV_VERS;
1079 }
1080 break;
1081 case T4_ERR_RDMA_VERSION:
1082 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1083 *ecode = RDMAP_INV_VERS;
1084 break;
1085 case T4_ERR_OPCODE:
1086 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1087 *ecode = RDMAP_INV_OPCODE;
1088 break;
1089 case T4_ERR_DDP_QUEUE_NUM:
1090 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1091 *ecode = DDPU_INV_QN;
1092 break;
1093 case T4_ERR_MSN:
1094 case T4_ERR_MSN_GAP:
1095 case T4_ERR_MSN_RANGE:
1096 case T4_ERR_IRD_OVERFLOW:
1097 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1098 *ecode = DDPU_INV_MSN_RANGE;
1099 break;
1100 case T4_ERR_TBIT:
1101 *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
1102 *ecode = 0;
1103 break;
1104 case T4_ERR_MO:
1105 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1106 *ecode = DDPU_INV_MO;
1107 break;
1108 default:
1109 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1110 *ecode = 0;
1111 break;
1112 }
1113 }
1114
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
    gfp_t gfp)
1117 {
1118 int ret;
1119 struct fw_ri_wr *wqe;
1120 struct terminate_message *term;
1121 struct wrqe *wr;
1122 struct socket *so = qhp->ep->com.so;
1123 struct inpcb *inp = sotoinpcb(so);
1124 struct tcpcb *tp = intotcpcb(inp);
1125 struct toepcb *toep = tp->t_toe;
1126
1127 CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
1128 qhp->wq.sq.qid, qhp->ep->hwtid);
1129
1130 wr = alloc_wrqe(sizeof(*wqe), &toep->ofld_txq->wrq);
1131 if (wr == NULL)
1132 return;
1133 wqe = wrtod(wr);
1134
1135 memset(wqe, 0, sizeof *wqe);
1136 wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR));
1137 wqe->flowid_len16 = cpu_to_be32(
1138 V_FW_WR_FLOWID(qhp->ep->hwtid) |
1139 V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
1140
1141 wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
1142 wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
1143 term = (struct terminate_message *)wqe->u.terminate.termmsg;
1144 if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
1145 term->layer_etype = qhp->attr.layer_etype;
1146 term->ecode = qhp->attr.ecode;
1147 } else
1148 build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
1149 ret = creds(toep, inp, sizeof(*wqe));
1150 if (ret) {
1151 free_wrqe(wr);
1152 return;
1153 }
1154 t4_wrq_tx(qhp->rhp->rdev.adap, wr);
1155 }
1156
1157 /* Assumes qhp lock is held. */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
    struct c4iw_cq *schp)
1160 {
1161 int count;
1162 int rq_flushed, sq_flushed;
1163 unsigned long flag;
1164
1165 CTR4(KTR_IW_CXGBE, "%s qhp %p rchp %p schp %p", __func__, qhp, rchp,
1166 schp);
1167
1168 /* locking hierarchy: cq lock first, then qp lock. */
1169 spin_lock_irqsave(&rchp->lock, flag);
1170 spin_lock(&qhp->lock);
1171
1172 if (qhp->wq.flushed) {
1173 spin_unlock(&qhp->lock);
1174 spin_unlock_irqrestore(&rchp->lock, flag);
1175 return;
1176 }
1177 qhp->wq.flushed = 1;
1178
1179 c4iw_flush_hw_cq(rchp);
1180 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
1181 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
1182 spin_unlock(&qhp->lock);
1183 spin_unlock_irqrestore(&rchp->lock, flag);
1184
1185 /* locking hierarchy: cq lock first, then qp lock. */
1186 spin_lock_irqsave(&schp->lock, flag);
1187 spin_lock(&qhp->lock);
1188 if (schp != rchp)
1189 c4iw_flush_hw_cq(schp);
1190 sq_flushed = c4iw_flush_sq(qhp);
1191 spin_unlock(&qhp->lock);
1192 spin_unlock_irqrestore(&schp->lock, flag);
1193
1194 if (schp == rchp) {
1195 if (t4_clear_cq_armed(&rchp->cq) &&
1196 (rq_flushed || sq_flushed)) {
1197 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1198 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1199 rchp->ibcq.cq_context);
1200 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1201 }
1202 } else {
1203 if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
1204 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1205 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1206 rchp->ibcq.cq_context);
1207 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1208 }
1209 if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
1210 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1211 (*schp->ibcq.comp_handler)(&schp->ibcq,
1212 schp->ibcq.cq_context);
1213 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1214 }
1215 }
1216 }
1217
static void flush_qp(struct c4iw_qp *qhp)
1219 {
1220 struct c4iw_cq *rchp, *schp;
1221 unsigned long flag;
1222
1223 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1224 schp = to_c4iw_cq(qhp->ibqp.send_cq);
1225
1226 t4_set_wq_in_error(&qhp->wq);
1227 if (qhp->ibqp.uobject) {
1228 t4_set_cq_in_error(&rchp->cq);
1229 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1230 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
1231 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1232 if (schp != rchp) {
1233 t4_set_cq_in_error(&schp->cq);
1234 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1235 (*schp->ibcq.comp_handler)(&schp->ibcq,
1236 schp->ibcq.cq_context);
1237 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1238 }
1239 return;
1240 }
1241 __flush_qp(qhp, rchp, schp);
1242 }
1243
static int
rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep)
1246 {
1247 struct c4iw_rdev *rdev = &rhp->rdev;
1248 struct adapter *sc = rdev->adap;
1249 struct fw_ri_wr *wqe;
1250 int ret;
1251 struct wrqe *wr;
1252 struct socket *so = ep->com.so;
1253 struct inpcb *inp = sotoinpcb(so);
1254 struct tcpcb *tp = intotcpcb(inp);
1255 struct toepcb *toep = tp->t_toe;
1256
1257 KASSERT(rhp == qhp->rhp && ep == qhp->ep, ("%s: EDOOFUS", __func__));
1258
1259 CTR5(KTR_IW_CXGBE, "%s qhp %p qid 0x%x ep %p tid %u", __func__, qhp,
1260 qhp->wq.sq.qid, ep, ep->hwtid);
1261
1262 wr = alloc_wrqe(sizeof(*wqe), &toep->ofld_txq->wrq);
1263 if (wr == NULL)
1264 return (0);
1265 wqe = wrtod(wr);
1266
1267 memset(wqe, 0, sizeof *wqe);
1268
1269 wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR) | F_FW_WR_COMPL);
1270 wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) |
1271 V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
1272 wqe->cookie = (unsigned long) &ep->com.wr_wait;
1273 wqe->u.fini.type = FW_RI_TYPE_FINI;
1274
1275 c4iw_init_wr_wait(&ep->com.wr_wait);
1276
1277 ret = creds(toep, inp, sizeof(*wqe));
1278 if (ret) {
1279 free_wrqe(wr);
1280 return ret;
1281 }
1282 t4_wrq_tx(sc, wr);
1283
1284 ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
1285 qhp->wq.sq.qid, ep->com.so, __func__);
1286 return ret;
1287 }
1288
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
1290 {
1291 CTR2(KTR_IW_CXGBE, "%s p2p_type = %d", __func__, p2p_type);
1292 memset(&init->u, 0, sizeof init->u);
1293 switch (p2p_type) {
1294 case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1295 init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1296 init->u.write.stag_sink = cpu_to_be32(1);
1297 init->u.write.to_sink = cpu_to_be64(1);
1298 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1299 init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
1300 sizeof(struct fw_ri_immd),
1301 16);
1302 break;
1303 case FW_RI_INIT_P2PTYPE_READ_REQ:
1304 init->u.write.opcode = FW_RI_RDMA_READ_WR;
1305 init->u.read.stag_src = cpu_to_be32(1);
1306 init->u.read.to_src_lo = cpu_to_be32(1);
1307 init->u.read.stag_sink = cpu_to_be32(1);
1308 init->u.read.to_sink_lo = cpu_to_be32(1);
1309 init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
1310 break;
1311 }
1312 }
1313
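/*
 * Charge one offload tx descriptor and the WR's 16-byte credits against
 * the toepcb before handing a raw work request to the TOE tx queue.
 */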
static int
creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize)
1316 {
1317 struct ofld_tx_sdesc *txsd;
1318
1319 CTR3(KTR_IW_CXGBE, "%s:creB %p %u", __func__, toep , wrsize);
1320 INP_WLOCK(inp);
1321 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) != 0) {
1322 INP_WUNLOCK(inp);
1323 return (EINVAL);
1324 }
1325 txsd = &toep->txsd[toep->txsd_pidx];
1326 txsd->tx_credits = howmany(wrsize, 16);
1327 txsd->plen = 0;
1328 KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
1329 ("%s: not enough credits (%d)", __func__, toep->tx_credits));
1330 toep->tx_credits -= txsd->tx_credits;
1331 if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
1332 toep->txsd_pidx = 0;
1333 toep->txsd_avail--;
1334 INP_WUNLOCK(inp);
1335 CTR5(KTR_IW_CXGBE, "%s:creE %p %u %u %u", __func__, toep ,
1336 txsd->tx_credits, toep->tx_credits, toep->txsd_pidx);
1337 return (0);
1338 }
1339
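/*
 * Send the FW_RI_WR of type FW_RI_TYPE_INIT that moves the connection
 * into RDMA mode, reserving IRD resources first, and wait for the
 * firmware reply.
 */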
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1341 {
1342 struct fw_ri_wr *wqe;
1343 int ret;
1344 struct wrqe *wr;
1345 struct c4iw_ep *ep = qhp->ep;
1346 struct c4iw_rdev *rdev = &qhp->rhp->rdev;
1347 struct adapter *sc = rdev->adap;
1348 struct socket *so = ep->com.so;
1349 struct inpcb *inp = sotoinpcb(so);
1350 struct tcpcb *tp = intotcpcb(inp);
1351 struct toepcb *toep = tp->t_toe;
1352
1353 CTR5(KTR_IW_CXGBE, "%s qhp %p qid 0x%x ep %p tid %u", __func__, qhp,
1354 qhp->wq.sq.qid, ep, ep->hwtid);
1355
1356 wr = alloc_wrqe(sizeof(*wqe), &toep->ofld_txq->wrq);
1357 if (wr == NULL)
1358 return (0);
1359 wqe = wrtod(wr);
1360 ret = alloc_ird(rhp, qhp->attr.max_ird);
1361 if (ret) {
1362 qhp->attr.max_ird = 0;
1363 free_wrqe(wr);
1364 return ret;
1365 }
1366
1367 memset(wqe, 0, sizeof *wqe);
1368
1369 wqe->op_compl = cpu_to_be32(
1370 V_FW_WR_OP(FW_RI_WR) |
1371 F_FW_WR_COMPL);
1372 wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) |
1373 V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
1374
1375 wqe->cookie = (unsigned long) &ep->com.wr_wait;
1376
1377 wqe->u.init.type = FW_RI_TYPE_INIT;
1378 wqe->u.init.mpareqbit_p2ptype =
1379 V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
1380 V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
1381 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1382 if (qhp->attr.mpa_attr.recv_marker_enabled)
1383 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1384 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1385 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1386 if (qhp->attr.mpa_attr.crc_enabled)
1387 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1388
1389 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1390 FW_RI_QP_RDMA_WRITE_ENABLE |
1391 FW_RI_QP_BIND_ENABLE;
1392 if (!qhp->ibqp.uobject)
1393 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1394 FW_RI_QP_STAG0_ENABLE;
1395 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1396 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1397 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1398 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1399 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1400 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1401 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1402 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1403 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1404 wqe->u.init.iss = cpu_to_be32(ep->snd_seq);
1405 wqe->u.init.irs = cpu_to_be32(ep->rcv_seq);
1406 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1407 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1408 sc->vres.rq.start);
1409 if (qhp->attr.mpa_attr.initiator)
1410 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1411
1412 c4iw_init_wr_wait(&ep->com.wr_wait);
1413
1414 ret = creds(toep, inp, sizeof(*wqe));
1415 if (ret) {
1416 free_wrqe(wr);
1417 free_ird(rhp, qhp->attr.max_ird);
1418 return ret;
1419 }
1420 t4_wrq_tx(sc, wr);
1421
1422 ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
1423 qhp->wq.sq.qid, ep->com.so, __func__);
1424
1425 toep->params.ulp_mode = ULP_MODE_RDMA;
1426 free_ird(rhp, qhp->attr.max_ird);
1427
1428 return ret;
1429 }
1430
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
    enum c4iw_qp_attr_mask mask,
    struct c4iw_qp_attributes *attrs,
    int internal)
1435 {
1436 int ret = 0;
1437 struct c4iw_qp_attributes newattr = qhp->attr;
1438 int disconnect = 0;
1439 int terminate = 0;
1440 int abort = 0;
1441 int free = 0;
1442 struct c4iw_ep *ep = NULL;
1443
1444 CTR5(KTR_IW_CXGBE, "%s qhp %p sqid 0x%x rqid 0x%x ep %p", __func__, qhp,
1445 qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep);
1446 CTR3(KTR_IW_CXGBE, "%s state %d -> %d", __func__, qhp->attr.state,
1447 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
1448
1449 mutex_lock(&qhp->mutex);
1450
1451 /* Process attr changes if in IDLE */
1452 if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1453 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1454 ret = -EIO;
1455 goto out;
1456 }
1457 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1458 newattr.enable_rdma_read = attrs->enable_rdma_read;
1459 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1460 newattr.enable_rdma_write = attrs->enable_rdma_write;
1461 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1462 newattr.enable_bind = attrs->enable_bind;
1463 if (mask & C4IW_QP_ATTR_MAX_ORD) {
1464 if (attrs->max_ord > c4iw_max_read_depth) {
1465 ret = -EINVAL;
1466 goto out;
1467 }
1468 newattr.max_ord = attrs->max_ord;
1469 }
1470 if (mask & C4IW_QP_ATTR_MAX_IRD) {
1471 if (attrs->max_ird > cur_max_read_depth(rhp)) {
1472 ret = -EINVAL;
1473 goto out;
1474 }
1475 newattr.max_ird = attrs->max_ird;
1476 }
1477 qhp->attr = newattr;
1478 }
1479
1480 if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1481 goto out;
1482 if (qhp->attr.state == attrs->next_state)
1483 goto out;
1484
	/* Return EINPROGRESS if the QP is already in a transition state,
	 * e.g. a CLOSING->IDLE or *->ERROR transition.  This can happen
	 * while the connection is switching (due to rdma_fini) from
	 * iWARP/RDDP to TOE mode: any in-flight RDMA RX data still reaches
	 * the TOE driver -> TCP stack -> iWARP driver, so the iWARP driver
	 * keeps receiving it until the socket is closed or aborted.  If the
	 * iWARP CM is in the FPDU state at that point, it tries to put the
	 * QP into TERMINATE and disconnect the endpoint, but since the QP
	 * is already in a transition state the event is ignored.
	 */
1495 if ((qhp->attr.state >= C4IW_QP_STATE_ERROR) &&
1496 (attrs->next_state == C4IW_QP_STATE_TERMINATE)) {
1497 ret = -EINPROGRESS;
1498 goto out;
1499 }
1500
1501 switch (qhp->attr.state) {
1502 case C4IW_QP_STATE_IDLE:
1503 switch (attrs->next_state) {
1504 case C4IW_QP_STATE_RTS:
1505 if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1506 ret = -EINVAL;
1507 goto out;
1508 }
1509 if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1510 ret = -EINVAL;
1511 goto out;
1512 }
1513 qhp->attr.mpa_attr = attrs->mpa_attr;
1514 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1515 qhp->ep = qhp->attr.llp_stream_handle;
1516 set_state(qhp, C4IW_QP_STATE_RTS);
1517
1518 /*
1519 * Ref the endpoint here and deref when we
1520 * disassociate the endpoint from the QP. This
1521 * happens in CLOSING->IDLE transition or *->ERROR
1522 * transition.
1523 */
1524 c4iw_get_ep(&qhp->ep->com);
1525 ret = rdma_init(rhp, qhp);
1526 if (ret)
1527 goto err;
1528 break;
1529 case C4IW_QP_STATE_ERROR:
1530 set_state(qhp, C4IW_QP_STATE_ERROR);
1531 flush_qp(qhp);
1532 break;
1533 default:
1534 ret = -EINVAL;
1535 goto out;
1536 }
1537 break;
1538 case C4IW_QP_STATE_RTS:
1539 switch (attrs->next_state) {
1540 case C4IW_QP_STATE_CLOSING:
1541 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
1542 t4_set_wq_in_error(&qhp->wq);
1543 set_state(qhp, C4IW_QP_STATE_CLOSING);
1544 ep = qhp->ep;
1545 if (!internal) {
1546 abort = 0;
1547 disconnect = 1;
1548 c4iw_get_ep(&qhp->ep->com);
1549 }
1550 ret = rdma_fini(rhp, qhp, ep);
1551 if (ret)
1552 goto err;
1553 break;
1554 case C4IW_QP_STATE_TERMINATE:
1555 t4_set_wq_in_error(&qhp->wq);
1556 set_state(qhp, C4IW_QP_STATE_TERMINATE);
1557 qhp->attr.layer_etype = attrs->layer_etype;
1558 qhp->attr.ecode = attrs->ecode;
1559 ep = qhp->ep;
1560 if (!internal) {
1561 c4iw_get_ep(&qhp->ep->com);
1562 terminate = 1;
1563 disconnect = 1;
1564 } else {
1565 terminate = qhp->attr.send_term;
1566 ret = rdma_fini(rhp, qhp, ep);
1567 if (ret)
1568 goto err;
1569 }
1570 break;
1571 case C4IW_QP_STATE_ERROR:
1572 t4_set_wq_in_error(&qhp->wq);
1573 set_state(qhp, C4IW_QP_STATE_ERROR);
1574 if (!internal) {
1575 abort = 1;
1576 disconnect = 1;
1577 ep = qhp->ep;
1578 c4iw_get_ep(&qhp->ep->com);
1579 }
1580 goto err;
1581 break;
1582 default:
1583 ret = -EINVAL;
1584 goto out;
1585 }
1586 break;
1587 case C4IW_QP_STATE_CLOSING:
1588
1589 /*
1590 * Allow kernel users to move to ERROR for qp draining.
1591 */
1592 if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
1593 C4IW_QP_STATE_ERROR)) {
1594 ret = -EINVAL;
1595 goto out;
1596 }
1597 switch (attrs->next_state) {
1598 case C4IW_QP_STATE_IDLE:
1599 flush_qp(qhp);
1600 set_state(qhp, C4IW_QP_STATE_IDLE);
1601 qhp->attr.llp_stream_handle = NULL;
1602 c4iw_put_ep(&qhp->ep->com);
1603 qhp->ep = NULL;
1604 wake_up(&qhp->wait);
1605 break;
1606 case C4IW_QP_STATE_ERROR:
1607 goto err;
1608 default:
1609 ret = -EINVAL;
1610 goto err;
1611 }
1612 break;
1613 case C4IW_QP_STATE_ERROR:
1614 if (attrs->next_state != C4IW_QP_STATE_IDLE) {
1615 ret = -EINVAL;
1616 goto out;
1617 }
1618 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
1619 ret = -EINVAL;
1620 goto out;
1621 }
1622 set_state(qhp, C4IW_QP_STATE_IDLE);
1623 break;
1624 case C4IW_QP_STATE_TERMINATE:
1625 if (!internal) {
1626 ret = -EINVAL;
1627 goto out;
1628 }
1629 goto err;
1630 break;
1631 default:
1632 printf("%s in a bad state %d\n",
1633 __func__, qhp->attr.state);
1634 ret = -EINVAL;
1635 goto err;
1636 break;
1637 }
1638 goto out;
1639 err:
1640 CTR3(KTR_IW_CXGBE, "%s disassociating ep %p qpid 0x%x", __func__,
1641 qhp->ep, qhp->wq.sq.qid);
1642
1643 /* disassociate the LLP connection */
1644 qhp->attr.llp_stream_handle = NULL;
1645 if (!ep)
1646 ep = qhp->ep;
1647 qhp->ep = NULL;
1648 set_state(qhp, C4IW_QP_STATE_ERROR);
1649 free = 1;
1650 abort = 1;
1651 BUG_ON(!ep);
1652 flush_qp(qhp);
1653 wake_up(&qhp->wait);
1654 out:
1655 mutex_unlock(&qhp->mutex);
1656
1657 if (terminate)
1658 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
1659
1660 /*
1661 * If disconnect is 1, then we need to initiate a disconnect
1662 * on the EP. This can be a normal close (RTS->CLOSING) or
1663 * an abnormal close (RTS/CLOSING->ERROR).
1664 */
1665 if (disconnect) {
1666 __c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
1667 GFP_KERNEL);
1668 c4iw_put_ep(&ep->com);
1669 }
1670
1671 /*
1672 * If free is 1, then we've disassociated the EP from the QP
1673 * and we need to dereference the EP.
1674 */
1675 if (free)
1676 c4iw_put_ep(&ep->com);
1677 CTR2(KTR_IW_CXGBE, "%s exit state %d", __func__, qhp->attr.state);
1678 return ret;
1679 }
1680
int c4iw_destroy_qp(struct ib_qp *ib_qp)
1682 {
1683 struct c4iw_dev *rhp;
1684 struct c4iw_qp *qhp;
1685 struct c4iw_qp_attributes attrs;
1686
1687 CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ib_qp);
1688 qhp = to_c4iw_qp(ib_qp);
1689 rhp = qhp->rhp;
1690
1691 attrs.next_state = C4IW_QP_STATE_ERROR;
1692 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
1693 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1694 else
1695 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1696 wait_event(qhp->wait, !qhp->ep);
1697
1698 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1699
1700 free_ird(rhp, qhp->attr.max_ird);
1701 c4iw_qp_rem_ref(ib_qp);
1702
1703 CTR3(KTR_IW_CXGBE, "%s ib_qp %p qpid 0x%0x", __func__, ib_qp,
1704 qhp->wq.sq.qid);
1705 return 0;
1706 }
1707
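/*
 * Create a QP: size the SQ/RQ (one extra slot each, minimum of 8
 * entries), create the hardware queues, and for user QPs return mmap
 * keys for the queue memory and the BAR2 doorbell/GTS pages.
 */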
struct ib_qp *
c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
    struct ib_udata *udata)
1711 {
1712 struct c4iw_dev *rhp;
1713 struct c4iw_qp *qhp;
1714 struct c4iw_pd *php;
1715 struct c4iw_cq *schp;
1716 struct c4iw_cq *rchp;
1717 struct c4iw_create_qp_resp uresp;
1718 unsigned int sqsize, rqsize;
1719 struct c4iw_ucontext *ucontext;
1720 int ret;
1721 struct c4iw_mm_entry *sq_key_mm = NULL, *rq_key_mm = NULL;
1722 struct c4iw_mm_entry *sq_db_key_mm = NULL, *rq_db_key_mm = NULL;
1723
1724 CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
1725
1726 if (attrs->qp_type != IB_QPT_RC)
1727 return ERR_PTR(-EINVAL);
1728
1729 php = to_c4iw_pd(pd);
1730 rhp = php->rhp;
1731 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
1732 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
1733 if (!schp || !rchp)
1734 return ERR_PTR(-EINVAL);
1735
1736 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
1737 return ERR_PTR(-EINVAL);
1738
1739 if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
1740 return ERR_PTR(-E2BIG);
1741 rqsize = attrs->cap.max_recv_wr + 1;
1742 if (rqsize < 8)
1743 rqsize = 8;
1744
1745 if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
1746 return ERR_PTR(-E2BIG);
1747 sqsize = attrs->cap.max_send_wr + 1;
1748 if (sqsize < 8)
1749 sqsize = 8;
1750
1751 ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
1752
1753 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
1754 if (!qhp)
1755 return ERR_PTR(-ENOMEM);
1756 qhp->wq.sq.size = sqsize;
1757 qhp->wq.sq.memsize =
1758 (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
1759 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
1760 qhp->wq.sq.flush_cidx = -1;
1761 qhp->wq.rq.size = rqsize;
1762 qhp->wq.rq.memsize =
1763 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
1764 sizeof(*qhp->wq.rq.queue);
1765
1766 if (ucontext) {
1767 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
1768 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
1769 }
1770
1771 CTR5(KTR_IW_CXGBE, "%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu",
1772 __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
1773
1774 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
1775 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1776 if (ret)
1777 goto err1;
1778
1779 attrs->cap.max_recv_wr = rqsize - 1;
1780 attrs->cap.max_send_wr = sqsize - 1;
1781 attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
1782
1783 qhp->rhp = rhp;
1784 qhp->attr.pd = php->pdid;
1785 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
1786 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
1787 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
1788 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
1789 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
1790 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
1791 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
1792 qhp->attr.state = C4IW_QP_STATE_IDLE;
1793 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
1794 qhp->attr.enable_rdma_read = 1;
1795 qhp->attr.enable_rdma_write = 1;
1796 qhp->attr.enable_bind = 1;
1797 qhp->attr.max_ord = 0;
1798 qhp->attr.max_ird = 0;
1799 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
1800 spin_lock_init(&qhp->lock);
1801 mutex_init(&qhp->mutex);
1802 init_waitqueue_head(&qhp->wait);
1803 kref_init(&qhp->kref);
1804 INIT_WORK(&qhp->free_work, free_qp_work);
1805
1806 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1807 if (ret)
1808 goto err2;
1809
1810 if (udata) {
1811 sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
1812 if (!sq_key_mm) {
1813 ret = -ENOMEM;
1814 goto err3;
1815 }
1816 rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
1817 if (!rq_key_mm) {
1818 ret = -ENOMEM;
1819 goto err4;
1820 }
1821 sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
1822 if (!sq_db_key_mm) {
1823 ret = -ENOMEM;
1824 goto err5;
1825 }
1826 rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
1827 if (!rq_db_key_mm) {
1828 ret = -ENOMEM;
1829 goto err6;
1830 }
1831 uresp.flags = 0;
1832 uresp.qid_mask = rhp->rdev.qpmask;
1833 uresp.sqid = qhp->wq.sq.qid;
1834 uresp.sq_size = qhp->wq.sq.size;
1835 uresp.sq_memsize = qhp->wq.sq.memsize;
1836 uresp.rqid = qhp->wq.rq.qid;
1837 uresp.rq_size = qhp->wq.rq.size;
1838 uresp.rq_memsize = qhp->wq.rq.memsize;
1839 spin_lock(&ucontext->mmap_lock);
1840 uresp.ma_sync_key = 0;
1841 uresp.sq_key = ucontext->key;
1842 ucontext->key += PAGE_SIZE;
1843 uresp.rq_key = ucontext->key;
1844 ucontext->key += PAGE_SIZE;
1845 uresp.sq_db_gts_key = ucontext->key;
1846 ucontext->key += PAGE_SIZE;
1847 uresp.rq_db_gts_key = ucontext->key;
1848 ucontext->key += PAGE_SIZE;
1849 spin_unlock(&ucontext->mmap_lock);
1850 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
1851 if (ret)
1852 goto err7;
1853 sq_key_mm->key = uresp.sq_key;
1854 sq_key_mm->addr = qhp->wq.sq.phys_addr;
1855 sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
1856 CTR4(KTR_IW_CXGBE, "%s sq_key_mm %x, %x, %d", __func__,
1857 sq_key_mm->key, sq_key_mm->addr,
1858 sq_key_mm->len);
1859 insert_mmap(ucontext, sq_key_mm);
1860 rq_key_mm->key = uresp.rq_key;
1861 rq_key_mm->addr = qhp->wq.rq.phys_addr;
1862 rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1863 CTR4(KTR_IW_CXGBE, "%s rq_key_mm %x, %x, %d", __func__,
1864 rq_key_mm->key, rq_key_mm->addr,
1865 rq_key_mm->len);
1866 insert_mmap(ucontext, rq_key_mm);
1867 sq_db_key_mm->key = uresp.sq_db_gts_key;
1868 sq_db_key_mm->addr = (u64)qhp->wq.sq.bar2_pa;
1869 sq_db_key_mm->len = PAGE_SIZE;
1870 CTR4(KTR_IW_CXGBE, "%s sq_db_key_mm %x, %x, %d", __func__,
1871 sq_db_key_mm->key, sq_db_key_mm->addr,
1872 sq_db_key_mm->len);
1873 insert_mmap(ucontext, sq_db_key_mm);
1874 rq_db_key_mm->key = uresp.rq_db_gts_key;
1875 rq_db_key_mm->addr = (u64)qhp->wq.rq.bar2_pa;
1876 rq_db_key_mm->len = PAGE_SIZE;
1877 CTR4(KTR_IW_CXGBE, "%s rq_db_key_mm %x, %x, %d", __func__,
1878 rq_db_key_mm->key, rq_db_key_mm->addr,
1879 rq_db_key_mm->len);
1880 insert_mmap(ucontext, rq_db_key_mm);
1881
1882 c4iw_get_ucontext(ucontext);
1883 qhp->ucontext = ucontext;
1884 }
1885 qhp->ibqp.qp_num = qhp->wq.sq.qid;
1886 init_timer(&(qhp->timer));
1887
1888 CTR5(KTR_IW_CXGBE, "%s sq id %u size %u memsize %zu num_entries %u",
1889 __func__, qhp->wq.sq.qid,
1890 qhp->wq.sq.size, qhp->wq.sq.memsize, attrs->cap.max_send_wr);
1891 CTR5(KTR_IW_CXGBE, "%s rq id %u size %u memsize %zu num_entries %u",
1892 __func__, qhp->wq.rq.qid,
1893 qhp->wq.rq.size, qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
1894 return &qhp->ibqp;
1895 err7:
1896 kfree(rq_db_key_mm);
1897 err6:
1898 kfree(sq_db_key_mm);
1899 err5:
1900 kfree(rq_key_mm);
1901 err4:
1902 kfree(sq_key_mm);
1903 err3:
1904 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1905 err2:
1906 destroy_qp(&rhp->rdev, &qhp->wq,
1907 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1908 err1:
1909 kfree(qhp);
1910 return ERR_PTR(ret);
1911 }
1912
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
    int attr_mask, struct ib_udata *udata)
1915 {
1916 struct c4iw_dev *rhp;
1917 struct c4iw_qp *qhp;
1918 enum c4iw_qp_attr_mask mask = 0;
1919 struct c4iw_qp_attributes attrs;
1920
1921 CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ibqp);
1922
1923 /* iwarp does not support the RTR state */
1924 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
1925 attr_mask &= ~IB_QP_STATE;
1926
1927 /* Make sure we still have something left to do */
1928 if (!attr_mask)
1929 return 0;
1930
1931 memset(&attrs, 0, sizeof attrs);
1932 qhp = to_c4iw_qp(ibqp);
1933 rhp = qhp->rhp;
1934
1935 attrs.next_state = c4iw_convert_state(attr->qp_state);
1936 attrs.enable_rdma_read = (attr->qp_access_flags &
1937 IB_ACCESS_REMOTE_READ) ? 1 : 0;
1938 attrs.enable_rdma_write = (attr->qp_access_flags &
1939 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1940 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
1941
1942
1943 mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
1944 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
1945 (C4IW_QP_ATTR_ENABLE_RDMA_READ |
1946 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
1947 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
1948
1949 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
1950 }
1951
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
1953 {
1954 CTR3(KTR_IW_CXGBE, "%s ib_dev %p qpn 0x%x", __func__, dev, qpn);
1955 return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
1956 }
1957
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
    int attr_mask, struct ib_qp_init_attr *init_attr)
1960 {
1961 struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
1962
1963 memset(attr, 0, sizeof *attr);
1964 memset(init_attr, 0, sizeof *init_attr);
1965 attr->qp_state = to_ib_qp_state(qhp->attr.state);
1966 init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
1967 init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
1968 init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
1969 init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
1970 init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
1971 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
1972 return 0;
1973 }
1974 #endif
1975