/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2010, LSI Corp.
 * All rights reserved.
 * Author : Manjunath Ranganathaiah
 * Support: [email protected]
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the <ORGANIZATION> nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/tws/tws.h>
#include <dev/tws/tws_services.h>
#include <dev/tws/tws_hdm.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>

MALLOC_DEFINE(M_TWS, "twsbuf", "buffers used by tws driver");
int tws_queue_depth = TWS_MAX_REQS;
int tws_enable_msi = 0;
int tws_enable_msix = 0;

/* externs */
extern int tws_cam_attach(struct tws_softc *sc);
extern void tws_cam_detach(struct tws_softc *sc);
extern int tws_init_ctlr(struct tws_softc *sc);
extern boolean tws_ctlr_ready(struct tws_softc *sc);
extern void tws_turn_off_interrupts(struct tws_softc *sc);
extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
                u_int8_t q_type);
extern struct tws_request *tws_q_remove_request(struct tws_softc *sc,
                struct tws_request *req, u_int8_t q_type);
extern struct tws_request *tws_q_remove_head(struct tws_softc *sc,
                u_int8_t q_type);
extern boolean tws_get_response(struct tws_softc *sc, u_int16_t *req_id);
extern boolean tws_ctlr_reset(struct tws_softc *sc);
extern void tws_intr(void *arg);
extern int tws_use_32bit_sgls;

struct tws_request *tws_get_request(struct tws_softc *sc, u_int16_t type);
int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
void tws_send_event(struct tws_softc *sc, u_int8_t event);
uint8_t tws_get_state(struct tws_softc *sc);
void tws_release_request(struct tws_request *req);

/* Function prototypes */
static d_open_t tws_open;
static d_close_t tws_close;
static d_read_t tws_read;
static d_write_t tws_write;
extern d_ioctl_t tws_ioctl;

static int tws_init(struct tws_softc *sc);
static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
                int nseg, int error);

static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size);
static int tws_init_aen_q(struct tws_softc *sc);
static int tws_init_trace_q(struct tws_softc *sc);
static int tws_setup_irq(struct tws_softc *sc);
int tws_setup_intr(struct tws_softc *sc, int irqs);
int tws_teardown_intr(struct tws_softc *sc);

/* Character device entry points */

static struct cdevsw tws_cdevsw = {
    .d_version = D_VERSION,
    .d_open    = tws_open,
    .d_close   = tws_close,
    .d_read    = tws_read,
    .d_write   = tws_write,
    .d_ioctl   = tws_ioctl,
    .d_name    = "tws",
};

/*
 * In the cdevsw routines, we find our softc by using the si_drv1 member
 * of struct cdev. We set this variable to point to our softc in our
 * attach routine when we create the /dev entry.
 */

int
tws_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, oflags);
    return (0);
}

int
tws_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, fflag);
    return (0);
}

int
tws_read(struct cdev *dev, struct uio *uio, int ioflag)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
    return (0);
}

int
tws_write(struct cdev *dev, struct uio *uio, int ioflag)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
    return (0);
}

/* PCI Support Functions */

/*
 * Compare the device ID of this device against the IDs that this driver
 * supports. If there is a match, set the description and return success.
 */
static int
tws_probe(device_t dev)
{
    static u_int8_t first_ctlr = 1;

    if ((pci_get_vendor(dev) == TWS_VENDOR_ID) &&
        (pci_get_device(dev) == TWS_DEVICE_ID)) {
        device_set_desc(dev, "LSI 3ware SAS/SATA Storage Controller");
        if (first_ctlr) {
            printf("LSI 3ware device driver for SAS/SATA storage "
                   "controllers, version: %s\n", TWS_DRIVER_VERSION_STRING);
            first_ctlr = 0;
        }

        return(BUS_PROBE_DEFAULT);
    }
    return (ENXIO);
}

/* Attach function is only called if the probe is successful. */

static int
tws_attach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    u_int32_t bar;
    int error=0,i;

    /* no tracing yet */
    /* Look up our softc and initialize its fields. */
    sc->tws_dev = dev;
    sc->device_id = pci_get_device(dev);
    sc->subvendor_id = pci_get_subvendor(dev);
    sc->subdevice_id = pci_get_subdevice(dev);

    /* Initialize mutexes */
    mtx_init(&sc->q_lock, "tws_q_lock", NULL, MTX_DEF);
    mtx_init(&sc->sim_lock, "tws_sim_lock", NULL, MTX_DEF);
    mtx_init(&sc->gen_lock, "tws_gen_lock", NULL, MTX_DEF);
    mtx_init(&sc->io_lock, "tws_io_lock", NULL, MTX_DEF | MTX_RECURSE);
    callout_init(&sc->stats_timer, 1);

    if ( tws_init_trace_q(sc) == FAILURE )
        printf("trace init failure\n");
    /* send init event */
    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_INIT_START);
    mtx_unlock(&sc->gen_lock);

#if _BYTE_ORDER == _BIG_ENDIAN
    TWS_TRACE(sc, "BIG endian", 0, 0);
#endif
    /* sysctl context setup */
    sysctl_ctx_init(&sc->tws_clist);
    sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist,
        SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
        device_get_nameunit(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
    if ( sc->tws_oidp == NULL ) {
        tws_log(sc, SYSCTL_TREE_NODE_ADD);
        goto attach_fail_1;
    }
    SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp),
        OID_AUTO, "driver_version", CTLFLAG_RD,
        TWS_DRIVER_VERSION_STRING, 0, "TWS driver version");
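    /*
     * The node above hangs off hw.<nameunit> (e.g. hw.tws0), so the string
     * added here should show up as something like hw.tws0.driver_version
     * in sysctl(8) output.
     */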

    pci_enable_busmaster(dev);

    bar = pci_read_config(dev, TWS_PCI_BAR0, 4);
    TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0);
    bar = pci_read_config(dev, TWS_PCI_BAR1, 4);
    bar = bar & ~TWS_BIT2;
    TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0);

    /* MFA base address is the BAR2 register, used for
     * push mode. Firmware will eventually move to
     * pull mode, at which point this needs to change.
     */
#ifndef TWS_PULL_MODE_ENABLE
    sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4);
    sc->mfa_base = sc->mfa_base & ~TWS_BIT2;
    TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0);
#endif

    /* allocate MMIO register space */
    sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */
    if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                                &(sc->reg_res_id), RF_ACTIVE))
                                == NULL) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_1;
    }
    sc->bus_tag = rman_get_bustag(sc->reg_res);
    sc->bus_handle = rman_get_bushandle(sc->reg_res);

#ifndef TWS_PULL_MODE_ENABLE
    /* Allocate bus space for inbound mfa */
    sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */
    if ((sc->mfa_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                                &(sc->mfa_res_id), RF_ACTIVE))
                                == NULL) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_2;
    }
    sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res);
    sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res);
#endif

    /* Allocate and register our interrupt. */
    sc->intr_type = TWS_INTx; /* default */

    if ( tws_enable_msi )
        sc->intr_type = TWS_MSI;
    if ( tws_setup_irq(sc) == FAILURE ) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_3;
    }

    /*
     * Create a /dev entry for this device. The kernel will assign us
     * a major number automatically. We use the unit number of this
     * device as the minor number and name the character device
     * "tws<unit>".
     */
    sc->tws_cdev = make_dev(&tws_cdevsw, device_get_unit(dev),
        UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u",
        device_get_unit(dev));
    sc->tws_cdev->si_drv1 = sc;

    if ( tws_init(sc) == FAILURE ) {
        tws_log(sc, TWS_INIT_FAILURE);
        goto attach_fail_4;
    }
    if ( tws_init_ctlr(sc) == FAILURE ) {
        tws_log(sc, TWS_CTLR_INIT_FAILURE);
        goto attach_fail_4;
    }
    if ((error = tws_cam_attach(sc))) {
        tws_log(sc, TWS_CAM_ATTACH);
        goto attach_fail_4;
    }
    /* send init complete event */
    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_INIT_COMPLETE);
    mtx_unlock(&sc->gen_lock);

    TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id);
    return(0);

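    /*
     * Error unwind: each label below releases whatever was set up before
     * the corresponding goto, in roughly the reverse order of allocation.
     */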
attach_fail_4:
    tws_teardown_intr(sc);
    destroy_dev(sc->tws_cdev);
    if (sc->dma_mem_phys)
        bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
    if (sc->dma_mem)
        bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map);
    if (sc->cmd_tag)
        bus_dma_tag_destroy(sc->cmd_tag);
attach_fail_3:
    for (i = 0; i < sc->irqs; i++) {
        if ( sc->irq_res[i] ) {
            if (bus_release_resource(sc->tws_dev,
                     SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
                TWS_TRACE(sc, "bus irq res", 0, 0);
        }
    }
#ifndef TWS_PULL_MODE_ENABLE
attach_fail_2:
#endif
    if ( sc->mfa_res ) {
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ) {
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id);
    }
attach_fail_1:
    mtx_destroy(&sc->q_lock);
    mtx_destroy(&sc->sim_lock);
    mtx_destroy(&sc->gen_lock);
    mtx_destroy(&sc->io_lock);
    sysctl_ctx_free(&sc->tws_clist);
    return (ENXIO);
}

/* Detach device. */

static int
tws_detach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    int i;
    u_int32_t reg;

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_UNINIT_START);
    mtx_unlock(&sc->gen_lock);

    /* need to disable interrupts before detaching from CAM */
    tws_turn_off_interrupts(sc);
    /* clear door bell */
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4);
    TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0);
    sc->obfl_q_overrun = false;
    tws_init_connect(sc, 1);

    /* Teardown the state in our softc created in our attach routine. */
    /* Disconnect the interrupt handler. */
    tws_teardown_intr(sc);

    /* Release irq resource */
    for (i = 0; i < sc->irqs; i++) {
        if ( sc->irq_res[i] ) {
            if (bus_release_resource(sc->tws_dev,
                     SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
                TWS_TRACE(sc, "bus release irq resource",
                                       i, sc->irq_res_id[i]);
        }
    }
    if ( sc->intr_type == TWS_MSI ) {
        pci_release_msi(sc->tws_dev);
    }

    tws_cam_detach(sc);

    if (sc->dma_mem_phys)
        bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
    if (sc->dma_mem)
        bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map);
    if (sc->cmd_tag)
        bus_dma_tag_destroy(sc->cmd_tag);

    /* Release memory resource */
    if ( sc->mfa_res ) {
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ) {
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id);
    }

    for ( i = 0; i < tws_queue_depth; i++) {
        if (sc->reqs[i].dma_map)
            bus_dmamap_destroy(sc->data_tag, sc->reqs[i].dma_map);
        callout_drain(&sc->reqs[i].timeout);
    }

    callout_drain(&sc->stats_timer);
    free(sc->reqs, M_TWS);
    free(sc->sense_bufs, M_TWS);
    xpt_free_ccb(sc->scan_ccb);
    if (sc->ioctl_data_mem)
        bus_dmamem_free(sc->data_tag, sc->ioctl_data_mem, sc->ioctl_data_map);
    if (sc->data_tag)
        bus_dma_tag_destroy(sc->data_tag);
    free(sc->aen_q.q, M_TWS);
    free(sc->trace_q.q, M_TWS);
    mtx_destroy(&sc->q_lock);
    mtx_destroy(&sc->sim_lock);
    mtx_destroy(&sc->gen_lock);
    mtx_destroy(&sc->io_lock);
    destroy_dev(sc->tws_cdev);
    sysctl_ctx_free(&sc->tws_clist);
    return (0);
}

int
tws_setup_intr(struct tws_softc *sc, int irqs)
{
    int i, error;

    for (i = 0; i < irqs; i++) {
        if (!(sc->intr_handle[i])) {
            if ((error = bus_setup_intr(sc->tws_dev, sc->irq_res[i],
                                    INTR_TYPE_CAM | INTR_MPSAFE,
                                    NULL,
                                    tws_intr, sc, &sc->intr_handle[i]))) {
                tws_log(sc, SETUP_INTR_RES);
                return(FAILURE);
            }
        }
    }
    return(SUCCESS);

}

int
tws_teardown_intr(struct tws_softc *sc)
{
    int i, error;

    for (i = 0; i < sc->irqs; i++) {
        if (sc->intr_handle[i]) {
            error = bus_teardown_intr(sc->tws_dev,
                          sc->irq_res[i], sc->intr_handle[i]);
            sc->intr_handle[i] = NULL;
        }
    }
    return(SUCCESS);
}

static int
tws_setup_irq(struct tws_softc *sc)
{
    int messages;

    switch(sc->intr_type) {
        case TWS_INTx :
            sc->irqs = 1;
            sc->irq_res_id[0] = 0;
            sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
                          &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);
            if ( ! sc->irq_res[0] )
                return(FAILURE);
            if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
                return(FAILURE);
            device_printf(sc->tws_dev, "Using legacy INTx\n");
            break;
        case TWS_MSI :
            sc->irqs = 1;
            sc->irq_res_id[0] = 1;
            messages = 1;
            if (pci_alloc_msi(sc->tws_dev, &messages) != 0 ) {
                TWS_TRACE(sc, "pci alloc msi fail", 0, messages);
                return(FAILURE);
            }
            sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
                          &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);

            if ( !sc->irq_res[0] )
                return(FAILURE);
            if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
                return(FAILURE);
            device_printf(sc->tws_dev, "Using MSI\n");
            break;
    }

    return(SUCCESS);
}

static int
tws_init(struct tws_softc *sc)
{

    u_int32_t max_sg_elements;
    u_int32_t dma_mem_size;
    int error;
    u_int32_t reg;

    sc->seq_id = 0;
    if ( tws_queue_depth > TWS_MAX_REQS )
        tws_queue_depth = TWS_MAX_REQS;
    if (tws_queue_depth < TWS_RESERVED_REQS+1)
        tws_queue_depth = TWS_RESERVED_REQS+1;
    sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false;
    max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ?
                                 TWS_MAX_64BIT_SG_ELEMENTS :
                                 TWS_MAX_32BIT_SG_ELEMENTS;
    dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) +
                             (TWS_SECTOR_SIZE);
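    /*
     * The command area holds one tws_command_packet per request; the extra
     * TWS_SECTOR_SIZE is presumably slack so the packets can be placed at
     * the required alignment within the allocation (see the 16-byte
     * alignment note below).
     */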
    if ( bus_dma_tag_create(bus_get_dma_tag(sc->tws_dev), /* PCI parent */
                            TWS_ALIGNMENT,           /* alignment */
                            0,                       /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,       /* highaddr */
                            NULL, NULL,              /* filter, filterarg */
                            BUS_SPACE_MAXSIZE,       /* maxsize */
                            max_sg_elements,         /* numsegs */
                            BUS_SPACE_MAXSIZE,       /* maxsegsize */
                            0,                       /* flags */
                            NULL, NULL,              /* lockfunc, lockfuncarg */
                            &sc->parent_tag          /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements,
                                                           sc->is64bit);
        return(ENOMEM);
    }
    /* Inbound message frames require 16-byte alignment.
     * Outbound MFs can live with 4-byte alignment; for now just
     * use 16 for both.
     */
    if ( bus_dma_tag_create(sc->parent_tag,          /* parent */
                            TWS_IN_MF_ALIGNMENT,     /* alignment */
                            0,                       /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,       /* highaddr */
                            NULL, NULL,              /* filter, filterarg */
                            dma_mem_size,            /* maxsize */
                            1,                       /* numsegs */
                            BUS_SPACE_MAXSIZE,       /* maxsegsize */
                            0,                       /* flags */
                            NULL, NULL,              /* lockfunc, lockfuncarg */
                            &sc->cmd_tag             /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
                    BUS_DMA_NOWAIT, &sc->cmd_map)) {
        TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    /* if bus_dmamem_alloc succeeds then bus_dmamap_load will succeed */
    sc->dma_mem_phys = 0;
    error = bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
                    dma_mem_size, tws_dmamap_cmds_load_cbfn,
                    &sc->dma_mem_phys, 0);

    /*
     * Create a dma tag for data buffers; size will be the maximum
     * possible I/O size (128kB).
     */
    if (bus_dma_tag_create(sc->parent_tag,         /* parent */
                           TWS_ALIGNMENT,          /* alignment */
                           0,                      /* boundary */
                           BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                           BUS_SPACE_MAXADDR,      /* highaddr */
                           NULL, NULL,             /* filter, filterarg */
                           TWS_MAX_IO_SIZE,        /* maxsize */
                           max_sg_elements,        /* nsegments */
                           TWS_MAX_IO_SIZE,        /* maxsegsize */
                           BUS_DMA_ALLOCNOW,       /* flags */
                           busdma_lock_mutex,      /* lockfunc */
                           &sc->io_lock,           /* lockfuncarg */
                           &sc->data_tag           /* tag */)) {
        TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    sc->reqs = malloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    sc->sense_bufs = malloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    sc->scan_ccb = xpt_alloc_ccb();
    if (bus_dmamem_alloc(sc->data_tag, (void **)&sc->ioctl_data_mem,
            (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &sc->ioctl_data_map)) {
        device_printf(sc->tws_dev, "Cannot allocate ioctl data mem\n");
        return(ENOMEM);
    }

    if ( !tws_ctlr_ready(sc) )
        if( !tws_ctlr_reset(sc) )
            return(FAILURE);

    bzero(&sc->stats, sizeof(struct tws_stats));
    tws_init_qs(sc);
    tws_turn_off_interrupts(sc);

    /*
     * Enable pull mode by setting bit 1.
     * Setting bit 0 to 1 will enable interrupt coalescing;
     * will revisit.
     */

#ifdef TWS_PULL_MODE_ENABLE

    reg = tws_read_reg(sc, TWS_I2O0_CTL, 4);
    TWS_TRACE_DEBUG(sc, "i20 ctl", reg, TWS_I2O0_CTL);
    tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1, 4);

#endif

    TWS_TRACE_DEBUG(sc, "dma_mem_phys", sc->dma_mem_phys, TWS_I2O0_CTL);
    if ( tws_init_reqs(sc, dma_mem_size) == FAILURE )
        return(FAILURE);
    if ( tws_init_aen_q(sc) == FAILURE )
        return(FAILURE);

    return(SUCCESS);

}

static int
tws_init_aen_q(struct tws_softc *sc)
{
    sc->aen_q.head = 0;
    sc->aen_q.tail = 0;
    sc->aen_q.depth = 256;
    sc->aen_q.overflow = 0;
    sc->aen_q.q = malloc(sizeof(struct tws_event_packet)*sc->aen_q.depth,
                              M_TWS, M_WAITOK | M_ZERO);
    return(SUCCESS);
}

static int
tws_init_trace_q(struct tws_softc *sc)
{
    sc->trace_q.head = 0;
    sc->trace_q.tail = 0;
    sc->trace_q.depth = 256;
    sc->trace_q.overflow = 0;
    sc->trace_q.q = malloc(sizeof(struct tws_trace_rec)*sc->trace_q.depth,
                              M_TWS, M_WAITOK | M_ZERO);
    return(SUCCESS);
}

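/*
 * Carve the contiguous command DMA area into per-request packets.  For each
 * request i, cmd_buf[i] is its command packet: hdr_pkt_phy is the bus address
 * of the packet's header (which the sense buffer aliases), while cmd_pkt_phy
 * points just past the header at the command itself.  Only requests with an
 * id of TWS_RESERVED_REQS or higher go on the free queue; the lower ids are
 * kept aside for internal (non-SCSI-I/O) use.
 */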
static int
tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size)
{

    struct tws_command_packet *cmd_buf;
    cmd_buf = (struct tws_command_packet *)sc->dma_mem;
    int i;

    bzero(cmd_buf, dma_mem_size);
    TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0);
    mtx_lock(&sc->q_lock);
    for ( i = 0; i < tws_queue_depth; i++)
    {
        if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) {
            /* log an ENOMEM failure msg here */
            mtx_unlock(&sc->q_lock);
            return(FAILURE);
        }
        sc->reqs[i].cmd_pkt = &cmd_buf[i];

        sc->sense_bufs[i].hdr = &cmd_buf[i].hdr;
        sc->sense_bufs[i].hdr_pkt_phy = sc->dma_mem_phys +
                              (i * sizeof(struct tws_command_packet));

        sc->reqs[i].cmd_pkt_phy = sc->dma_mem_phys +
                              sizeof(struct tws_command_header) +
                              (i * sizeof(struct tws_command_packet));
        sc->reqs[i].request_id = i;
        sc->reqs[i].sc = sc;

        sc->reqs[i].cmd_pkt->hdr.header_desc.size_header = 128;

        callout_init(&sc->reqs[i].timeout, 1);
        sc->reqs[i].state = TWS_REQ_STATE_FREE;
        if ( i >= TWS_RESERVED_REQS )
            tws_q_insert_tail(sc, &sc->reqs[i], TWS_FREE_Q);
    }
    mtx_unlock(&sc->q_lock);
    return(SUCCESS);
}

static void
tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
                            int nseg, int error)
{

    /* printf("command load done \n"); */

    *((bus_addr_t *)arg) = segs[0].ds_addr;
}

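/*
 * Controller/driver state machine driven by tws_send_event() (gen_lock must
 * be held).  The transitions implemented below are, roughly:
 *   INIT_START     -> TWS_INIT
 *   INIT_COMPLETE   : TWS_INIT   -> TWS_ONLINE
 *   RESET_START     : any state except TWS_RESET -> TWS_RESET
 *   RESET_COMPLETE  : TWS_RESET  -> previous state
 *   SCAN_FAILURE    : TWS_ONLINE -> TWS_OFFLINE
 *   UNINIT_START    : TWS_ONLINE or TWS_OFFLINE -> TWS_UNINIT
 */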
void
tws_send_event(struct tws_softc *sc, u_int8_t event)
{
    mtx_assert(&sc->gen_lock, MA_OWNED);
    TWS_TRACE_DEBUG(sc, "received event ", 0, event);
    switch (event) {
        case TWS_INIT_START:
            sc->tws_state = TWS_INIT;
            break;

        case TWS_INIT_COMPLETE:
            if (sc->tws_state != TWS_INIT) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_ONLINE\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_ONLINE;
            }
            break;

        case TWS_RESET_START:
            /* We can transition to reset state from any state except reset */
            if (sc->tws_state != TWS_RESET) {
                sc->tws_prev_state = sc->tws_state;
                sc->tws_state = TWS_RESET;
            }
            break;

        case TWS_RESET_COMPLETE:
            if (sc->tws_state != TWS_RESET) {
                device_printf(sc->tws_dev, "invalid state transition %d => %d (previous state)\n", sc->tws_state, sc->tws_prev_state);
            } else {
                sc->tws_state = sc->tws_prev_state;
            }
            break;

        case TWS_SCAN_FAILURE:
            if (sc->tws_state != TWS_ONLINE) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_OFFLINE\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_OFFLINE;
            }
            break;

        case TWS_UNINIT_START:
            if ((sc->tws_state != TWS_ONLINE) && (sc->tws_state != TWS_OFFLINE)) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_UNINIT\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_UNINIT;
            }
            break;
    }

}

uint8_t
tws_get_state(struct tws_softc *sc)
{

    return((u_int8_t)sc->tws_state);

}

/* Called during system shutdown after sync. */

static int
tws_shutdown(device_t dev)
{

    struct tws_softc *sc = device_get_softc(dev);

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    tws_turn_off_interrupts(sc);
    tws_init_connect(sc, 1);

    return (0);
}

/*
 * Device suspend routine.
 */
static int
tws_suspend(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    return (0);
}

/*
 * Device resume routine.
 */
static int
tws_resume(device_t dev)
{

    struct tws_softc *sc = device_get_softc(dev);

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    return (0);
}

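/*
 * Hand out a request.  SCSI I/O requests come off the free queue (under
 * q_lock); any other type indexes one of the reserved slots directly
 * (reqs[type], protected by gen_lock), which is why those slots are kept
 * off the free queue in tws_init_reqs().
 */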
struct tws_request *
tws_get_request(struct tws_softc *sc, u_int16_t type)
{
    struct mtx *my_mutex = ((type == TWS_REQ_TYPE_SCSI_IO) ? &sc->q_lock : &sc->gen_lock);
    struct tws_request *r = NULL;

    mtx_lock(my_mutex);

    if (type == TWS_REQ_TYPE_SCSI_IO) {
        r = tws_q_remove_head(sc, TWS_FREE_Q);
    } else {
        if ( sc->reqs[type].state == TWS_REQ_STATE_FREE ) {
            r = &sc->reqs[type];
        }
    }

    if ( r ) {
        bzero(&r->cmd_pkt->cmd, sizeof(struct tws_command_apache));
        r->data = NULL;
        r->length = 0;
        r->type = type;
        r->flags = TWS_DIR_UNKNOWN;
        r->error_code = TWS_REQ_RET_INVALID;
        r->cb = NULL;
        r->ccb_ptr = NULL;
        callout_stop(&r->timeout);
        r->next = r->prev = NULL;

        r->state = ((type == TWS_REQ_TYPE_SCSI_IO) ? TWS_REQ_STATE_TRAN : TWS_REQ_STATE_BUSY);
    }

    mtx_unlock(my_mutex);

    return(r);
}

void
tws_release_request(struct tws_request *req)
{

    struct tws_softc *sc = req->sc;

    TWS_TRACE_DEBUG(sc, "entry", sc, 0);
    mtx_lock(&sc->q_lock);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    mtx_unlock(&sc->q_lock);
}

static device_method_t tws_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     tws_probe),
    DEVMETHOD(device_attach,    tws_attach),
    DEVMETHOD(device_detach,    tws_detach),
    DEVMETHOD(device_shutdown,  tws_shutdown),
    DEVMETHOD(device_suspend,   tws_suspend),
    DEVMETHOD(device_resume,    tws_resume),

    DEVMETHOD_END
};

static driver_t tws_driver = {
        "tws",
        tws_methods,
        sizeof(struct tws_softc)
};

static devclass_t tws_devclass;

/* DEFINE_CLASS_0(tws, tws_driver, tws_methods, sizeof(struct tws_softc)); */
DRIVER_MODULE(tws, pci, tws_driver, tws_devclass, 0, 0);
MODULE_DEPEND(tws, cam, 1, 1, 1);
MODULE_DEPEND(tws, pci, 1, 1, 1);

TUNABLE_INT("hw.tws.queue_depth", &tws_queue_depth);
TUNABLE_INT("hw.tws.enable_msi", &tws_enable_msi);