xref: /freebsd-14.2/sys/dev/mpi3mr/mpi3mr_app.c (revision 1d8eda2e)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2020-2023, Broadcom Inc. All rights reserved.
5  * Support: <[email protected]>
6  *
7  * Authors: Sumit Saxena <[email protected]>
8  *	    Chandrakanth Patil <[email protected]>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  *
14  * 1. Redistributions of source code must retain the above copyright notice,
15  *    this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright notice,
17  *    this list of conditions and the following disclaimer in the documentation and/or other
18  *    materials provided with the distribution.
19  * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software without
21  *    specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * The views and conclusions contained in the software and documentation are
36  * those of the authors and should not be interpreted as representing
37  * official policies,either expressed or implied, of the FreeBSD Project.
38  *
39  * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
40  *
41  * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
42  */
43 
44 #include <sys/cdefs.h>
45 #include <sys/param.h>
46 #include <sys/proc.h>
47 #include <cam/cam.h>
48 #include <cam/cam_ccb.h>
49 #include "mpi3mr_cam.h"
50 #include "mpi3mr_app.h"
51 #include "mpi3mr.h"
52 
/* Character-device entry points registered via mpi3mr_app_attach(). */
static d_open_t		mpi3mr_open;
static d_close_t	mpi3mr_close;
static d_ioctl_t	mpi3mr_ioctl;
static d_poll_t		mpi3mr_poll;

/* Switch table for the per-controller /dev/mpi3mr%d management node. */
static struct cdevsw mpi3mr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mpi3mr_open,
	.d_close =	mpi3mr_close,
	.d_ioctl =	mpi3mr_ioctl,
	.d_poll =	mpi3mr_poll,
	.d_name =	"mpi3mr",
};

/*
 * Global registry of attached adapter instances; slots are filled in
 * mpi3mr_app_attach(), cleared in mpi3mr_app_detach(), and looked up by
 * mpi3mr_app_get_adp_instance() to resolve an ioctl's target adapter.
 */
static struct mpi3mr_mgmt_info mpi3mr_mgmt_info;
69 
/*
 * mpi3mr_open - open(2) handler for the management device.
 *
 * No per-open state is needed; always succeeds.
 */
static int
mpi3mr_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}
76 
/*
 * mpi3mr_close - close(2) handler for the management device.
 *
 * Nothing to tear down per descriptor; always succeeds.
 */
static int
mpi3mr_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}
83 
84 /*
85  * mpi3mr_app_attach - Char device registration
86  * @sc: Adapter reference
87  *
88  * This function does char device registration.
89  *
90  * Return: 0 on success and proper error codes on failure
91  */
92 int
mpi3mr_app_attach(struct mpi3mr_softc * sc)93 mpi3mr_app_attach(struct mpi3mr_softc *sc)
94 {
95 
96 	/* Create a /dev entry for Avenger controller */
97 	sc->mpi3mr_cdev = make_dev(&mpi3mr_cdevsw, device_get_unit(sc->mpi3mr_dev),
98 				   UID_ROOT, GID_OPERATOR, 0640, "mpi3mr%d",
99 				   device_get_unit(sc->mpi3mr_dev));
100 
101 	if (sc->mpi3mr_cdev == NULL)
102 		return (ENOMEM);
103 
104 	sc->mpi3mr_cdev->si_drv1 = sc;
105 
106 	/* Assign controller instance to mgmt_info structure */
107 	if (device_get_unit(sc->mpi3mr_dev) == 0)
108 		memset(&mpi3mr_mgmt_info, 0, sizeof(mpi3mr_mgmt_info));
109 	mpi3mr_mgmt_info.count++;
110 	mpi3mr_mgmt_info.sc_ptr[mpi3mr_mgmt_info.max_index] = sc;
111 	mpi3mr_mgmt_info.max_index++;
112 
113 	return (0);
114 }
115 
116 void
mpi3mr_app_detach(struct mpi3mr_softc * sc)117 mpi3mr_app_detach(struct mpi3mr_softc *sc)
118 {
119 	U8 i = 0;
120 
121 	if (sc->mpi3mr_cdev == NULL)
122 		return;
123 
124 	destroy_dev(sc->mpi3mr_cdev);
125 	for (i = 0; i < mpi3mr_mgmt_info.max_index; i++) {
126 		if (mpi3mr_mgmt_info.sc_ptr[i] == sc) {
127 			mpi3mr_mgmt_info.count--;
128 			mpi3mr_mgmt_info.sc_ptr[i] = NULL;
129 			break;
130 		}
131 	}
132 	return;
133 }
134 
135 static int
mpi3mr_poll(struct cdev * dev,int poll_events,struct thread * td)136 mpi3mr_poll(struct cdev *dev, int poll_events, struct thread *td)
137 {
138 	int revents = 0;
139 	struct mpi3mr_softc *sc = NULL;
140 	sc = dev->si_drv1;
141 
142 	if ((poll_events & (POLLIN | POLLRDNORM)) &&
143 	    (sc->mpi3mr_aen_triggered))
144 		revents |= poll_events & (POLLIN | POLLRDNORM);
145 
146 	if (revents == 0) {
147 		if (poll_events & (POLLIN | POLLRDNORM)) {
148 			sc->mpi3mr_poll_waiting = 1;
149 			selrecord(td, &sc->mpi3mr_select);
150 		}
151 	}
152 	return revents;
153 }
154 
155 /**
156  * mpi3mr_app_get_adp_instancs - Get Adapter instance
157  * @mrioc_id: Adapter ID
158  *
159  * This fucnction searches the Adapter reference with mrioc_id
160  * upon found, returns the adapter reference otherwise returns
161  * the NULL
162  *
163  * Return: Adapter reference on success and NULL on failure
164  */
165 static struct mpi3mr_softc *
mpi3mr_app_get_adp_instance(U8 mrioc_id)166 mpi3mr_app_get_adp_instance(U8 mrioc_id)
167 {
168 	struct mpi3mr_softc *sc = NULL;
169 
170 	if (mrioc_id >= mpi3mr_mgmt_info.max_index)
171 		return NULL;
172 
173 	sc = mpi3mr_mgmt_info.sc_ptr[mrioc_id];
174 	return sc;
175 }
176 
/*
 * mpi3mr_app_construct_nvme_sgl - build a native NVMe SGL
 * @sc: Adapter instance reference
 * @nvme_encap_request: NVMe encapsulated MPI request
 * @dma_buffers: DMA buffer descriptor array for the command
 * @bufcnt: Number of entries in @dma_buffers
 *
 * Builds the NVMe-format SGL inside the encapsulated command for the
 * first data (DATA_IN/DATA_OUT) buffer found.  A buffer described by a
 * single DMA descriptor is placed directly into the command; multiple
 * descriptors are emitted into the pre-allocated ioctl chain buffer,
 * referenced from the command by a LAST_SEGMENT SGL element.  Every
 * address is validated against, then patched with, the controller's
 * SGE-modifier bits from IOC facts.
 *
 * Return: 0 on success (including no-data commands), -1 on failure.
 */
static int
mpi3mr_app_construct_nvme_sgl(struct mpi3mr_softc *sc,
			      Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request,
			      struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, U8 bufcnt)
{
	struct mpi3mr_nvme_pt_sge *nvme_sgl;
	U64 sgl_dma;
	U8 count;
	U16 available_sges = 0, i;
	U32 sge_element_size = sizeof(struct mpi3mr_nvme_pt_sge);
	size_t length = 0;
	struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers;
	/* SGE-modifier bits live in the upper 32 bits of the address. */
	U64 sgemod_mask = ((U64)((sc->facts.sge_mod_mask) <<
				 sc->facts.sge_mod_shift) << 32);
	U64 sgemod_val = ((U64)(sc->facts.sge_mod_value) <<
				sc->facts.sge_mod_shift) << 32;

	U32 size;

	/* SGL slot at a fixed offset inside the embedded NVMe command. */
	nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
		    ((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_SGL_OFFSET);

	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any SGL.
	 */
	for (count = 0; count < bufcnt; count++, dma_buff++) {
		if ((dma_buff->data_dir == MPI3MR_APP_DDI) ||
		    (dma_buff->data_dir == MPI3MR_APP_DDO)) {
			length = dma_buff->kern_buf_len;
			break;
		}
	}
	if (!length || !dma_buff->num_dma_desc)
		return 0;

	/* One descriptor fits directly in the command's SGL slot. */
	if (dma_buff->num_dma_desc == 1) {
		available_sges = 1;
		goto build_sges;
	}
	/* Multiple descriptors: chain through the ioctl chain buffer. */
	sgl_dma = (U64)sc->ioctl_chain_sge.dma_addr;

	if (sgl_dma & sgemod_mask) {
		printf(IOCNAME "NVMe SGL address collides with SGEModifier\n",sc->name);
		return -1;
	}

	sgl_dma &= ~sgemod_mask;
	sgl_dma |= sgemod_val;

	memset(sc->ioctl_chain_sge.addr, 0, sc->ioctl_chain_sge.size);
	available_sges = sc->ioctl_chain_sge.size / sge_element_size;
	if (available_sges < dma_buff->num_dma_desc)
		return -1;
	/* Command SGL points at the chain segment holding the data SGEs. */
	memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge));
	nvme_sgl->base_addr = sgl_dma;
	size = dma_buff->num_dma_desc * sizeof(struct mpi3mr_nvme_pt_sge);
	nvme_sgl->length = htole32(size);
	nvme_sgl->type = MPI3MR_NVMESGL_LAST_SEGMENT;

	/* Subsequent data SGEs are written into the chain buffer itself. */
	nvme_sgl = (struct mpi3mr_nvme_pt_sge *) sc->ioctl_chain_sge.addr;

build_sges:
	for (i = 0; i < dma_buff->num_dma_desc; i++) {
		sgl_dma = htole64(dma_buff->dma_desc[i].dma_addr);
		if (sgl_dma & sgemod_mask) {
			printf("%s: SGL address collides with SGE modifier\n",
			       __func__);
		return -1;
		}

		sgl_dma &= ~sgemod_mask;
		sgl_dma |= sgemod_val;

		nvme_sgl->base_addr = sgl_dma;
		nvme_sgl->length = htole32(dma_buff->dma_desc[i].size);
		nvme_sgl->type = MPI3MR_NVMESGL_DATA_SEGMENT;
		nvme_sgl++;
		available_sges--;
	}

	return 0;
}
260 
261 static int
mpi3mr_app_build_nvme_prp(struct mpi3mr_softc * sc,Mpi3NVMeEncapsulatedRequest_t * nvme_encap_request,struct mpi3mr_ioctl_mpt_dma_buffer * dma_buffers,U8 bufcnt)262 mpi3mr_app_build_nvme_prp(struct mpi3mr_softc *sc,
263 			  Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request,
264 			  struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, U8 bufcnt)
265 {
266 	int prp_size = MPI3MR_NVME_PRP_SIZE;
267 	U64 *prp_entry, *prp1_entry, *prp2_entry;
268 	U64 *prp_page;
269 	bus_addr_t prp_entry_dma, prp_page_dma, dma_addr;
270 	U32 offset, entry_len, dev_pgsz;
271 	U32 page_mask_result, page_mask;
272 	size_t length = 0, desc_len;
273 	U8 count;
274 	struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers;
275 	U64 sgemod_mask = ((U64)((sc->facts.sge_mod_mask) <<
276 			    sc->facts.sge_mod_shift) << 32);
277 	U64 sgemod_val = ((U64)(sc->facts.sge_mod_value) <<
278 			  sc->facts.sge_mod_shift) << 32;
279 	U16 dev_handle = nvme_encap_request->DevHandle;
280 	struct mpi3mr_target *tgtdev;
281 	U16 desc_count = 0;
282 
283 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
284 	if (!tgtdev) {
285 		printf(IOCNAME "EncapNVMe Error: Invalid DevHandle 0x%02x\n", sc->name,
286 		       dev_handle);
287 		return -1;
288 	}
289 	if (tgtdev->dev_spec.pcie_inf.pgsz == 0) {
290 		printf(IOCNAME "%s: NVME device page size is zero for handle 0x%04x\n",
291 		       sc->name, __func__, dev_handle);
292 		return -1;
293 	}
294 	dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz);
295 
296 	page_mask = dev_pgsz - 1;
297 
298 	if (dev_pgsz > MPI3MR_IOCTL_SGE_SIZE){
299 		printf("%s: NVMe device page size(%d) is greater than ioctl data sge size(%d) for handle 0x%04x\n",
300 		       __func__, dev_pgsz,  MPI3MR_IOCTL_SGE_SIZE, dev_handle);
301 		return -1;
302 	}
303 
304 	if (MPI3MR_IOCTL_SGE_SIZE % dev_pgsz){
305 		printf("%s: ioctl data sge size(%d) is not a multiple of NVMe device page size(%d) for handle 0x%04x\n",
306 		       __func__, MPI3MR_IOCTL_SGE_SIZE, dev_pgsz, dev_handle);
307 		return -1;
308 	}
309 
310 	/*
311 	 * Not all commands require a data transfer. If no data, just return
312 	 * without constructing any PRP.
313 	 */
314 	for (count = 0; count < bufcnt; count++, dma_buff++) {
315 		if ((dma_buff->data_dir == MPI3MR_APP_DDI) ||
316 		    (dma_buff->data_dir == MPI3MR_APP_DDO)) {
317 			length = dma_buff->kern_buf_len;
318 			break;
319 		}
320 	}
321 	if (!length || !dma_buff->num_dma_desc)
322 		return 0;
323 
324 	for (count = 0; count < dma_buff->num_dma_desc; count++) {
325 		dma_addr = dma_buff->dma_desc[count].dma_addr;
326 		if (dma_addr & page_mask) {
327 			printf("%s:dma_addr 0x%lu is not aligned with page size 0x%x\n",
328 			       __func__,  dma_addr, dev_pgsz);
329 			return -1;
330 		}
331 	}
332 
333 	dma_addr = dma_buff->dma_desc[0].dma_addr;
334 	desc_len = dma_buff->dma_desc[0].size;
335 
336 	sc->nvme_encap_prp_sz = 0;
337 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,		/* parent */
338 				4, 0,				/* algnmnt, boundary */
339 				sc->dma_loaddr,			/* lowaddr */
340 				BUS_SPACE_MAXADDR,		/* highaddr */
341 				NULL, NULL,			/* filter, filterarg */
342 				dev_pgsz,			/* maxsize */
343                                 1,				/* nsegments */
344 				dev_pgsz,			/* maxsegsize */
345                                 0,				/* flags */
346                                 NULL, NULL,			/* lockfunc, lockarg */
347 				&sc->nvme_encap_prp_list_dmatag)) {
348 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot create ioctl NVME kernel buffer dma tag\n");
349 		return (ENOMEM);
350         }
351 
352 	if (bus_dmamem_alloc(sc->nvme_encap_prp_list_dmatag, (void **)&sc->nvme_encap_prp_list,
353 			     BUS_DMA_NOWAIT, &sc->nvme_encap_prp_list_dma_dmamap)) {
354 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate ioctl NVME dma memory\n");
355 		return (ENOMEM);
356         }
357 
358 	bzero(sc->nvme_encap_prp_list, dev_pgsz);
359 	bus_dmamap_load(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap,
360 			sc->nvme_encap_prp_list, dev_pgsz, mpi3mr_memaddr_cb, &sc->nvme_encap_prp_list_dma,
361 			BUS_DMA_NOWAIT);
362 
363 	if (!sc->nvme_encap_prp_list) {
364 		printf(IOCNAME "%s:%d Cannot load ioctl NVME dma memory for size: %d\n", sc->name,
365 		       __func__, __LINE__, dev_pgsz);
366 		goto err_out;
367 	}
368 	sc->nvme_encap_prp_sz = dev_pgsz;
369 
370 	/*
371 	 * Set pointers to PRP1 and PRP2, which are in the NVMe command.
372 	 * PRP1 is located at a 24 byte offset from the start of the NVMe
373 	 * command.  Then set the current PRP entry pointer to PRP1.
374 	 */
375 	prp1_entry = (U64 *)((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_PRP1_OFFSET);
376 	prp2_entry = (U64 *)((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_PRP2_OFFSET);
377 	prp_entry = prp1_entry;
378 	/*
379 	 * For the PRP entries, use the specially allocated buffer of
380 	 * contiguous memory.
381 	 */
382 	prp_page = sc->nvme_encap_prp_list;
383 	prp_page_dma = sc->nvme_encap_prp_list_dma;
384 
385 	/*
386 	 * Check if we are within 1 entry of a page boundary we don't
387 	 * want our first entry to be a PRP List entry.
388 	 */
389 	page_mask_result = (uintptr_t)((U8 *)prp_page + prp_size) & page_mask;
390 	if (!page_mask_result) {
391 		printf(IOCNAME "PRP Page is not page aligned\n", sc->name);
392 		goto err_out;
393 	}
394 
395 	/*
396 	 * Set PRP physical pointer, which initially points to the current PRP
397 	 * DMA memory page.
398 	 */
399 	prp_entry_dma = prp_page_dma;
400 
401 
402 	/* Loop while the length is not zero. */
403 	while (length) {
404 		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
405 		if (!page_mask_result && (length >  dev_pgsz)) {
406 			printf(IOCNAME "Single PRP page is not sufficient\n", sc->name);
407 			goto err_out;
408 		}
409 
410 		/* Need to handle if entry will be part of a page. */
411 		offset = dma_addr & page_mask;
412 		entry_len = dev_pgsz - offset;
413 
414 		if (prp_entry == prp1_entry) {
415 			/*
416 			 * Must fill in the first PRP pointer (PRP1) before
417 			 * moving on.
418 			 */
419 			*prp1_entry = dma_addr;
420 			if (*prp1_entry & sgemod_mask) {
421 				printf(IOCNAME "PRP1 address collides with SGEModifier\n", sc->name);
422 				goto err_out;
423 			}
424 			*prp1_entry &= ~sgemod_mask;
425 			*prp1_entry |= sgemod_val;
426 
427 			/*
428 			 * Now point to the second PRP entry within the
429 			 * command (PRP2).
430 			 */
431 			prp_entry = prp2_entry;
432 		} else if (prp_entry == prp2_entry) {
433 			/*
434 			 * Should the PRP2 entry be a PRP List pointer or just
435 			 * a regular PRP pointer?  If there is more than one
436 			 * more page of data, must use a PRP List pointer.
437 			 */
438 			if (length > dev_pgsz) {
439 				/*
440 				 * PRP2 will contain a PRP List pointer because
441 				 * more PRP's are needed with this command. The
442 				 * list will start at the beginning of the
443 				 * contiguous buffer.
444 				 */
445 				*prp2_entry = prp_entry_dma;
446 				if (*prp2_entry & sgemod_mask) {
447 					printf(IOCNAME "PRP list address collides with SGEModifier\n", sc->name);
448 					goto err_out;
449 				}
450 				*prp2_entry &= ~sgemod_mask;
451 				*prp2_entry |= sgemod_val;
452 
453 				/*
454 				 * The next PRP Entry will be the start of the
455 				 * first PRP List.
456 				 */
457 				prp_entry = prp_page;
458 				continue;
459 			} else {
460 				/*
461 				 * After this, the PRP Entries are complete.
462 				 * This command uses 2 PRP's and no PRP list.
463 				 */
464 				*prp2_entry = dma_addr;
465 				if (*prp2_entry & sgemod_mask) {
466 					printf(IOCNAME "PRP2 address collides with SGEModifier\n", sc->name);
467 					goto err_out;
468 				}
469 				*prp2_entry &= ~sgemod_mask;
470 				*prp2_entry |= sgemod_val;
471 			}
472 		} else {
473 			/*
474 			 * Put entry in list and bump the addresses.
475 			 *
476 			 * After PRP1 and PRP2 are filled in, this will fill in
477 			 * all remaining PRP entries in a PRP List, one per
478 			 * each time through the loop.
479 			 */
480 			*prp_entry = dma_addr;
481 			if (*prp_entry & sgemod_mask) {
482 				printf(IOCNAME "PRP address collides with SGEModifier\n", sc->name);
483 				goto err_out;
484 			}
485 			*prp_entry &= ~sgemod_mask;
486 			*prp_entry |= sgemod_val;
487 			prp_entry++;
488 			prp_entry_dma += prp_size;
489 		}
490 
491 		/* Decrement length accounting for last partial page. */
492 		if (entry_len >= length)
493 			length = 0;
494 		else {
495 			if (entry_len <= desc_len) {
496 				dma_addr += entry_len;
497 				desc_len -= entry_len;
498 			}
499 			if (!desc_len) {
500 				if ((++desc_count) >=
501 				   dma_buff->num_dma_desc) {
502 					printf("%s: Invalid len %ld while building PRP\n",
503 					       __func__, length);
504 					goto err_out;
505 				}
506 				dma_addr =
507 				    dma_buff->dma_desc[desc_count].dma_addr;
508 				desc_len =
509 				    dma_buff->dma_desc[desc_count].size;
510 			}
511 			length -= entry_len;
512 		}
513 	}
514 	return 0;
515 err_out:
516 	if (sc->nvme_encap_prp_list && sc->nvme_encap_prp_list_dma) {
517 		bus_dmamap_unload(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap);
518 		bus_dmamem_free(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list, sc->nvme_encap_prp_list_dma_dmamap);
519 		bus_dma_tag_destroy(sc->nvme_encap_prp_list_dmatag);
520 		sc->nvme_encap_prp_list = NULL;
521 	}
522 	return -1;
523 }
524 
525  /**
526 + * mpi3mr_map_data_buffer_dma - build dma descriptors for data
527 + *                              buffers
528 + * @sc: Adapter instance reference
529 + * @dma_buff: buffer map descriptor
530 + * @desc_count: Number of already consumed dma descriptors
531 + *
532 + * This function computes how many pre-allocated DMA descriptors
533 + * are required for the given data buffer and if those number of
534 + * descriptors are free, then setup the mapping of the scattered
535 + * DMA address to the given data buffer, if the data direction
536 + * of the buffer is DATA_OUT then the actual data is copied to
537 + * the DMA buffers
538 + *
539 + * Return: 0 on success, -1 on failure
540 + */
mpi3mr_map_data_buffer_dma(struct mpi3mr_softc * sc,struct mpi3mr_ioctl_mpt_dma_buffer * dma_buffers,U8 desc_count)541 static int mpi3mr_map_data_buffer_dma(struct mpi3mr_softc *sc,
542 				      struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers,
543 				      U8 desc_count)
544 {
545 	U16 i, needed_desc = (dma_buffers->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE);
546 	U32 buf_len = dma_buffers->kern_buf_len, copied_len = 0;
547 	int error;
548 
549 	if (dma_buffers->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE)
550 		needed_desc++;
551 
552 	if ((needed_desc + desc_count) > MPI3MR_NUM_IOCTL_SGE) {
553 		printf("%s: DMA descriptor mapping error %d:%d:%d\n",
554 		       __func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE);
555 		return -1;
556 	}
557 
558 	dma_buffers->dma_desc = malloc(sizeof(*dma_buffers->dma_desc) * needed_desc,
559 				       M_MPI3MR, M_NOWAIT | M_ZERO);
560 	if (!dma_buffers->dma_desc)
561 		return -1;
562 
563 	error = 0;
564 	for (i = 0; i < needed_desc; i++, desc_count++) {
565 
566 		dma_buffers->dma_desc[i].addr = sc->ioctl_sge[desc_count].addr;
567 		dma_buffers->dma_desc[i].dma_addr = sc->ioctl_sge[desc_count].dma_addr;
568 
569 		if (buf_len < sc->ioctl_sge[desc_count].size)
570 			dma_buffers->dma_desc[i].size = buf_len;
571 		else
572 			dma_buffers->dma_desc[i].size = sc->ioctl_sge[desc_count].size;
573 
574 		buf_len -= dma_buffers->dma_desc[i].size;
575 		memset(dma_buffers->dma_desc[i].addr, 0, sc->ioctl_sge[desc_count].size);
576 
577 		if (dma_buffers->data_dir == MPI3MR_APP_DDO) {
578 			error = copyin(((U8 *)dma_buffers->user_buf + copied_len),
579 			       dma_buffers->dma_desc[i].addr,
580 			       dma_buffers->dma_desc[i].size);
581 			if (error != 0)
582 				break;
583 			copied_len += dma_buffers->dma_desc[i].size;
584 		}
585 	}
586 	if (error != 0) {
587 		printf("%s: DMA copyin error %d\n", __func__, error);
588 		free(dma_buffers->dma_desc, M_MPI3MR);
589 		return -1;
590 	}
591 
592 	dma_buffers->num_dma_desc = needed_desc;
593 
594 	return 0;
595 }
596 
597 static unsigned int
mpi3mr_app_get_nvme_data_fmt(Mpi3NVMeEncapsulatedRequest_t * nvme_encap_request)598 mpi3mr_app_get_nvme_data_fmt(Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request)
599 {
600 	U8 format = 0;
601 
602 	format = ((nvme_encap_request->Command[0] & 0xc000) >> 14);
603 	return format;
604 }
605 
mpi3mr_total_num_ioctl_sges(struct mpi3mr_ioctl_mpt_dma_buffer * dma_buffers,U8 bufcnt)606 static inline U16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers,
607 					      U8 bufcnt)
608 {
609 	U16 i, sge_count = 0;
610 	for (i=0; i < bufcnt; i++, dma_buffers++) {
611 		if ((dma_buffers->data_dir == MPI3MR_APP_DDN) ||
612 		    dma_buffers->kern_buf)
613 			continue;
614 		sge_count += dma_buffers->num_dma_desc;
615 		if (!dma_buffers->num_dma_desc)
616 			sge_count++;
617 	}
618 	return sge_count;
619 }
620 
/*
 * mpi3mr_app_construct_sgl - build the MPI SGL for a pass-through request
 * @sc: Adapter instance reference
 * @mpi_request: MPI request frame
 * @sgl_offset: Byte offset of the SGL within the request frame
 * @dma_buffers: DMA buffer descriptor array for the command
 * @bufcnt: Number of entries in @dma_buffers
 * @is_rmc: Non-zero when a RAID management command buffer is present
 * @is_rmr: Non-zero when a RAID management response buffer is present
 * @num_datasges: Number of data buffers still needing SGEs
 *
 * Emits simple SGEs for every data buffer, marking buffer boundaries
 * with END_OF_BUFFER and the final SGE with END_OF_LIST.  RAID
 * management commands place their command/response SGLs in dedicated
 * request fields and continue the data SGL inside the command's kernel
 * buffer; other requests use the in-frame SGL space and fall back to a
 * LAST_CHAIN SGE pointing at the pre-allocated ioctl chain buffer when
 * the frame runs out of room.  SMP pass-through is limited to at most
 * two single-descriptor SGEs.
 *
 * Return: 0 on success, -1 when the available SGE space is exceeded.
 */
static int
mpi3mr_app_construct_sgl(struct mpi3mr_softc *sc, U8 *mpi_request, U32 sgl_offset,
			 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers,
			 U8 bufcnt, U8 is_rmc, U8 is_rmr, U8 num_datasges)
{
	U8 *sgl = (mpi_request + sgl_offset), count = 0;
	Mpi3RequestHeader_t *mpi_header = (Mpi3RequestHeader_t *)mpi_request;
	Mpi3MgmtPassthroughRequest_t *rmgmt_req =
		(Mpi3MgmtPassthroughRequest_t *)mpi_request;
	struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers;
	U8 flag, sgl_flags, sgl_flags_eob, sgl_flags_last, last_chain_sgl_flags;
	U16 available_sges, i, sges_needed;
	U32 sge_element_size = sizeof(struct _MPI3_SGE_COMMON);
	bool chain_used = false;

	sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
		MPI3_SGE_FLAGS_DLAS_SYSTEM ;
	sgl_flags_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER;
	sgl_flags_last = sgl_flags_eob | MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	sges_needed = mpi3mr_total_num_ioctl_sges(dma_buffers, bufcnt);

	if (is_rmc) {
		/*
		 * RAID management: the command buffer gets its own SGL field
		 * and the data SGL continues inside the command's kernel
		 * buffer, after the user-visible portion.
		 */
		mpi3mr_add_sg_single(&rmgmt_req->CommandSGL,
		    sgl_flags_last, dma_buff->kern_buf_len,
		    dma_buff->kern_buf_dma);
		sgl = (U8 *) dma_buff->kern_buf + dma_buff->user_buf_len;
		available_sges = (dma_buff->kern_buf_len -
		    dma_buff->user_buf_len) / sge_element_size;
		if (sges_needed > available_sges)
			return -1;
		chain_used = true;
		dma_buff++;
		count++;
		if (is_rmr) {
			/* Response buffer likewise has a dedicated SGL field. */
			mpi3mr_add_sg_single(&rmgmt_req->ResponseSGL,
			    sgl_flags_last, dma_buff->kern_buf_len,
			    dma_buff->kern_buf_dma);
			dma_buff++;
			count++;
		} else
			mpi3mr_build_zero_len_sge(
			    &rmgmt_req->ResponseSGL);
		if (num_datasges) {
			i = 0;
			goto build_sges;
		}
	} else {
		/* Regular request: SGL lives in the remaining frame space. */
		if (sgl_offset >= MPI3MR_AREQ_FRAME_SZ)
			return -1;
		available_sges = (MPI3MR_AREQ_FRAME_SZ - sgl_offset) /
		    sge_element_size;
		if (!available_sges)
			return -1;
	}

	/* No data buffers at all: terminate the SGL with a zero-length SGE. */
	if (!num_datasges) {
		mpi3mr_build_zero_len_sge(sgl);
		return 0;
	}

	if (mpi_header->Function == MPI3_FUNCTION_SMP_PASSTHROUGH) {
		/* SMP pass-through: at most two single-descriptor buffers. */
		if ((sges_needed > 2) || (sges_needed > available_sges))
			return -1;
		for (; count < bufcnt; count++, dma_buff++) {
			if ((dma_buff->data_dir == MPI3MR_APP_DDN) ||
			    !dma_buff->num_dma_desc)
				continue;
			mpi3mr_add_sg_single(sgl, sgl_flags_last,
			    dma_buff->dma_desc[0].size,
			    dma_buff->dma_desc[0].dma_addr);
			sgl += sge_element_size;
		}
		return 0;
	}
	i = 0;

build_sges:
	for (; count < bufcnt; count++, dma_buff++) {
		if (dma_buff->data_dir == MPI3MR_APP_DDN)
			continue;
		if (!dma_buff->num_dma_desc) {
			/* Zero-length data buffer still needs one SGE. */
			if (chain_used && !available_sges)
				return -1;
			if (!chain_used && (available_sges == 1) &&
			    (sges_needed > 1))
				goto setup_chain;
			flag = sgl_flags_eob;
			if (num_datasges == 1)
				flag = sgl_flags_last;
			mpi3mr_add_sg_single(sgl, flag, 0, 0);
			sgl += sge_element_size;
			available_sges--;
			sges_needed--;
			num_datasges--;
			continue;
		}
		for (; i < dma_buff->num_dma_desc; i++) {
			if (chain_used && !available_sges)
				return -1;
			/* Keep the last in-frame slot for the chain SGE. */
			if (!chain_used && (available_sges == 1) &&
			    (sges_needed > 1))
				goto setup_chain;
			flag = sgl_flags;
			if (i == (dma_buff->num_dma_desc - 1)) {
				if (num_datasges == 1)
					flag = sgl_flags_last;
				else
					flag = sgl_flags_eob;
			}

			mpi3mr_add_sg_single(sgl, flag,
			    dma_buff->dma_desc[i].size,
			    dma_buff->dma_desc[i].dma_addr);
			sgl += sge_element_size;
			available_sges--;
			sges_needed--;
		}
		num_datasges--;
		i = 0;
	}
	return 0;

setup_chain:
	/* Continue the SGL in the pre-allocated ioctl chain buffer. */
	available_sges = sc->ioctl_chain_sge.size / sge_element_size;
	if (sges_needed > available_sges)
		return -1;
	mpi3mr_add_sg_single(sgl, last_chain_sgl_flags,
	    (sges_needed * sge_element_size), sc->ioctl_chain_sge.dma_addr);
	memset(sc->ioctl_chain_sge.addr, 0, sc->ioctl_chain_sge.size);
	sgl = (U8 *)sc->ioctl_chain_sge.addr;
	chain_used = true;
	goto build_sges;
}
757 
758 
759 /**
760  * mpi3mr_app_mptcmds - MPI Pass through IOCTL handler
761  * @dev: char device
762  * @cmd: IOCTL command
763  * @arg: User data payload buffer for the IOCTL
764  * @flag: flags
765  * @thread: threads
766  *
767  * This function is the top level handler for MPI Pass through
768  * IOCTL, this does basic validation of the input data buffers,
769  * identifies the given buffer types and MPI command, allocates
770  * DMAable memory for user given buffers, construstcs SGL
771  * properly and passes the command to the firmware.
772  *
773  * Once the MPI command is completed the driver copies the data
774  * if any and reply, sense information to user provided buffers.
775  * If the command is timed out then issues controller reset
776  * prior to returning.
777  *
778  * Return: 0 on success and proper error codes on failure
779  */
780 static long
mpi3mr_app_mptcmds(struct cdev * dev,u_long cmd,void * uarg,int flag,struct thread * td)781 mpi3mr_app_mptcmds(struct cdev *dev, u_long cmd, void *uarg,
782 		   int flag, struct thread *td)
783 {
784 	long rval = EINVAL;
785 	U8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0;
786 	U8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF;
787 	U16 desc_count = 0;
788 	U8 nvme_fmt = 0;
789 	U32 tmplen = 0, erbsz = MPI3MR_SENSEBUF_SZ, din_sz = 0, dout_sz = 0;
790 	U8 *kern_erb = NULL;
791 	U8 *mpi_request = NULL;
792 	Mpi3RequestHeader_t *mpi_header = NULL;
793 	Mpi3PELReqActionGetCount_t *pel = NULL;
794 	Mpi3StatusReplyDescriptor_t *status_desc = NULL;
795 	struct mpi3mr_softc *sc = NULL;
796 	struct mpi3mr_ioctl_buf_entry_list *buffer_list = NULL;
797 	struct mpi3mr_buf_entry *buf_entries = NULL;
798 	struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers = NULL, *dma_buff = NULL;
799 	struct mpi3mr_ioctl_mpirepbuf *mpirepbuf = NULL;
800 	struct mpi3mr_ioctl_mptcmd *karg = (struct mpi3mr_ioctl_mptcmd *)uarg;
801 
802 
803 	sc = mpi3mr_app_get_adp_instance(karg->mrioc_id);
804 	if (!sc)
805 		return ENODEV;
806 
807 	if (!sc->ioctl_sges_allocated) {
808 		printf("%s: DMA memory was not allocated\n", __func__);
809 		return ENOMEM;
810 	}
811 
812 	if (karg->timeout < MPI3MR_IOCTL_DEFAULT_TIMEOUT)
813 		karg->timeout = MPI3MR_IOCTL_DEFAULT_TIMEOUT;
814 
815 	if (!karg->mpi_msg_size || !karg->buf_entry_list_size) {
816 		printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name,
817 		       __func__, __LINE__);
818 		return rval;
819 	}
820 	if ((karg->mpi_msg_size * 4) > MPI3MR_AREQ_FRAME_SZ) {
821 		printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name,
822 		       __func__, __LINE__);
823 		return rval;
824 	}
825 
826 	mpi_request = malloc(MPI3MR_AREQ_FRAME_SZ, M_MPI3MR, M_NOWAIT | M_ZERO);
827 	if (!mpi_request) {
828 		printf(IOCNAME "%s: memory allocation failed for mpi_request\n", sc->name,
829 		       __func__);
830 		return ENOMEM;
831 	}
832 
833 	mpi_header = (Mpi3RequestHeader_t *)mpi_request;
834 	pel = (Mpi3PELReqActionGetCount_t *)mpi_request;
835 	if (copyin(karg->mpi_msg_buf, mpi_request, (karg->mpi_msg_size * 4))) {
836 		printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name,
837 		       __FILE__, __LINE__, __func__);
838 		rval = EFAULT;
839 		goto out;
840 	}
841 
842 	buffer_list = malloc(karg->buf_entry_list_size, M_MPI3MR, M_NOWAIT | M_ZERO);
843 	if (!buffer_list) {
844 		printf(IOCNAME "%s: memory allocation failed for buffer_list\n", sc->name,
845 		       __func__);
846 		rval = ENOMEM;
847 		goto out;
848 	}
849 	if (copyin(karg->buf_entry_list, buffer_list, karg->buf_entry_list_size)) {
850 		printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name,
851 		       __FILE__, __LINE__, __func__);
852 		rval = EFAULT;
853 		goto out;
854 	}
855 	if (!buffer_list->num_of_buf_entries) {
856 		printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name,
857 		       __func__, __LINE__);
858 		rval = EINVAL;
859 		goto out;
860 	}
861 	bufcnt = buffer_list->num_of_buf_entries;
862 	dma_buffers = malloc((sizeof(*dma_buffers) * bufcnt), M_MPI3MR, M_NOWAIT | M_ZERO);
863 	if (!dma_buffers) {
864 		printf(IOCNAME "%s: memory allocation failed for dma_buffers\n", sc->name,
865 		       __func__);
866 		rval = ENOMEM;
867 		goto out;
868 	}
869 	buf_entries = buffer_list->buf_entry;
870 	dma_buff = dma_buffers;
871 	for (count = 0; count < bufcnt; count++, buf_entries++, dma_buff++) {
872 		memset(dma_buff, 0, sizeof(*dma_buff));
873 		dma_buff->user_buf = buf_entries->buffer;
874 		dma_buff->user_buf_len = buf_entries->buf_len;
875 
876 		switch (buf_entries->buf_type) {
877 		case MPI3MR_IOCTL_BUFTYPE_RAIDMGMT_CMD:
878 			is_rmcb = 1;
879 			if ((count != 0) || !buf_entries->buf_len)
880 				invalid_be = 1;
881 			dma_buff->data_dir = MPI3MR_APP_DDO;
882 			break;
883 		case MPI3MR_IOCTL_BUFTYPE_RAIDMGMT_RESP:
884 			is_rmrb = 1;
885 			if (count != 1 || !is_rmcb || !buf_entries->buf_len)
886 				invalid_be = 1;
887 			dma_buff->data_dir = MPI3MR_APP_DDI;
888 			break;
889 		case MPI3MR_IOCTL_BUFTYPE_DATA_IN:
890 			din_sz = dma_buff->user_buf_len;
891 			din_cnt++;
892 			if ((din_cnt > 1) && !is_rmcb)
893 				invalid_be = 1;
894 			dma_buff->data_dir = MPI3MR_APP_DDI;
895 			break;
896 		case MPI3MR_IOCTL_BUFTYPE_DATA_OUT:
897 			dout_sz = dma_buff->user_buf_len;
898 			dout_cnt++;
899 			if ((dout_cnt > 1) && !is_rmcb)
900 				invalid_be = 1;
901 			dma_buff->data_dir = MPI3MR_APP_DDO;
902 			break;
903 		case MPI3MR_IOCTL_BUFTYPE_MPI_REPLY:
904 			mpirep_offset = count;
905 			dma_buff->data_dir = MPI3MR_APP_DDN;
906 			if (!buf_entries->buf_len)
907 				invalid_be = 1;
908 			break;
909 		case MPI3MR_IOCTL_BUFTYPE_ERR_RESPONSE:
910 			erb_offset = count;
911 			dma_buff->data_dir = MPI3MR_APP_DDN;
912 			if (!buf_entries->buf_len)
913 				invalid_be = 1;
914 			break;
915 		default:
916 			invalid_be = 1;
917 			break;
918 		}
919 		if (invalid_be)
920 			break;
921 	}
922 	if (invalid_be) {
923 		printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name,
924 		       __func__, __LINE__);
925 		rval = EINVAL;
926 		goto out;
927 	}
928 
929 	if (is_rmcb && ((din_sz + dout_sz) > MPI3MR_MAX_IOCTL_TRANSFER_SIZE)) {
930 		printf("%s:%d: invalid data transfer size passed for function 0x%x"
931 		       "din_sz = %d, dout_size = %d\n", __func__, __LINE__,
932 		       mpi_header->Function, din_sz, dout_sz);
933 		rval = EINVAL;
934 		goto out;
935 	}
936 
937  	if ((din_sz > MPI3MR_MAX_IOCTL_TRANSFER_SIZE) ||
938 	    (dout_sz > MPI3MR_MAX_IOCTL_TRANSFER_SIZE)) {
939 		printf("%s:%d: invalid data transfer size passed for function 0x%x"
940 		       "din_size=%d dout_size=%d\n", __func__, __LINE__,
941 		       mpi_header->Function, din_sz, dout_sz);
942 		rval = EINVAL;
943  		goto out;
944  	}
945 
946 	if (mpi_header->Function == MPI3_FUNCTION_SMP_PASSTHROUGH) {
947 		if ((din_sz > MPI3MR_IOCTL_SGE_SIZE) ||
948 		    (dout_sz > MPI3MR_IOCTL_SGE_SIZE)) {
949 			printf("%s:%d: invalid message size passed:%d:%d:%d:%d\n",
950 			       __func__, __LINE__, din_cnt, dout_cnt, din_sz, dout_sz);
951 			rval = EINVAL;
952 			goto out;
953 		}
954 	}
955 
956 	dma_buff = dma_buffers;
957 	for (count = 0; count < bufcnt; count++, dma_buff++) {
958 
959 		dma_buff->kern_buf_len = dma_buff->user_buf_len;
960 
961 		if (is_rmcb && !count) {
962 			dma_buff->kern_buf = sc->ioctl_chain_sge.addr;
963 			dma_buff->kern_buf_len = sc->ioctl_chain_sge.size;
964 			dma_buff->kern_buf_dma = sc->ioctl_chain_sge.dma_addr;
965 			dma_buff->dma_desc = NULL;
966 			dma_buff->num_dma_desc = 0;
967 			memset(dma_buff->kern_buf, 0, dma_buff->kern_buf_len);
968 			tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len);
969 			if (copyin(dma_buff->user_buf, dma_buff->kern_buf, tmplen)) {
970 				mpi3mr_dprint(sc, MPI3MR_ERROR, "failure at %s() line: %d",
971 					      __func__, __LINE__);
972 				rval = EFAULT;
973 				goto out;
974 			}
975 		} else if (is_rmrb && (count == 1)) {
976 			dma_buff->kern_buf = sc->ioctl_resp_sge.addr;
977 			dma_buff->kern_buf_len = sc->ioctl_resp_sge.size;
978 			dma_buff->kern_buf_dma = sc->ioctl_resp_sge.dma_addr;
979 			dma_buff->dma_desc = NULL;
980 			dma_buff->num_dma_desc = 0;
981 			memset(dma_buff->kern_buf, 0, dma_buff->kern_buf_len);
982 			tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len);
983 			dma_buff->kern_buf_len = tmplen;
984 		} else {
985 			if (!dma_buff->kern_buf_len)
986 				continue;
987 			if (mpi3mr_map_data_buffer_dma(sc, dma_buff, desc_count)) {
988 				rval = ENOMEM;
989 				mpi3mr_dprint(sc, MPI3MR_ERROR, "mapping data buffers failed"
990 					      "at %s() line: %d\n", __func__, __LINE__);
991 				goto out;
992 			}
993 			desc_count += dma_buff->num_dma_desc;
994 		}
995 	}
996 
997 	if (erb_offset != 0xFF) {
998 		kern_erb = malloc(erbsz, M_MPI3MR, M_NOWAIT | M_ZERO);
999 		if (!kern_erb) {
1000 			printf(IOCNAME "%s:%d Cannot allocate memory for sense buffer\n", sc->name,
1001 			       __func__, __LINE__);
1002 			rval = ENOMEM;
1003 			goto out;
1004 		}
1005 	}
1006 
1007 	if (sc->ioctl_cmds.state & MPI3MR_CMD_PENDING) {
1008 		printf(IOCNAME "Issue IOCTL: Ioctl command is in use/previous command is pending\n",
1009 		       sc->name);
1010 		rval = EAGAIN;
1011 		goto out;
1012 	}
1013 
1014 	if (sc->unrecoverable) {
1015 		printf(IOCNAME "Issue IOCTL: controller is in unrecoverable state\n", sc->name);
1016 		rval = EFAULT;
1017 		goto out;
1018 	}
1019 
1020 	if (sc->reset_in_progress) {
1021 		printf(IOCNAME "Issue IOCTL: reset in progress\n", sc->name);
1022 		rval = EAGAIN;
1023 		goto out;
1024 	}
1025 	if (sc->block_ioctls) {
1026 		printf(IOCNAME "Issue IOCTL: IOCTLs are blocked\n", sc->name);
1027 		rval = EAGAIN;
1028 		goto out;
1029 	}
1030 
1031 	if (mpi_header->Function != MPI3_FUNCTION_NVME_ENCAPSULATED) {
1032 		if (mpi3mr_app_construct_sgl(sc, mpi_request, (karg->mpi_msg_size * 4), dma_buffers,
1033 					     bufcnt, is_rmcb, is_rmrb, (dout_cnt + din_cnt))) {
1034 			printf(IOCNAME "Issue IOCTL: sgl build failed\n", sc->name);
1035 			rval = EAGAIN;
1036 			goto out;
1037 		}
1038 
1039 	} else {
1040 		nvme_fmt = mpi3mr_app_get_nvme_data_fmt(
1041 			   (Mpi3NVMeEncapsulatedRequest_t *)mpi_request);
1042 		if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) {
1043 			if (mpi3mr_app_build_nvme_prp(sc,
1044 			    (Mpi3NVMeEncapsulatedRequest_t *) mpi_request,
1045 			    dma_buffers, bufcnt)) {
1046 				rval = ENOMEM;
1047 				goto out;
1048 			}
1049 		} else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 ||
1050 			   nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) {
1051 			if (mpi3mr_app_construct_nvme_sgl(sc, (Mpi3NVMeEncapsulatedRequest_t *) mpi_request,
1052 			    dma_buffers, bufcnt)) {
1053 				rval = EINVAL;
1054 				goto out;
1055 			}
1056 		} else {
1057 			printf(IOCNAME "%s: Invalid NVMe Command Format\n", sc->name,
1058 			       __func__);
1059 			rval = EINVAL;
1060 			goto out;
1061 		}
1062 	}
1063 
1064 	sc->ioctl_cmds.state = MPI3MR_CMD_PENDING;
1065 	sc->ioctl_cmds.is_waiting = 1;
1066 	sc->ioctl_cmds.callback = NULL;
1067 	sc->ioctl_cmds.is_senseprst = 0;
1068 	sc->ioctl_cmds.sensebuf = kern_erb;
1069 	memset((sc->ioctl_cmds.reply), 0, sc->reply_sz);
1070 	mpi_header->HostTag = MPI3MR_HOSTTAG_IOCTLCMDS;
1071 	init_completion(&sc->ioctl_cmds.completion);
1072 	rval = mpi3mr_submit_admin_cmd(sc, mpi_request, MPI3MR_AREQ_FRAME_SZ);
1073 	if (rval) {
1074 		printf(IOCNAME "Issue IOCTL: Admin Post failed\n", sc->name);
1075 		goto out_failed;
1076 	}
1077 	wait_for_completion_timeout(&sc->ioctl_cmds.completion, karg->timeout);
1078 
1079 	if (!(sc->ioctl_cmds.state & MPI3MR_CMD_COMPLETE)) {
1080 		sc->ioctl_cmds.is_waiting = 0;
1081 		printf(IOCNAME "Issue IOCTL: command timed out\n", sc->name);
1082 		rval = EAGAIN;
1083 		if (sc->ioctl_cmds.state & MPI3MR_CMD_RESET)
1084 			goto out_failed;
1085 
1086 		sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET;
1087 		sc->reset.reason = MPI3MR_RESET_FROM_IOCTL_TIMEOUT;
1088 		goto out_failed;
1089 	}
1090 
1091 	if (sc->nvme_encap_prp_list && sc->nvme_encap_prp_list_dma) {
1092 		bus_dmamap_unload(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap);
1093 		bus_dmamem_free(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list, sc->nvme_encap_prp_list_dma_dmamap);
1094 		bus_dma_tag_destroy(sc->nvme_encap_prp_list_dmatag);
1095 		sc->nvme_encap_prp_list = NULL;
1096 	}
1097 
1098 	if (((sc->ioctl_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1099 	    != MPI3_IOCSTATUS_SUCCESS) &&
1100 	    (sc->mpi3mr_debug & MPI3MR_DEBUG_IOCTL)) {
1101 		printf(IOCNAME "Issue IOCTL: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n", sc->name,
1102 		       (sc->ioctl_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1103 		       sc->ioctl_cmds.ioc_loginfo);
1104 	}
1105 
1106 	if ((mpirep_offset != 0xFF) &&
1107 	    dma_buffers[mpirep_offset].user_buf_len) {
1108 		dma_buff = &dma_buffers[mpirep_offset];
1109 		dma_buff->kern_buf_len = (sizeof(*mpirepbuf) - 1 +
1110 					   sc->reply_sz);
1111 		mpirepbuf = malloc(dma_buff->kern_buf_len, M_MPI3MR, M_NOWAIT | M_ZERO);
1112 
1113 		if (!mpirepbuf) {
1114 			printf(IOCNAME "%s: failed obtaining a memory for mpi reply\n", sc->name,
1115 			       __func__);
1116 			rval = ENOMEM;
1117 			goto out_failed;
1118 		}
1119 		if (sc->ioctl_cmds.state & MPI3MR_CMD_REPLYVALID) {
1120 			mpirepbuf->mpirep_type =
1121 				MPI3MR_IOCTL_MPI_REPLY_BUFTYPE_ADDRESS;
1122 			memcpy(mpirepbuf->repbuf, sc->ioctl_cmds.reply, sc->reply_sz);
1123 		} else {
1124 			mpirepbuf->mpirep_type =
1125 				MPI3MR_IOCTL_MPI_REPLY_BUFTYPE_STATUS;
1126 			status_desc = (Mpi3StatusReplyDescriptor_t *)
1127 			    mpirepbuf->repbuf;
1128 			status_desc->IOCStatus = sc->ioctl_cmds.ioc_status;
1129 			status_desc->IOCLogInfo = sc->ioctl_cmds.ioc_loginfo;
1130 		}
1131 		tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len);
1132 		if (copyout(mpirepbuf, dma_buff->user_buf, tmplen)) {
1133 			printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name,
1134 			       __FILE__, __LINE__, __func__);
1135 			rval = EFAULT;
1136 			goto out_failed;
1137 		}
1138 	}
1139 
1140 	if (erb_offset != 0xFF && sc->ioctl_cmds.sensebuf &&
1141 	    sc->ioctl_cmds.is_senseprst) {
1142 		dma_buff = &dma_buffers[erb_offset];
1143 		tmplen = min(erbsz, dma_buff->user_buf_len);
1144 		if (copyout(kern_erb, dma_buff->user_buf, tmplen)) {
1145 			printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name,
1146 			       __FILE__, __LINE__, __func__);
1147 			rval = EFAULT;
1148 			goto out_failed;
1149 		}
1150 	}
1151 
1152 	dma_buff = dma_buffers;
1153 	for (count = 0; count < bufcnt; count++, dma_buff++) {
1154 		if ((count == 1) && is_rmrb) {
1155 			if (copyout(dma_buff->kern_buf, dma_buff->user_buf,dma_buff->kern_buf_len)) {
1156 				printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name,
1157 				       __FILE__, __LINE__, __func__);
1158 				rval = EFAULT;
1159 				goto out_failed;
1160 			}
1161 		} else if (dma_buff->data_dir == MPI3MR_APP_DDI) {
1162 			tmplen = 0;
1163 			for (desc_count = 0; desc_count < dma_buff->num_dma_desc; desc_count++) {
1164 				if (copyout(dma_buff->dma_desc[desc_count].addr,
1165 		                    (U8 *)dma_buff->user_buf+tmplen,
1166 				    dma_buff->dma_desc[desc_count].size)) {
1167 					printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name,
1168 					       __FILE__, __LINE__, __func__);
1169 					rval = EFAULT;
1170 					goto out_failed;
1171 				}
1172 				tmplen += dma_buff->dma_desc[desc_count].size;
1173 			}
1174 		}
1175 	}
1176 
1177 	if ((pel->Function == MPI3_FUNCTION_PERSISTENT_EVENT_LOG) &&
1178 	    (pel->Action == MPI3_PEL_ACTION_GET_COUNT))
1179 		sc->mpi3mr_aen_triggered = 0;
1180 
1181 out_failed:
1182 	sc->ioctl_cmds.is_senseprst = 0;
1183 	sc->ioctl_cmds.sensebuf = NULL;
1184 	sc->ioctl_cmds.state = MPI3MR_CMD_NOTUSED;
1185 out:
1186 	if (kern_erb)
1187 		free(kern_erb, M_MPI3MR);
1188 	if (buffer_list)
1189 		free(buffer_list, M_MPI3MR);
1190 	if (mpi_request)
1191 		free(mpi_request, M_MPI3MR);
1192 	if (dma_buffers) {
1193 		dma_buff = dma_buffers;
1194 		for (count = 0; count < bufcnt; count++, dma_buff++) {
1195 			free(dma_buff->dma_desc, M_MPI3MR);
1196 		}
1197 		free(dma_buffers, M_MPI3MR);
1198 	}
1199 	if (mpirepbuf)
1200 		free(mpirepbuf, M_MPI3MR);
1201 	return rval;
1202 }
1203 
1204 /**
1205  * mpi3mr_soft_reset_from_app - Trigger controller reset
1206  * @sc: Adapter instance reference
1207  *
1208  * This function triggers the controller reset from the
1209  * watchdog context and wait for it to complete. It will
1210  * come out of wait upon completion or timeout exaustion.
1211  *
1212  * Return: 0 on success and proper error codes on failure
1213  */
1214 static long
mpi3mr_soft_reset_from_app(struct mpi3mr_softc * sc)1215 mpi3mr_soft_reset_from_app(struct mpi3mr_softc *sc)
1216 {
1217 
1218 	U32 timeout;
1219 
1220 	/* if reset is not in progress, trigger soft reset from watchdog context */
1221 	if (!sc->reset_in_progress) {
1222 		sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET;
1223 		sc->reset.reason = MPI3MR_RESET_FROM_IOCTL;
1224 
1225 		/* Wait for soft reset to start */
1226 		timeout = 50;
1227 		while (timeout--) {
1228 			if (sc->reset_in_progress == 1)
1229 				break;
1230 			DELAY(100 * 1000);
1231 		}
1232 		if (!timeout)
1233 			return EFAULT;
1234 	}
1235 
1236 	/* Wait for soft reset to complete */
1237 	int i = 0;
1238 	timeout = sc->ready_timeout;
1239 	while (timeout--) {
1240 		if (sc->reset_in_progress == 0)
1241 			break;
1242 		i++;
1243 		if (!(i % 5)) {
1244 			mpi3mr_dprint(sc, MPI3MR_INFO,
1245 			    "[%2ds]waiting for controller reset to be finished from %s\n", i, __func__);
1246 		}
1247 		DELAY(1000 * 1000);
1248 	}
1249 
1250 	/*
1251 	 * In case of soft reset failure or not completed within stipulated time,
1252 	 * fail back to application.
1253 	 */
1254 	if ((!timeout || sc->reset.status))
1255 		return EFAULT;
1256 
1257 	return 0;
1258 }
1259 
1260 
1261 /**
1262  * mpi3mr_adp_reset - Issue controller reset
1263  * @sc: Adapter instance reference
1264  * @data_out_buf: User buffer with reset type
1265  * @data_out_sz: length of the user buffer.
1266  *
1267  * This function identifies the user provided reset type and
1268  * issues approporiate reset to the controller and wait for that
1269  * to complete and reinitialize the controller and then returns.
1270  *
1271  * Return: 0 on success and proper error codes on failure
1272  */
1273 static long
mpi3mr_adp_reset(struct mpi3mr_softc * sc,void * data_out_buf,U32 data_out_sz)1274 mpi3mr_adp_reset(struct mpi3mr_softc *sc,
1275 		 void *data_out_buf, U32 data_out_sz)
1276 {
1277 	long rval = EINVAL;
1278 	struct mpi3mr_ioctl_adpreset adpreset;
1279 
1280 	memset(&adpreset, 0, sizeof(adpreset));
1281 
1282 	if (data_out_sz != sizeof(adpreset)) {
1283 		printf(IOCNAME "Invalid user adpreset buffer size %s() line: %d\n", sc->name,
1284 		       __func__, __LINE__);
1285 		goto out;
1286 	}
1287 
1288 	if (copyin(data_out_buf, &adpreset, sizeof(adpreset))) {
1289 		printf(IOCNAME "failure at %s() line:%d\n", sc->name,
1290 		       __func__, __LINE__);
1291 		rval = EFAULT;
1292 		goto out;
1293 	}
1294 
1295 	switch (adpreset.reset_type) {
1296 	case MPI3MR_IOCTL_ADPRESET_SOFT:
1297 		sc->reset.ioctl_reset_snapdump = false;
1298 		break;
1299 	case MPI3MR_IOCTL_ADPRESET_DIAG_FAULT:
1300 		sc->reset.ioctl_reset_snapdump = true;
1301 		break;
1302 	default:
1303 		printf(IOCNAME "Unknown reset_type(0x%x) issued\n", sc->name,
1304 		       adpreset.reset_type);
1305 		goto out;
1306 	}
1307 	rval = mpi3mr_soft_reset_from_app(sc);
1308 	if (rval)
1309 		printf(IOCNAME "reset handler returned error (0x%lx) for reset type 0x%x\n",
1310 		       sc->name, rval, adpreset.reset_type);
1311 
1312 out:
1313 	return rval;
1314 }
1315 
1316 void
mpi3mr_app_send_aen(struct mpi3mr_softc * sc)1317 mpi3mr_app_send_aen(struct mpi3mr_softc *sc)
1318 {
1319 	sc->mpi3mr_aen_triggered = 1;
1320 	if (sc->mpi3mr_poll_waiting) {
1321 		selwakeup(&sc->mpi3mr_select);
1322 		sc->mpi3mr_poll_waiting = 0;
1323 	}
1324 	return;
1325 }
1326 
/**
 * mpi3mr_pel_wait_complete - PEL wait command completion callback
 * @sc: Adapter instance reference
 * @drvr_cmd: Internal command tracker for the PEL wait request
 *
 * Completion handler for the persistent event log (PEL) wait
 * request.  On a successful (or firmware-aborted) reply it sends
 * an AEN to the application and, unless an abort was requested,
 * re-arms the PEL machinery by issuing a get-sequence-number
 * request.  A failed reply is retried up to
 * MPI3MR_PELCMDS_RETRYCOUNT times before the PEL wait is marked
 * no longer pending.
 */
void
mpi3mr_pel_wait_complete(struct mpi3mr_softc *sc,
			 struct mpi3mr_drvr_cmd *drvr_cmd)
{
	U8 retry = 0;
	Mpi3PELReply_t *pel_reply = NULL;
	mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__);

	/* Command flushed by a controller reset: just release the tracker. */
	if (drvr_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drvrcmd;

	if (!(drvr_cmd->state & MPI3MR_CMD_REPLYVALID)) {
		printf(IOCNAME "%s: PELGetSeqNum Failed, No Reply\n", sc->name, __func__);
		goto out_failed;
	}
	pel_reply = (Mpi3PELReply_t *)drvr_cmd->reply;

	/* ABORTED is acceptable here: it means the wait was cancelled on purpose. */
	if (((GET_IOC_STATUS(drvr_cmd->ioc_status)) != MPI3_IOCSTATUS_SUCCESS)
	    || ((le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_SUCCESS)
	    && (le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_ABORTED))){
		printf(IOCNAME "%s: PELGetSeqNum Failed, IOCStatus(0x%04x) Loginfo(0x%08x) PEL_LogStatus(0x%04x)\n",
		       sc->name, __func__, GET_IOC_STATUS(drvr_cmd->ioc_status),
		       drvr_cmd->ioc_loginfo, le16toh(pel_reply->PELogStatus));
		retry = 1;
	}

	if (retry) {
		/* Re-issue the wait until the retry budget is exhausted. */
		if (drvr_cmd->retry_count < MPI3MR_PELCMDS_RETRYCOUNT) {
			drvr_cmd->retry_count++;
			printf(IOCNAME "%s : PELWaitretry=%d\n", sc->name,
			       __func__,  drvr_cmd->retry_count);
			mpi3mr_issue_pel_wait(sc, drvr_cmd);
			return;
		}

		printf(IOCNAME "%s :PELWait failed after all retries\n", sc->name,
		    __func__);
		goto out_failed;
	}

	/* Tell the application a new PEL entry is available. */
	mpi3mr_app_send_aen(sc);

	/* Re-arm: fetch the next sequence number unless an abort is pending. */
	if (!sc->pel_abort_requested) {
		sc->pel_cmds.retry_count = 0;
		mpi3mr_send_pel_getseq(sc, &sc->pel_cmds);
	}

	return;
out_failed:
	sc->pel_wait_pend = 0;
cleanup_drvrcmd:
	drvr_cmd->state = MPI3MR_CMD_NOTUSED;
	drvr_cmd->callback = NULL;
	drvr_cmd->retry_count = 0;
}
1382 
/**
 * mpi3mr_issue_pel_wait - Issue a PEL wait request to firmware
 * @sc: Adapter instance reference
 * @drvr_cmd: Internal command tracker for this request
 *
 * Builds and posts an asynchronous MPI3_PEL_ACTION_WAIT request
 * starting at the driver's newest known sequence number, with the
 * currently configured class/locale filters.  Completion arrives
 * via mpi3mr_pel_wait_complete().  A failed admin post is retried
 * immediately up to MPI3MR_PELCMDS_RETRYCOUNT times; on final
 * failure the tracker is released and the PEL wait is marked not
 * pending.
 */
void
mpi3mr_issue_pel_wait(struct mpi3mr_softc *sc,
		      struct mpi3mr_drvr_cmd *drvr_cmd)
{
	U8 retry_count = 0;
	Mpi3PELReqActionWait_t pel_wait;
	mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__);

	/* A new wait supersedes any previously requested abort. */
	sc->pel_abort_requested = 0;

	memset(&pel_wait, 0, sizeof(pel_wait));
	drvr_cmd->state = MPI3MR_CMD_PENDING;
	/* Completion is delivered via the callback, not by sleeping here. */
	drvr_cmd->is_waiting = 0;
	drvr_cmd->callback = mpi3mr_pel_wait_complete;
	drvr_cmd->ioc_status = 0;
	drvr_cmd->ioc_loginfo = 0;
	pel_wait.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT);
	pel_wait.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_wait.Action = MPI3_PEL_ACTION_WAIT;
	pel_wait.StartingSequenceNumber = htole32(sc->newest_seqnum);
	pel_wait.Locale = htole16(sc->pel_locale);
	pel_wait.Class = htole16(sc->pel_class);
	pel_wait.WaitTime = MPI3_PEL_WAITTIME_INFINITE_WAIT;
	printf(IOCNAME "Issuing PELWait: seqnum %u class %u locale 0x%08x\n",
	       sc->name, sc->newest_seqnum, sc->pel_class, sc->pel_locale);
retry_pel_wait:
	if (mpi3mr_submit_admin_cmd(sc, &pel_wait, sizeof(pel_wait))) {
		printf(IOCNAME "%s: Issue PELWait IOCTL: Admin Post failed\n", sc->name, __func__);
		/* Retry the admin post a bounded number of times. */
		if (retry_count < MPI3MR_PELCMDS_RETRYCOUNT) {
			retry_count++;
			goto retry_pel_wait;
		}
		goto out_failed;
	}
	return;
out_failed:
	/* Release the tracker and mark the PEL wait as no longer pending. */
	drvr_cmd->state = MPI3MR_CMD_NOTUSED;
	drvr_cmd->callback = NULL;
	drvr_cmd->retry_count = 0;
	sc->pel_wait_pend = 0;
	return;
}
1425 
/**
 * mpi3mr_send_pel_getseq - Issue a PEL get-sequence-number request
 * @sc: Adapter instance reference
 * @drvr_cmd: Internal command tracker to clean up on failure
 *
 * Builds and posts an asynchronous MPI3_PEL_ACTION_GET_SEQNUM
 * request whose reply data lands in the pre-allocated
 * pel_seq_number DMA buffer; completion arrives via
 * mpi3mr_pel_getseq_complete().  A failed admin post is retried
 * immediately up to MPI3MR_PELCMDS_RETRYCOUNT times; on final
 * failure the tracker is released and the PEL wait is marked not
 * pending.
 */
void
mpi3mr_send_pel_getseq(struct mpi3mr_softc *sc,
		       struct mpi3mr_drvr_cmd *drvr_cmd)
{
	U8 retry_count = 0;
	U8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
	Mpi3PELReqActionGetSequenceNumbers_t pel_getseq_req;

	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
	sc->pel_cmds.state = MPI3MR_CMD_PENDING;
	/* Completion is delivered via the callback, not by sleeping here. */
	sc->pel_cmds.is_waiting = 0;
	sc->pel_cmds.ioc_status = 0;
	sc->pel_cmds.ioc_loginfo = 0;
	sc->pel_cmds.callback = mpi3mr_pel_getseq_complete;
	pel_getseq_req.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT);
	pel_getseq_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_getseq_req.Action = MPI3_PEL_ACTION_GET_SEQNUM;
	/* Single simple SGE pointing at the sequence-number DMA buffer. */
	mpi3mr_add_sg_single(&pel_getseq_req.SGL, sgl_flags,
			     sc->pel_seq_number_sz, sc->pel_seq_number_dma);

retry_pel_getseq:
	if (mpi3mr_submit_admin_cmd(sc, &pel_getseq_req, sizeof(pel_getseq_req))) {
		printf(IOCNAME "%s: Issuing PEL GetSeq IOCTL: Admin Post failed\n", sc->name, __func__);
		/* Retry the admin post a bounded number of times. */
		if (retry_count < MPI3MR_PELCMDS_RETRYCOUNT) {
			retry_count++;
			goto retry_pel_getseq;
		}
		goto out_failed;
	}
	return;
out_failed:
	/* Release the tracker and mark the PEL wait as no longer pending. */
	drvr_cmd->state = MPI3MR_CMD_NOTUSED;
	drvr_cmd->callback = NULL;
	drvr_cmd->retry_count = 0;
	sc->pel_wait_pend = 0;
}
1462 
/**
 * mpi3mr_pel_getseq_complete - PEL get-sequence-number completion
 * @sc: Adapter instance reference
 * @drvr_cmd: Internal command tracker for the request
 *
 * Completion handler for the PEL get-sequence-number request.  On
 * success it records the next sequence number to wait for (newest
 * known + 1) and issues the PEL wait request.  A failed reply is
 * retried up to MPI3MR_PELCMDS_RETRYCOUNT times before the PEL
 * wait is marked no longer pending.
 */
void
mpi3mr_pel_getseq_complete(struct mpi3mr_softc *sc,
			   struct mpi3mr_drvr_cmd *drvr_cmd)
{
	U8 retry = 0;
	Mpi3PELReply_t *pel_reply = NULL;
	/* Sequence numbers were DMA'd into the pre-allocated buffer. */
	Mpi3PELSeq_t *pel_seq_num = (Mpi3PELSeq_t *)sc->pel_seq_number;
	mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__);

	/* Command flushed by a controller reset: just release the tracker. */
	if (drvr_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drvrcmd;

	if (!(drvr_cmd->state & MPI3MR_CMD_REPLYVALID)) {
		printf(IOCNAME "%s: PELGetSeqNum Failed, No Reply\n", sc->name, __func__);
		goto out_failed;
	}
	pel_reply = (Mpi3PELReply_t *)drvr_cmd->reply;

	if (((GET_IOC_STATUS(drvr_cmd->ioc_status)) != MPI3_IOCSTATUS_SUCCESS)
	    || (le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_SUCCESS)){
		printf(IOCNAME "%s: PELGetSeqNum Failed, IOCStatus(0x%04x) Loginfo(0x%08x) PEL_LogStatus(0x%04x)\n",
		       sc->name, __func__, GET_IOC_STATUS(drvr_cmd->ioc_status),
		       drvr_cmd->ioc_loginfo, le16toh(pel_reply->PELogStatus));
		retry = 1;
	}

	if (retry) {
		/* Re-issue the get-seqnum until the retry budget is exhausted. */
		if (drvr_cmd->retry_count < MPI3MR_PELCMDS_RETRYCOUNT) {
			drvr_cmd->retry_count++;
			printf(IOCNAME "%s : PELGetSeqNUM retry=%d\n", sc->name,
			       __func__,  drvr_cmd->retry_count);
			mpi3mr_send_pel_getseq(sc, drvr_cmd);
			return;
		}
		printf(IOCNAME "%s :PELGetSeqNUM failed after all retries\n",
		       sc->name, __func__);
		goto out_failed;
	}

	/* Wait for the entry after the newest one firmware reported. */
	sc->newest_seqnum = le32toh(pel_seq_num->Newest) + 1;
	drvr_cmd->retry_count = 0;
	mpi3mr_issue_pel_wait(sc, drvr_cmd);
	return;
out_failed:
	sc->pel_wait_pend = 0;
cleanup_drvrcmd:
	drvr_cmd->state = MPI3MR_CMD_NOTUSED;
	drvr_cmd->callback = NULL;
	drvr_cmd->retry_count = 0;
}
1513 
1514 static int
mpi3mr_pel_getseq(struct mpi3mr_softc * sc)1515 mpi3mr_pel_getseq(struct mpi3mr_softc *sc)
1516 {
1517 	int rval = 0;
1518 	U8 sgl_flags = 0;
1519 	Mpi3PELReqActionGetSequenceNumbers_t pel_getseq_req;
1520 	mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__);
1521 
1522 	if (sc->reset_in_progress || sc->block_ioctls) {
1523 		printf(IOCNAME "%s: IOCTL failed: reset in progress: %u ioctls blocked: %u\n",
1524 		       sc->name, __func__, sc->reset_in_progress, sc->block_ioctls);
1525 		return -1;
1526 	}
1527 
1528 	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
1529 	sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
1530 	sc->pel_cmds.state = MPI3MR_CMD_PENDING;
1531 	sc->pel_cmds.is_waiting = 0;
1532 	sc->pel_cmds.retry_count = 0;
1533 	sc->pel_cmds.ioc_status = 0;
1534 	sc->pel_cmds.ioc_loginfo = 0;
1535 	sc->pel_cmds.callback = mpi3mr_pel_getseq_complete;
1536 	pel_getseq_req.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT);
1537 	pel_getseq_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
1538 	pel_getseq_req.Action = MPI3_PEL_ACTION_GET_SEQNUM;
1539 	mpi3mr_add_sg_single(&pel_getseq_req.SGL, sgl_flags,
1540 			     sc->pel_seq_number_sz, sc->pel_seq_number_dma);
1541 
1542 	if ((rval = mpi3mr_submit_admin_cmd(sc, &pel_getseq_req, sizeof(pel_getseq_req))))
1543 		printf(IOCNAME "%s: Issue IOCTL: Admin Post failed\n", sc->name, __func__);
1544 
1545 	return rval;
1546 }
1547 
/**
 * mpi3mr_pel_abort - Abort the outstanding PEL wait request
 * @sc: Adapter instance reference
 *
 * Synchronously issues a PEL abort for the in-flight PEL wait
 * (identified by MPI3MR_HOSTTAG_PELWAIT) and waits for its
 * completion.  On timeout a soft reset is scheduled.  The
 * pel_abort_cmd completion lock is held across the issue/wait to
 * serialize users of the abort tracker.
 *
 * Return: 0 on success, -1 on any failure.
 */
int
mpi3mr_pel_abort(struct mpi3mr_softc *sc)
{
	int retval = 0;
	U16 pel_log_status;
	Mpi3PELReqActionAbort_t pel_abort_req;
	Mpi3PELReply_t *pel_reply = NULL;

	if (sc->reset_in_progress || sc->block_ioctls) {
		printf(IOCNAME "%s: IOCTL failed: reset in progress: %u ioctls blocked: %u\n",
		       sc->name, __func__, sc->reset_in_progress, sc->block_ioctls);
		return -1;
	}

	memset(&pel_abort_req, 0, sizeof(pel_abort_req));

	/* Serialize use of the single pel_abort_cmd tracker. */
	mtx_lock(&sc->pel_abort_cmd.completion.lock);
	if (sc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) {
		printf(IOCNAME "%s: PEL Abort command is in use\n", sc->name,  __func__);
		mtx_unlock(&sc->pel_abort_cmd.completion.lock);
		return -1;
	}

	sc->pel_abort_cmd.state = MPI3MR_CMD_PENDING;
	/* This command is waited on synchronously (no callback). */
	sc->pel_abort_cmd.is_waiting = 1;
	sc->pel_abort_cmd.callback = NULL;
	pel_abort_req.HostTag = htole16(MPI3MR_HOSTTAG_PELABORT);
	pel_abort_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_abort_req.Action = MPI3_PEL_ACTION_ABORT;
	/* Target the outstanding PEL wait request by its host tag. */
	pel_abort_req.AbortHostTag = htole16(MPI3MR_HOSTTAG_PELWAIT);

	/* Tells the wait completion path not to re-arm the PEL machinery. */
	sc->pel_abort_requested = 1;

	init_completion(&sc->pel_abort_cmd.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &pel_abort_req, sizeof(pel_abort_req));
	if (retval) {
		printf(IOCNAME "%s: Issue IOCTL: Admin Post failed\n", sc->name, __func__);
		sc->pel_abort_requested = 0;
		retval = -1;
		goto out_unlock;
	}
	wait_for_completion_timeout(&sc->pel_abort_cmd.completion, MPI3MR_INTADMCMD_TIMEOUT);

	if (!(sc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: schedule a soft reset to recover the controller. */
		printf(IOCNAME "%s: PEL Abort command timedout\n",sc->name,  __func__);
		sc->pel_abort_cmd.is_waiting = 0;
		retval = -1;
		sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET;
		sc->reset.reason = MPI3MR_RESET_FROM_PELABORT_TIMEOUT;
		goto out_unlock;
	}
	if (((GET_IOC_STATUS(sc->pel_abort_cmd.ioc_status)) != MPI3_IOCSTATUS_SUCCESS)
	    || (!(sc->pel_abort_cmd.state & MPI3MR_CMD_REPLYVALID))) {
		printf(IOCNAME "%s: PEL Abort command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
		       sc->name, __func__, GET_IOC_STATUS(sc->pel_abort_cmd.ioc_status),
		       sc->pel_abort_cmd.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/* IOC status was good; also check the PEL-specific log status. */
	pel_reply = (Mpi3PELReply_t *)sc->pel_abort_cmd.reply;
	pel_log_status = le16toh(pel_reply->PELogStatus);
	if (pel_log_status != MPI3_PEL_STATUS_SUCCESS) {
		printf(IOCNAME "%s: PEL abort command failed, pel_status(0x%04x)\n",
		       sc->name, __func__, pel_log_status);
		retval = -1;
	}

out_unlock:
	mtx_unlock(&sc->pel_abort_cmd.completion.lock);
	sc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
	return retval;
}
1621 
1622 /**
1623  * mpi3mr_pel_enable - Handler for PEL enable
1624  * @sc: Adapter instance reference
1625  * @data_out_buf: User buffer containing PEL enable data
1626  * @data_out_sz: length of the user buffer.
1627  *
1628  * This function is the handler for PEL enable driver IOCTL.
1629  * Validates the application given class and locale and if
1630  * requires aborts the existing PEL wait request and/or issues
1631  * new PEL wait request to the firmware and returns.
1632  *
1633  * Return: 0 on success and proper error codes on failure.
1634  */
1635 static long
mpi3mr_pel_enable(struct mpi3mr_softc * sc,void * data_out_buf,U32 data_out_sz)1636 mpi3mr_pel_enable(struct mpi3mr_softc *sc,
1637 		  void *data_out_buf, U32 data_out_sz)
1638 {
1639 	long rval = EINVAL;
1640 	U8 tmp_class;
1641 	U16 tmp_locale;
1642 	struct mpi3mr_ioctl_pel_enable pel_enable;
1643 	mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__);
1644 
1645 
1646 	if ((data_out_sz != sizeof(pel_enable) ||
1647 	    (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT))) {
1648 		printf(IOCNAME "%s: Invalid user pel_enable buffer size %u\n",
1649 		       sc->name, __func__, data_out_sz);
1650 		goto out;
1651 	}
1652 	memset(&pel_enable, 0, sizeof(pel_enable));
1653 	if (copyin(data_out_buf, &pel_enable, sizeof(pel_enable))) {
1654 		printf(IOCNAME "failure at %s() line:%d\n", sc->name,
1655 		       __func__, __LINE__);
1656 		rval = EFAULT;
1657 		goto out;
1658 	}
1659 	if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) {
1660 		printf(IOCNAME "%s: out of range  class %d\n",
1661 		       sc->name, __func__, pel_enable.pel_class);
1662 		goto out;
1663 	}
1664 
1665 	if (sc->pel_wait_pend) {
1666 		if ((sc->pel_class <= pel_enable.pel_class) &&
1667 		    !((sc->pel_locale & pel_enable.pel_locale) ^
1668 		      pel_enable.pel_locale)) {
1669 			rval = 0;
1670 			goto out;
1671 		} else {
1672 			pel_enable.pel_locale |= sc->pel_locale;
1673 			if (sc->pel_class < pel_enable.pel_class)
1674 				pel_enable.pel_class = sc->pel_class;
1675 
1676 			if (mpi3mr_pel_abort(sc)) {
1677 				printf(IOCNAME "%s: pel_abort failed, status(%ld)\n",
1678 				       sc->name, __func__, rval);
1679 				goto out;
1680 			}
1681 		}
1682 	}
1683 
1684 	tmp_class = sc->pel_class;
1685 	tmp_locale = sc->pel_locale;
1686 	sc->pel_class = pel_enable.pel_class;
1687 	sc->pel_locale = pel_enable.pel_locale;
1688 	sc->pel_wait_pend = 1;
1689 
1690 	if ((rval = mpi3mr_pel_getseq(sc))) {
1691 		sc->pel_class = tmp_class;
1692 		sc->pel_locale = tmp_locale;
1693 		sc->pel_wait_pend = 0;
1694 		printf(IOCNAME "%s: pel get sequence number failed, status(%ld)\n",
1695 		       sc->name, __func__, rval);
1696 	}
1697 
1698 out:
1699 	return rval;
1700 }
1701 
1702 void
mpi3mr_app_save_logdata(struct mpi3mr_softc * sc,char * event_data,U16 event_data_size)1703 mpi3mr_app_save_logdata(struct mpi3mr_softc *sc, char *event_data,
1704 			U16 event_data_size)
1705 {
1706 	struct mpi3mr_log_data_entry *entry;
1707 	U32 index = sc->log_data_buffer_index, sz;
1708 
1709 	if (!(sc->log_data_buffer))
1710 		return;
1711 
1712 	entry = (struct mpi3mr_log_data_entry *)
1713 		(sc->log_data_buffer + (index * sc->log_data_entry_size));
1714 	entry->valid_entry = 1;
1715 	sz = min(sc->log_data_entry_size, event_data_size);
1716 	memcpy(entry->data, event_data, sz);
1717 	sc->log_data_buffer_index =
1718 		((++index) % MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES);
1719 	mpi3mr_app_send_aen(sc);
1720 }
1721 
1722 /**
1723  * mpi3mr_get_logdata - Handler for get log data
1724  * @sc: Adapter instance reference
1725  * @data_in_buf: User buffer to copy the logdata entries
1726  * @data_in_sz: length of the user buffer.
1727  *
1728  * This function copies the log data entries to the user buffer
1729  * when log caching is enabled in the driver.
1730  *
1731  * Return: 0 on success and proper error codes on failure
1732  */
1733 static long
mpi3mr_get_logdata(struct mpi3mr_softc * sc,void * data_in_buf,U32 data_in_sz)1734 mpi3mr_get_logdata(struct mpi3mr_softc *sc,
1735 		   void *data_in_buf, U32 data_in_sz)
1736 {
1737 	long rval = EINVAL;
1738 	U16 num_entries = 0;
1739 	U16 entry_sz = sc->log_data_entry_size;
1740 
1741 	if ((!sc->log_data_buffer) || (data_in_sz < entry_sz))
1742 		return rval;
1743 
1744 	num_entries = data_in_sz / entry_sz;
1745 	if (num_entries > MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES)
1746 		num_entries = MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES;
1747 
1748         if ((rval = copyout(sc->log_data_buffer, data_in_buf, (num_entries * entry_sz)))) {
1749 		printf(IOCNAME "%s: copy to user failed\n", sc->name, __func__);
1750 		rval = EFAULT;
1751 	}
1752 
1753 	return rval;
1754 }
1755 
1756 /**
1757  * mpi3mr_logdata_enable - Handler for log data enable
1758  * @sc: Adapter instance reference
1759  * @data_in_buf: User buffer to copy the max logdata entry count
1760  * @data_in_sz: length of the user buffer.
1761  *
1762  * This function enables log data caching in the driver if not
1763  * already enabled and return the maximum number of log data
1764  * entries that can be cached in the driver.
1765  *
1766  * Return: 0 on success and proper error codes on failure
1767  */
1768 static long
mpi3mr_logdata_enable(struct mpi3mr_softc * sc,void * data_in_buf,U32 data_in_sz)1769 mpi3mr_logdata_enable(struct mpi3mr_softc *sc,
1770 		      void *data_in_buf, U32 data_in_sz)
1771 {
1772 	long rval = EINVAL;
1773 	struct mpi3mr_ioctl_logdata_enable logdata_enable;
1774 
1775 	if (data_in_sz < sizeof(logdata_enable))
1776 		return rval;
1777 
1778 	if (sc->log_data_buffer)
1779 		goto copy_data;
1780 
1781 	sc->log_data_entry_size = (sc->reply_sz - (sizeof(Mpi3EventNotificationReply_t) - 4))
1782 				   + MPI3MR_IOCTL_LOGDATA_ENTRY_HEADER_SZ;
1783 
1784 	sc->log_data_buffer = malloc((MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES * sc->log_data_entry_size),
1785 				     M_MPI3MR, M_NOWAIT | M_ZERO);
1786 	if (!sc->log_data_buffer) {
1787 		printf(IOCNAME "%s log data buffer memory allocation failed\n", sc->name, __func__);
1788 		return ENOMEM;
1789 	}
1790 
1791 	sc->log_data_buffer_index = 0;
1792 
1793 copy_data:
1794 	memset(&logdata_enable, 0, sizeof(logdata_enable));
1795 	logdata_enable.max_entries = MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES;
1796 
1797         if ((rval = copyout(&logdata_enable, data_in_buf, sizeof(logdata_enable)))) {
1798 		printf(IOCNAME "%s: copy to user failed\n", sc->name, __func__);
1799 		rval = EFAULT;
1800 	}
1801 
1802 	return rval;
1803 }
1804 
1805 /**
1806  * mpi3mr_get_change_count - Get topology change count
1807  * @sc: Adapter instance reference
1808  * @data_in_buf: User buffer to copy the change count
1809  * @data_in_sz: length of the user buffer.
1810  *
1811  * This function copies the toplogy change count provided by the
1812  * driver in events and cached in the driver to the user
1813  * provided buffer for the specific controller.
1814  *
1815  * Return: 0 on success and proper error codes on failure
1816  */
1817 static long
mpi3mr_get_change_count(struct mpi3mr_softc * sc,void * data_in_buf,U32 data_in_sz)1818 mpi3mr_get_change_count(struct mpi3mr_softc *sc,
1819 			void *data_in_buf, U32 data_in_sz)
1820 {
1821         long rval = EINVAL;
1822         struct mpi3mr_ioctl_chgcnt chg_count;
1823         memset(&chg_count, 0, sizeof(chg_count));
1824 
1825         chg_count.change_count = sc->change_count;
1826         if (data_in_sz >= sizeof(chg_count)) {
1827                 if ((rval = copyout(&chg_count, data_in_buf, sizeof(chg_count)))) {
1828                         printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__,
1829 			       __LINE__, __func__);
1830                         rval = EFAULT;
1831                 }
1832         }
1833         return rval;
1834 }
1835 
1836 /**
1837  * mpi3mr_get_alltgtinfo - Get all targets information
1838  * @sc: Adapter instance reference
1839  * @data_in_buf: User buffer to copy the target information
1840  * @data_in_sz: length of the user buffer.
1841  *
1842  * This function copies the driver managed target devices device
1843  * handle, persistent ID, bus ID and taret ID to the user
1844  * provided buffer for the specific controller. This function
1845  * also provides the number of devices managed by the driver for
1846  * the specific controller.
1847  *
1848  * Return: 0 on success and proper error codes on failure
1849  */
static long
mpi3mr_get_alltgtinfo(struct mpi3mr_softc *sc,
		      void *data_in_buf, U32 data_in_sz)
{
	long rval = EINVAL;
        U8 get_count = 0;	/* set when caller only wants the device count */
	U16 i = 0, num_devices = 0;
        U32 min_entrylen = 0, kern_entrylen = 0, user_entrylen = 0;
	struct mpi3mr_target *tgtdev = NULL;
        struct mpi3mr_device_map_info *devmap_info = NULL;
	struct mpi3mr_cam_softc *cam_sc = sc->cam_sc;
        struct mpi3mr_ioctl_all_tgtinfo *all_tgtinfo = (struct mpi3mr_ioctl_all_tgtinfo *)data_in_buf;

	/* The user buffer must at least hold the 32-bit device count. */
        if (data_in_sz < sizeof(uint32_t)) {
                printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__,
		       __LINE__, __func__);
                goto out;
        }
	/* A count-sized buffer means "only report how many devices exist". */
        if (data_in_sz == sizeof(uint32_t))
                get_count = 1;

	if (TAILQ_EMPTY(&cam_sc->tgt_list)) {
                get_count = 1;
                goto copy_usrbuf;
	}

	/* First pass: count targets under the spin lock guarding tgt_list. */
	mtx_lock_spin(&cam_sc->sc->target_lock);
	TAILQ_FOREACH(tgtdev, &cam_sc->tgt_list, tgt_next) {
		num_devices++;
	}
	mtx_unlock_spin(&cam_sc->sc->target_lock);

        if (get_count)
                goto copy_usrbuf;

        kern_entrylen = num_devices * sizeof(*devmap_info);

	devmap_info = malloc(kern_entrylen, M_MPI3MR, M_NOWAIT | M_ZERO);
        if (!devmap_info) {
                printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__,
		       __LINE__, __func__);
                rval = ENOMEM;
                goto out;
        }
	/* Pre-fill with the all-ones "invalid" sentinel for unused fields. */
        memset((U8*)devmap_info, 0xFF, kern_entrylen);

	/*
	 * Second pass: fill the entries.  The lock was dropped between the
	 * passes, so the list may have changed; the i < num_devices guard
	 * prevents overrunning the allocation if it grew.
	 */
	mtx_lock_spin(&cam_sc->sc->target_lock);
	TAILQ_FOREACH(tgtdev, &cam_sc->tgt_list, tgt_next) {
                if (i < num_devices) {
                        devmap_info[i].handle = tgtdev->dev_handle;
                        devmap_info[i].per_id = tgtdev->per_id;
			/*
			 * For hidden/ugood device the target_id and bus_id
			 * should be 0xFFFFFFFF and 0xFF
			 */
			if (!tgtdev->exposed_to_os) {
                                devmap_info[i].target_id = 0xFFFFFFFF;
                                devmap_info[i].bus_id = 0xFF;
                        } else {
                                devmap_info[i].target_id = tgtdev->tid;
                                devmap_info[i].bus_id = 0;
			}
                        i++;
                }
        }
	/* The list may also have shrunk; report only the entries filled. */
        num_devices = i;
	mtx_unlock_spin(&cam_sc->sc->target_lock);

copy_usrbuf:
	/* The device count is always copied out first. */
        if (copyout(&num_devices, &all_tgtinfo->num_devices, sizeof(num_devices))) {
                printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__,
		       __LINE__, __func__);
                rval = EFAULT;
                goto out;
        }
	/* Copy only as many whole entries as both buffers can hold. */
        user_entrylen = (data_in_sz - sizeof(uint32_t))/sizeof(*devmap_info);
        user_entrylen *= sizeof(*devmap_info);
        min_entrylen = min(user_entrylen, kern_entrylen);
        if (min_entrylen && (copyout(devmap_info, &all_tgtinfo->dmi, min_entrylen))) {
                printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name,
		       __FILE__, __LINE__, __func__);
                rval = EFAULT;
                goto out;
        }
	rval = 0;
out:
        if (devmap_info)
                free(devmap_info, M_MPI3MR);

        return rval;
}
1940 
1941 /**
1942  * mpi3mr_get_tgtinfo - Get specific target information
1943  * @sc: Adapter instance reference
1944  * @karg: driver ponter to users payload buffer
1945  *
1946  * This function copies the driver managed specific target device
1947  * info like handle, persistent ID, bus ID and taret ID to the user
1948  * provided buffer for the specific controller.
1949  *
1950  * Return: 0 on success and proper error codes on failure
1951  */
static long
mpi3mr_get_tgtinfo(struct mpi3mr_softc *sc,
		   struct mpi3mr_ioctl_drvcmd *karg)
{
	long rval = EINVAL;
	struct mpi3mr_target *tgtdev = NULL;
	struct mpi3mr_ioctl_tgtinfo tgtinfo;

	memset(&tgtinfo, 0, sizeof(tgtinfo));

	/* The same tgtinfo structure is used for both input and output. */
	if ((karg->data_out_size != sizeof(struct mpi3mr_ioctl_tgtinfo)) ||
	    (karg->data_in_size != sizeof(struct mpi3mr_ioctl_tgtinfo))) {
		printf(IOCNAME "Invalid user tgtinfo buffer size %s() line: %d\n", sc->name,
		       __func__, __LINE__);
		goto out;
	}

	if (copyin(karg->data_out_buf, &tgtinfo, sizeof(tgtinfo))) {
		printf(IOCNAME "failure at %s() line:%d\n", sc->name,
		       __func__, __LINE__);
		rval = EFAULT;
		goto out;
	}

	/*
	 * Exactly one lookup key may be supplied; the other fields must be
	 * left at their all-ones "wildcard" values.  Keys are tried in the
	 * order: bus/target ID pair, persistent ID, device handle.
	 */
	if ((tgtinfo.bus_id != 0xFF) && (tgtinfo.target_id != 0xFFFFFFFF)) {
		if ((tgtinfo.persistent_id != 0xFFFF) ||
		    (tgtinfo.dev_handle != 0xFFFF))
			goto out;
		/* NOTE(review): target_id is looked up as a persistent ID —
		 * presumably the driver exposes a single bus; confirm. */
		tgtdev = mpi3mr_find_target_by_per_id(sc->cam_sc, tgtinfo.target_id);
	} else if (tgtinfo.persistent_id != 0xFFFF) {
		if ((tgtinfo.bus_id != 0xFF) ||
		    (tgtinfo.dev_handle !=0xFFFF) ||
		    (tgtinfo.target_id != 0xFFFFFFFF))
			goto out;
		tgtdev = mpi3mr_find_target_by_per_id(sc->cam_sc, tgtinfo.persistent_id);
	} else if (tgtinfo.dev_handle !=0xFFFF) {
		if ((tgtinfo.bus_id != 0xFF) ||
		    (tgtinfo.target_id != 0xFFFFFFFF) ||
		    (tgtinfo.persistent_id != 0xFFFF))
			goto out;
		tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, tgtinfo.dev_handle);
	}
	/* Either no key matched a device, or no key was supplied at all. */
	if (!tgtdev)
		goto out;

	/* Fill in every identifier for the matched device. */
	tgtinfo.target_id = tgtdev->per_id;
	tgtinfo.bus_id = 0;
	tgtinfo.dev_handle = tgtdev->dev_handle;
	tgtinfo.persistent_id = tgtdev->per_id;
	tgtinfo.seq_num = 0;

	if (copyout(&tgtinfo, karg->data_in_buf, sizeof(tgtinfo))) {
		printf(IOCNAME "failure at %s() line:%d\n", sc->name,
		       __func__, __LINE__);
		rval = EFAULT;
	}

out:
	return rval;
}
2012 
2013 /**
2014  * mpi3mr_get_pciinfo - Get PCI info IOCTL handler
2015  * @sc: Adapter instance reference
2016  * @data_in_buf: User buffer to hold adapter information
2017  * @data_in_sz: length of the user buffer.
2018  *
2019  * This function provides the PCI spec information for the
2020  * given controller
2021  *
2022  * Return: 0 on success and proper error codes on failure
2023  */
2024 static long
mpi3mr_get_pciinfo(struct mpi3mr_softc * sc,void * data_in_buf,U32 data_in_sz)2025 mpi3mr_get_pciinfo(struct mpi3mr_softc *sc,
2026 		   void *data_in_buf, U32 data_in_sz)
2027 {
2028 	long rval = EINVAL;
2029 	U8 i;
2030 	struct mpi3mr_ioctl_pciinfo pciinfo;
2031 	memset(&pciinfo, 0, sizeof(pciinfo));
2032 
2033 	for (i = 0; i < 64; i++)
2034 		pciinfo.config_space[i] = pci_read_config(sc->mpi3mr_dev, (i * 4), 4);
2035 
2036 	if (data_in_sz >= sizeof(pciinfo)) {
2037 		if ((rval = copyout(&pciinfo, data_in_buf, sizeof(pciinfo)))) {
2038 			printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name,
2039 			       __FILE__, __LINE__, __func__);
2040 			rval = EFAULT;
2041 		}
2042 	}
2043 	return rval;
2044 }
2045 
2046 /**
2047  * mpi3mr_get_adpinfo - Get adapter info IOCTL handler
2048  * @sc: Adapter instance reference
2049  * @data_in_buf: User buffer to hold adapter information
2050  * @data_in_sz: length of the user buffer.
2051  *
2052  * This function provides adapter information for the given
2053  * controller
2054  *
2055  * Return: 0 on success and proper error codes on failure
2056  */
2057 static long
mpi3mr_get_adpinfo(struct mpi3mr_softc * sc,void * data_in_buf,U32 data_in_sz)2058 mpi3mr_get_adpinfo(struct mpi3mr_softc *sc,
2059 		   void *data_in_buf, U32 data_in_sz)
2060 {
2061 	long rval = EINVAL;
2062 	struct mpi3mr_ioctl_adpinfo adpinfo;
2063 	enum mpi3mr_iocstate ioc_state;
2064 	memset(&adpinfo, 0, sizeof(adpinfo));
2065 
2066 	adpinfo.adp_type = MPI3MR_IOCTL_ADPTYPE_AVGFAMILY;
2067 	adpinfo.pci_dev_id = pci_get_device(sc->mpi3mr_dev);
2068 	adpinfo.pci_dev_hw_rev = pci_read_config(sc->mpi3mr_dev, PCIR_REVID, 1);
2069 	adpinfo.pci_subsys_dev_id = pci_get_subdevice(sc->mpi3mr_dev);
2070 	adpinfo.pci_subsys_ven_id = pci_get_subvendor(sc->mpi3mr_dev);
2071 	adpinfo.pci_bus = pci_get_bus(sc->mpi3mr_dev);
2072 	adpinfo.pci_dev = pci_get_slot(sc->mpi3mr_dev);
2073 	adpinfo.pci_func = pci_get_function(sc->mpi3mr_dev);
2074 	adpinfo.pci_seg_id = pci_get_domain(sc->mpi3mr_dev);
2075 	adpinfo.ioctl_ver = MPI3MR_IOCTL_VERSION;
2076 	memcpy((U8 *)&adpinfo.driver_info, (U8 *)&sc->driver_info, sizeof(adpinfo.driver_info));
2077 
2078 	ioc_state = mpi3mr_get_iocstate(sc);
2079 
2080 	if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
2081 		adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_UNRECOVERABLE;
2082 	else if (sc->reset_in_progress || sc->block_ioctls)
2083 		adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_IN_RESET;
2084 	else if (ioc_state == MRIOC_STATE_FAULT)
2085 		adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_FAULT;
2086 	else
2087 		adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_OPERATIONAL;
2088 
2089 	if (data_in_sz >= sizeof(adpinfo)) {
2090 		if ((rval = copyout(&adpinfo, data_in_buf, sizeof(adpinfo)))) {
2091 			printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name,
2092 			       __FILE__, __LINE__, __func__);
2093 			rval = EFAULT;
2094 		}
2095 	}
2096 	return rval;
2097 }
2098 /**
2099  * mpi3mr_app_drvrcmds - Driver IOCTL handler
2100  * @dev: char device
2101  * @cmd: IOCTL command
2102  * @arg: User data payload buffer for the IOCTL
2103  * @flag: flags
2104  * @thread: threads
2105  *
2106  * This function is the top level handler for driver commands,
2107  * this does basic validation of the buffer and identifies the
2108  * opcode and switches to correct sub handler.
2109  *
2110  * Return: 0 on success and proper error codes on failure
2111  */
2112 
2113 static int
mpi3mr_app_drvrcmds(struct cdev * dev,u_long cmd,void * uarg,int flag,struct thread * td)2114 mpi3mr_app_drvrcmds(struct cdev *dev, u_long cmd,
2115 		    void *uarg, int flag, struct thread *td)
2116 {
2117 	long rval = EINVAL;
2118 	struct mpi3mr_softc *sc = NULL;
2119 	struct mpi3mr_ioctl_drvcmd *karg = (struct mpi3mr_ioctl_drvcmd *)uarg;
2120 
2121 	sc = mpi3mr_app_get_adp_instance(karg->mrioc_id);
2122 	if (!sc)
2123 		return ENODEV;
2124 
2125 	mtx_lock(&sc->ioctl_cmds.completion.lock);
2126 	switch (karg->opcode) {
2127 	case MPI3MR_DRVRIOCTL_OPCODE_ADPINFO:
2128 		rval = mpi3mr_get_adpinfo(sc, karg->data_in_buf, karg->data_in_size);
2129 		break;
2130 	case MPI3MR_DRVRIOCTL_OPCODE_GETPCIINFO:
2131 		rval = mpi3mr_get_pciinfo(sc, karg->data_in_buf, karg->data_in_size);
2132 		break;
2133 	case MPI3MR_DRVRIOCTL_OPCODE_TGTDEVINFO:
2134 		rval = mpi3mr_get_tgtinfo(sc, karg);
2135 		break;
2136 	case MPI3MR_DRVRIOCTL_OPCODE_ALLTGTDEVINFO:
2137                 rval = mpi3mr_get_alltgtinfo(sc, karg->data_in_buf, karg->data_in_size);
2138                 break;
2139         case MPI3MR_DRVRIOCTL_OPCODE_GETCHGCNT:
2140                 rval = mpi3mr_get_change_count(sc, karg->data_in_buf, karg->data_in_size);
2141                 break;
2142 	case MPI3MR_DRVRIOCTL_OPCODE_LOGDATAENABLE:
2143 		rval = mpi3mr_logdata_enable(sc, karg->data_in_buf, karg->data_in_size);
2144 		break;
2145 	case MPI3MR_DRVRIOCTL_OPCODE_GETLOGDATA:
2146 		rval = mpi3mr_get_logdata(sc, karg->data_in_buf, karg->data_in_size);
2147 		break;
2148 	case MPI3MR_DRVRIOCTL_OPCODE_PELENABLE:
2149 		rval = mpi3mr_pel_enable(sc, karg->data_out_buf, karg->data_out_size);
2150 		break;
2151 	case MPI3MR_DRVRIOCTL_OPCODE_ADPRESET:
2152 		rval = mpi3mr_adp_reset(sc, karg->data_out_buf, karg->data_out_size);
2153 		break;
2154 	case MPI3MR_DRVRIOCTL_OPCODE_UNKNOWN:
2155 	default:
2156 		printf("Unsupported drvr ioctl opcode 0x%x\n", karg->opcode);
2157 		break;
2158 	}
2159 	mtx_unlock(&sc->ioctl_cmds.completion.lock);
2160 	return rval;
2161 }
2162 /**
2163  * mpi3mr_ioctl - IOCTL Handler
2164  * @dev: char device
2165  * @cmd: IOCTL command
2166  * @arg: User data payload buffer for the IOCTL
2167  * @flag: flags
2168  * @thread: threads
2169  *
2170  * This is the IOCTL entry point which checks the command type and
2171  * executes proper sub handler specific for the command.
2172  *
2173  * Return: 0 on success and proper error codes on failure
2174  */
2175 static int
mpi3mr_ioctl(struct cdev * dev,u_long cmd,caddr_t arg,int flag,struct thread * td)2176 mpi3mr_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2177 {
2178 	int rval = EINVAL;
2179 
2180 	struct mpi3mr_softc *sc = NULL;
2181 	struct mpi3mr_ioctl_drvcmd *karg = (struct mpi3mr_ioctl_drvcmd *)arg;
2182 
2183 	sc = mpi3mr_app_get_adp_instance(karg->mrioc_id);
2184 
2185 	if (!sc)
2186 		return ENODEV;
2187 
2188 	mpi3mr_atomic_inc(&sc->pend_ioctls);
2189 
2190 
2191 	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) {
2192 		mpi3mr_dprint(sc, MPI3MR_INFO,
2193 			"Return back IOCTL, shutdown is in progress\n");
2194 		mpi3mr_atomic_dec(&sc->pend_ioctls);
2195 		return ENODEV;
2196 	}
2197 
2198 	switch (cmd) {
2199 	case MPI3MRDRVCMD:
2200 		rval = mpi3mr_app_drvrcmds(dev, cmd, arg, flag, td);
2201 		break;
2202 	case MPI3MRMPTCMD:
2203 		mtx_lock(&sc->ioctl_cmds.completion.lock);
2204 		rval = mpi3mr_app_mptcmds(dev, cmd, arg, flag, td);
2205 		mtx_unlock(&sc->ioctl_cmds.completion.lock);
2206 		break;
2207 	default:
2208 		printf("%s:Unsupported ioctl cmd (0x%08lx)\n", MPI3MR_DRIVER_NAME, cmd);
2209 		break;
2210 	}
2211 
2212 	mpi3mr_atomic_dec(&sc->pend_ioctls);
2213 
2214 	return rval;
2215 }
2216