/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "nvme_private.h"
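/*
 * Admin command helpers.  Each routine below allocates an nvme_request,
 * fills in the admin submission queue entry, and hands it to
 * nvme_ctrlr_submit_admin_request(); completion is reported asynchronously
 * through the caller-supplied cb_fn/cb_arg pair (typically paired with a
 * polling callback such as nvme_completion_poll_cb).
 */

/* Identify, CNS=1: fetch the controller data structure. */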
void
nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload,
	    sizeof(struct nvme_controller_data), cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_IDENTIFY;

	/*
	 * TODO: create an identify command data structure, which
	 * includes this CNS bit in cdw10.
	 */
	cmd->cdw10 = htole32(1);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
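/* Identify, CNS=0: fetch the data structure for namespace nsid. */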
void
nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint32_t nsid,
    void *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload,
	    sizeof(struct nvme_namespace_data), cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_IDENTIFY;

	/*
	 * TODO: create an identify command data structure
	 */
	cmd->nsid = htole32(nsid);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
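/*
 * Create I/O Completion Queue: cdw10 carries the 0-based queue size
 * (bits 31:16) and the queue ID (bits 15:0); cdw11 carries the interrupt
 * vector (bits 31:16) plus the IEN and PC flags.
 */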
void
nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, uint16_t vector, nvme_cb_fn_t cb_fn,
    void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_CREATE_IO_CQ;

	/*
	 * TODO: create a create io completion queue command data
	 * structure.
	 */
	cmd->cdw10 = htole32(((io_que->num_entries - 1) << 16) | io_que->id);
	/* 0x3 = interrupts enabled | physically contiguous */
	cmd->cdw11 = htole32((vector << 16) | 0x3);
	cmd->prp1 = htole64(io_que->cpl_bus_addr);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
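/*
 * Create I/O Submission Queue: cdw11 pairs the new queue with the
 * completion queue of the same ID (bits 31:16) and sets the PC flag.
 */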
void
nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_CREATE_IO_SQ;

	/*
	 * TODO: create a create io submission queue command data
	 * structure.
	 */
	cmd->cdw10 = htole32(((io_que->num_entries - 1) << 16) | io_que->id);
	/* 0x1 = physically contiguous */
	cmd->cdw11 = htole32((io_que->id << 16) | 0x1);
	cmd->prp1 = htole64(io_que->cmd_bus_addr);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
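/* Delete I/O Completion Queue: cdw10 names the queue ID to delete. */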
void
nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_DELETE_IO_CQ;

	/*
	 * TODO: create a delete io completion queue command data
	 * structure.
	 */
	cmd->cdw10 = htole32(io_que->id);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
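/* Delete I/O Submission Queue: cdw10 names the queue ID to delete. */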
void
nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_DELETE_IO_SQ;

	/*
	 * TODO: create a delete io submission queue command data
	 * structure.
	 */
	cmd->cdw10 = htole32(io_que->id);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
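/*
 * Set Features: cdw10 selects the feature ID and cdw11 carries the
 * feature-specific value.  The payload/payload_size arguments are
 * accepted for API symmetry but are not attached to the request here.
 */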
void
nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
    uint32_t cdw11, void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_SET_FEATURES;
	cmd->cdw10 = htole32(feature);
	cmd->cdw11 = htole32(cdw11);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
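/*
 * Get Features: cdw10 selects the feature ID.  As with Set Features,
 * the payload arguments are currently unused.
 */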
void
nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
    uint32_t cdw11, void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_GET_FEATURES;
	cmd->cdw10 = htole32(feature);
	cmd->cdw11 = htole32(cdw11);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
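/*
 * Number of Queues feature: both the submission and completion queue
 * counts are encoded 0-based, so num_queues - 1 is written to NSQR
 * (bits 15:0) and NCQR (bits 31:16).
 */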
void
nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
    uint32_t num_queues, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = ((num_queues - 1) << 16) | (num_queues - 1);
	nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_NUMBER_OF_QUEUES, cdw11,
	    NULL, 0, cb_fn, cb_arg);
}
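/* Asynchronous Event Configuration feature: cdw11 is the event-enable mask. */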
void
nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
    uint32_t state, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = state;
	nvme_ctrlr_cmd_set_feature(ctrlr,
	    NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, NULL, 0, cb_fn,
	    cb_arg);
}
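/*
 * Interrupt Coalescing feature: the aggregation time is programmed in
 * 100-microsecond increments (bits 15:8) and the aggregation threshold
 * in completion entries (bits 7:0).  Both fields are 8 bits wide, so
 * out-of-range requests disable coalescing entirely.
 */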
void
nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
    uint32_t microseconds, uint32_t threshold, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	uint32_t cdw11;

	if ((microseconds / 100) >= 0x100) {
		nvme_printf(ctrlr, "invalid coal time %u, disabling\n",
		    microseconds);
		microseconds = 0;
		threshold = 0;
	}

	if (threshold >= 0x100) {
		nvme_printf(ctrlr, "invalid threshold %u, disabling\n",
		    threshold);
		threshold = 0;
		microseconds = 0;
	}

	cdw11 = ((microseconds / 100) << 8) | threshold;
	nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_INTERRUPT_COALESCING, cdw11,
	    NULL, 0, cb_fn, cb_arg);
}
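/*
 * Get Log Page: cdw10 encodes the 0-based payload length in dwords
 * (NUMD, starting at bit 16) and the log page ID (bits 7:0).
 */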
void
nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
    uint32_t nsid, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
    void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload, payload_size, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = htole32(nsid);
	cmd->cdw10 = htole32((((payload_size / sizeof(uint32_t)) - 1) << 16) |
	    log_page);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
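/*
 * Error Information log: the number of entries requested is clamped to
 * what the controller advertises (ELPE is also 0-based).
 */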
void
nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
    struct nvme_error_information_entry *payload, uint32_t num_entries,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{

	KASSERT(num_entries > 0, ("%s called with num_entries==0\n", __func__));

	/* The controller's error log page entry count (elpe) is 0-based. */
	KASSERT(num_entries <= (ctrlr->cdata.elpe + 1),
	    ("%s called with num_entries=%d but (elpe+1)=%d\n", __func__,
	    num_entries, ctrlr->cdata.elpe + 1));

	if (num_entries > (ctrlr->cdata.elpe + 1))
		num_entries = ctrlr->cdata.elpe + 1;

	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_ERROR,
	    NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload) * num_entries,
	    cb_fn, cb_arg);
}
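/* SMART / Health Information log for the given namespace. */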
void
nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
    uint32_t nsid, struct nvme_health_information_page *payload,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{

	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
	    nsid, payload, sizeof(*payload), cb_fn, cb_arg);
}
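/* Firmware Slot Information log (controller-wide). */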
void
nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
    struct nvme_firmware_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
{

	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_FIRMWARE_SLOT,
	    NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload), cb_fn,
	    cb_arg);
}
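/*
 * Abort: cdw10 identifies the target command by its command ID
 * (bits 31:16) and submission queue ID (bits 15:0).
 */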
void
nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
    uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_ABORT;
	cmd->cdw10 = htole32((cid << 16) | sqid);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}