/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>

#include <vm/uma.h>

#include "nvme_private.h"

struct nvme_consumer {
	uint32_t		id;
	nvme_cons_ns_fn_t	ns_fn;
	nvme_cons_ctrlr_fn_t	ctrlr_fn;
	nvme_cons_async_fn_t	async_fn;
	nvme_cons_fail_fn_t	fail_fn;
};

struct nvme_consumer nvme_consumer[NVME_MAX_CONSUMERS];
#define	INVALID_CONSUMER_ID	0xFFFF

int32_t		nvme_retry_count;

MALLOC_DEFINE(M_NVME, "nvme", "nvme(4) memory allocations");

static void
nvme_init(void)
{
	uint32_t	i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		nvme_consumer[i].id = INVALID_CONSUMER_ID;
}

SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);

static void
nvme_uninit(void)
{
}

SYSUNINIT(nvme_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_uninit, NULL);

int
nvme_shutdown(device_t dev)
{
	struct nvme_controller	*ctrlr;

	ctrlr = DEVICE2SOFTC(dev);
	nvme_ctrlr_shutdown(ctrlr);

	return (0);
}

int
nvme_attach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);
	int			status;

	status = nvme_ctrlr_construct(ctrlr, dev);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
	ctrlr->config_hook.ich_arg = ctrlr;

	if (config_intrhook_establish(&ctrlr->config_hook) != 0)
		return (ENOMEM);

	return (0);
}

int
nvme_detach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);

	config_intrhook_drain(&ctrlr->config_hook);

	nvme_ctrlr_destruct(ctrlr, dev);
	return (0);
}

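/*
 * Illustrative sketch only (kept under "#if 0", not compiled): a bus-specific
 * front end is expected to forward its device methods to the shared
 * nvme_attach()/nvme_detach()/nvme_shutdown() routines above.  The
 * example_nvme_* names below are hypothetical.
 */
#if 0
static device_method_t example_nvme_methods[] = {
	DEVMETHOD(device_probe,		example_nvme_probe),
	DEVMETHOD(device_attach,	example_nvme_attach),	/* calls nvme_attach() */
	DEVMETHOD(device_detach,	nvme_detach),
	DEVMETHOD(device_shutdown,	nvme_shutdown),
	DEVMETHOD_END
};
#endif
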
static void
nvme_notify(struct nvme_consumer *cons,
	    struct nvme_controller *ctrlr)
{
	struct nvme_namespace	*ns;
	void			*ctrlr_cookie;
	int			cmpset, ns_idx;

	/*
	 * The consumer may register itself after the nvme devices
	 * have registered with the kernel, but before the
	 * driver has completed initialization.  In that case,
	 * return here, and when initialization completes, the
	 * controller will make sure the consumer gets notified.
	 */
	if (!ctrlr->is_initialized)
		return;

	cmpset = atomic_cmpset_32(&ctrlr->notification_sent, 0, 1);
	if (cmpset == 0)
		return;

	if (cons->ctrlr_fn != NULL)
		ctrlr_cookie = (*cons->ctrlr_fn)(ctrlr);
	else
		ctrlr_cookie = (void *)(uintptr_t)0xdeadc0dedeadc0de;
	ctrlr->cons_cookie[cons->id] = ctrlr_cookie;

	/* ctrlr_fn has failed.  Nothing to notify here any more. */
	if (ctrlr_cookie == NULL) {
		(void)atomic_cmpset_32(&ctrlr->notification_sent, 1, 0);
		return;
	}

	if (ctrlr->is_failed) {
		ctrlr->cons_cookie[cons->id] = NULL;
		if (cons->fail_fn != NULL)
			(*cons->fail_fn)(ctrlr_cookie);
		/*
		 * Do not notify consumers about the namespaces of a
		 * failed controller.
		 */
		return;
	}
	for (ns_idx = 0; ns_idx < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); ns_idx++) {
		ns = &ctrlr->ns[ns_idx];
		if (ns->data.nsze == 0)
			continue;
		if (cons->ns_fn != NULL)
			ns->cons_cookie[cons->id] =
			    (*cons->ns_fn)(ns, ctrlr_cookie);
	}
}

void
nvme_notify_new_controller(struct nvme_controller *ctrlr)
{
	int i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		if (nvme_consumer[i].id != INVALID_CONSUMER_ID) {
			nvme_notify(&nvme_consumer[i], ctrlr);
		}
	}
}

static void
nvme_notify_new_consumer(struct nvme_consumer *cons)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	int			dev_idx, devcount;

	if (devclass_get_devices(devclass_find("nvme"), &devlist, &devcount))
		return;

	for (dev_idx = 0; dev_idx < devcount; dev_idx++) {
		ctrlr = DEVICE2SOFTC(devlist[dev_idx]);
		nvme_notify(cons, ctrlr);
	}

	free(devlist, M_TEMP);
}

void
nvme_notify_async_consumers(struct nvme_controller *ctrlr,
			    const struct nvme_completion *async_cpl,
			    uint32_t log_page_id, void *log_page_buffer,
			    uint32_t log_page_size)
{
	struct nvme_consumer	*cons;
	void			*ctrlr_cookie;
	uint32_t		i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->async_fn != NULL &&
		    (ctrlr_cookie = ctrlr->cons_cookie[i]) != NULL) {
			(*cons->async_fn)(ctrlr_cookie, async_cpl,
			    log_page_id, log_page_buffer, log_page_size);
		}
	}
}

void
nvme_notify_fail_consumers(struct nvme_controller *ctrlr)
{
	struct nvme_consumer	*cons;
	void			*ctrlr_cookie;
	uint32_t		i;

	/*
	 * This controller failed during initialization (i.e. IDENTIFY
	 * command failed or timed out).  Do not notify any nvme
	 * consumers of the failure here, since the consumer does not
	 * even know about the controller yet.
	 */
	if (!ctrlr->is_initialized)
		return;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID &&
		    (ctrlr_cookie = ctrlr->cons_cookie[i]) != NULL) {
			ctrlr->cons_cookie[i] = NULL;
			if (cons->fail_fn != NULL)
				cons->fail_fn(ctrlr_cookie);
		}
	}
}

void
nvme_notify_ns(struct nvme_controller *ctrlr, int nsid)
{
	struct nvme_consumer	*cons;
	struct nvme_namespace	*ns;
	void			*ctrlr_cookie;
	uint32_t		i;

	KASSERT(nsid <= NVME_MAX_NAMESPACES,
	    ("%s: Namespace notification to nsid %d exceeds range\n",
	    device_get_nameunit(ctrlr->dev), nsid));

	if (!ctrlr->is_initialized)
		return;

	ns = &ctrlr->ns[nsid - 1];
	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->ns_fn != NULL &&
		    (ctrlr_cookie = ctrlr->cons_cookie[i]) != NULL)
			ns->cons_cookie[i] = (*cons->ns_fn)(ns, ctrlr_cookie);
	}
}

struct nvme_consumer *
nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
		       nvme_cons_async_fn_t async_fn,
		       nvme_cons_fail_fn_t fail_fn)
{
	int i;

	/*
	 * TODO: add locking around consumer registration.
	 */
	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		if (nvme_consumer[i].id == INVALID_CONSUMER_ID) {
			nvme_consumer[i].id = i;
			nvme_consumer[i].ns_fn = ns_fn;
			nvme_consumer[i].ctrlr_fn = ctrlr_fn;
			nvme_consumer[i].async_fn = async_fn;
			nvme_consumer[i].fail_fn = fail_fn;

			nvme_notify_new_consumer(&nvme_consumer[i]);
			return (&nvme_consumer[i]);
		}

	printf("nvme(4): consumer not registered - no slots available\n");
	return (NULL);
}

void
nvme_unregister_consumer(struct nvme_consumer *consumer)
{

	consumer->id = INVALID_CONSUMER_ID;
}

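/*
 * Illustrative sketch only (kept under "#if 0", not compiled): how a
 * hypothetical upper-layer consumer might use the registration interface
 * above.  The callback signatures follow how this file invokes them;
 * nvme_notify() treats a NULL return from the controller callback as a
 * failure and skips the namespace callbacks.  All example_* names are
 * made up.
 */
#if 0
static struct nvme_consumer	*example_consumer;

static void *
example_ctrlr_fn(struct nvme_controller *ctrlr)
{
	/* Set up per-controller state; the cookie is saved by nvme_notify(). */
	return (ctrlr);
}

static void *
example_ns_fn(struct nvme_namespace *ns, void *ctrlr_cookie)
{
	/* Create the per-namespace front end, e.g. a disk device. */
	return (ns);
}

static void
example_fail_fn(void *ctrlr_cookie)
{
	/* Tear down whatever example_ctrlr_fn() created. */
}

static void
example_load(void)
{
	example_consumer = nvme_register_consumer(example_ns_fn,
	    example_ctrlr_fn, NULL, example_fail_fn);
}

static void
example_unload(void)
{
	if (example_consumer != NULL)
		nvme_unregister_consumer(example_consumer);
}
#endif
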
void
nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_completion_poll_status	*status = arg;

	/*
	 * Copy the completion into the argument passed by the caller, so
	 * that the caller can check the status to determine whether the
	 * request passed or failed.
	 */
	memcpy(&status->cpl, cpl, sizeof(*cpl));
	atomic_store_rel_int(&status->done, 1);
}

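/*
 * Illustrative sketch only (kept under "#if 0", not compiled): the callback
 * above is intended for callers that submit a command and then wait for it
 * to complete.  The submission routine named here is hypothetical; the wait
 * loop pairs atomic_load_acq_int() with the atomic_store_rel_int() in the
 * callback, and the error check assumes nvme_completion_is_error() from
 * dev/nvme/nvme.h.
 */
#if 0
static void
example_sync_command(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;

	status.done = 0;
	/* Hypothetical admin command taking a completion callback and arg. */
	example_submit_admin_cmd(ctrlr, nvme_completion_poll_cb, &status);
	while (!atomic_load_acq_int(&status.done))
		pause("nvmeex", 1);
	if (nvme_completion_is_error(&status.cpl))
		printf("example command failed\n");
}
#endif
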
static int
nvme_modevent(module_t mod __unused, int type __unused, void *argp __unused)
{
	return (0);
}

static moduledata_t nvme_mod = {
	"nvme",
	nvme_modevent,
	0
};

DECLARE_MODULE(nvme, nvme_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(nvme, 1);
MODULE_DEPEND(nvme, cam, 1, 1, 1);