/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006 IronPort Systems Inc. <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/reboot.h>
#include <sys/rman.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/watchdog.h>

#ifdef LOCAL_MODULE
#include <ipmi.h>
#include <ipmivars.h>
#else
#include <sys/ipmi.h>
#include <dev/ipmi/ipmivars.h>
#endif

#ifdef IPMICTL_SEND_COMMAND_32
#include <sys/abi_compat.h>
#endif
/*
 * Driver request structures are allocated on the stack via alloca() to
 * avoid calling malloc(), especially for the watchdog handler.
 * To avoid too much stack growth, a previously allocated structure can
 * be reused via IPMI_INIT_DRIVER_REQUEST(), but the caller should ensure
 * that there is adequate reply/request space in the original allocation.
 */
#define	IPMI_INIT_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	bzero((req), sizeof(struct ipmi_request));			\
	ipmi_init_request((req), NULL, 0, (addr), (cmd), (reqlen), (replylen))

#define	IPMI_ALLOC_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	(req) = __builtin_alloca(sizeof(struct ipmi_request) +		\
	    (reqlen) + (replylen));					\
	IPMI_INIT_DRIVER_REQUEST((req), (addr), (cmd), (reqlen),	\
	    (replylen))
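
/*
 * Illustrative sketch (not part of the driver): an internal command,
 * such as the watchdog reset further below, typically uses these
 * macros as
 *
 *	struct ipmi_request *req;
 *	int error;
 *
 *	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
 *	    IPMI_RESET_WDOG, 0, 0);
 *	error = ipmi_submit_driver_request(sc, req, 0);
 *
 * Both buffers live in the same alloca()'d block right after the
 * structure, so the request is released automatically when the caller
 * returns.
 */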

static d_ioctl_t ipmi_ioctl;
static d_poll_t ipmi_poll;
static d_open_t ipmi_open;
static void ipmi_dtor(void *arg);

int ipmi_attached = 0;

static int on = 1;
static bool wd_in_shutdown = false;
static int wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
static int wd_shutdown_countdown = 0;		/* sec */
static int wd_startup_countdown = 0;		/* sec */
static int wd_pretimeout_countdown = 120;	/* sec */
static int cycle_wait = 10;			/* sec */
static int wd_init_enable = 1;

static SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IPMI driver parameters");
SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RWTUN,
    &on, 0, "Enable the IPMI driver");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_init_enable, CTLFLAG_RWTUN,
    &wd_init_enable, 1, "Enable watchdog initialization");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_timer_actions, CTLFLAG_RWTUN,
    &wd_timer_actions, 0,
    "IPMI watchdog timer actions (including pre-timeout interrupt)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_shutdown_countdown, CTLFLAG_RWTUN,
    &wd_shutdown_countdown, 0,
    "IPMI watchdog countdown for shutdown (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_startup_countdown, CTLFLAG_RDTUN,
    &wd_startup_countdown, 0,
    "IPMI watchdog countdown initialized during startup (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_pretimeout_countdown, CTLFLAG_RWTUN,
    &wd_pretimeout_countdown, 0,
    "IPMI watchdog pre-timeout countdown (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, cycle_wait, CTLFLAG_RWTUN,
    &cycle_wait, 0,
    "IPMI power cycle on reboot delay time (seconds)");

static struct cdevsw ipmi_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	ipmi_open,
	.d_ioctl =	ipmi_ioctl,
	.d_poll =	ipmi_poll,
	.d_name =	"ipmi",
};

static MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");

static int
ipmi_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
{
	struct ipmi_device *dev;
	struct ipmi_softc *sc;
	int error;

	if (!on)
		return (ENOENT);

	/* Initialize the per file descriptor data. */
	dev = malloc(sizeof(struct ipmi_device), M_IPMI, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(dev, ipmi_dtor);
	if (error) {
		free(dev, M_IPMI);
		return (error);
	}

	sc = cdev->si_drv1;
	TAILQ_INIT(&dev->ipmi_completed_requests);
	dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
	dev->ipmi_lun = IPMI_BMC_SMS_LUN;
	dev->ipmi_softc = sc;
	IPMI_LOCK(sc);
	sc->ipmi_opened++;
	IPMI_UNLOCK(sc);

	return (0);
}

static int
ipmi_poll(struct cdev *cdev, int poll_events, struct thread *td)
{
	struct ipmi_device *dev;
	struct ipmi_softc *sc;
	int revents = 0;

	if (devfs_get_cdevpriv((void **)&dev))
		return (0);

	sc = cdev->si_drv1;
	IPMI_LOCK(sc);
	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (!TAILQ_EMPTY(&dev->ipmi_completed_requests))
			revents |= poll_events & (POLLIN | POLLRDNORM);
		if (dev->ipmi_requests == 0)
			revents |= POLLERR;
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM))
			selrecord(td, &dev->ipmi_select);
	}
	IPMI_UNLOCK(sc);

	return (revents);
}

static void
ipmi_purge_completed_requests(struct ipmi_device *dev)
{
	struct ipmi_request *req;

	while (!TAILQ_EMPTY(&dev->ipmi_completed_requests)) {
		req = TAILQ_FIRST(&dev->ipmi_completed_requests);
		TAILQ_REMOVE(&dev->ipmi_completed_requests, req, ir_link);
		dev->ipmi_requests--;
		ipmi_free_request(req);
	}
}

static void
ipmi_dtor(void *arg)
{
	struct ipmi_request *req, *nreq;
	struct ipmi_device *dev;
	struct ipmi_softc *sc;

	dev = arg;
	sc = dev->ipmi_softc;

	IPMI_LOCK(sc);
	if (dev->ipmi_requests) {
		/* Throw away any pending requests for this device. */
		TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests_highpri,
		    ir_link, nreq) {
			if (req->ir_owner == dev) {
				TAILQ_REMOVE(&sc->ipmi_pending_requests_highpri,
				    req, ir_link);
				dev->ipmi_requests--;
				ipmi_free_request(req);
			}
		}
		TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests, ir_link,
		    nreq) {
			if (req->ir_owner == dev) {
				TAILQ_REMOVE(&sc->ipmi_pending_requests, req,
				    ir_link);
				dev->ipmi_requests--;
				ipmi_free_request(req);
			}
		}

		/* Throw away any pending completed requests for this device. */
		ipmi_purge_completed_requests(dev);

		/*
		 * If we still have outstanding requests, they must be stuck
		 * in an interface driver, so wait for those to drain.
		 */
		dev->ipmi_closing = 1;
		while (dev->ipmi_requests > 0) {
			msleep(&dev->ipmi_requests, &sc->ipmi_requests_lock,
			    PWAIT, "ipmidrain", 0);
			ipmi_purge_completed_requests(dev);
		}
	}
	sc->ipmi_opened--;
	IPMI_UNLOCK(sc);

	/* Cleanup. */
	free(dev, M_IPMI);
}

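/*
 * Compute the 2's complement checksum used for IPMB framing: the
 * returned byte is chosen so that the covered bytes plus the checksum
 * sum to zero modulo 256.
 */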
static u_char
ipmi_ipmb_checksum(u_char *data, int len)
{
	u_char sum = 0;

	for (; len; len--)
		sum += *data++;
	return (-sum);
}

static int
ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{
	struct ipmi_softc *sc;
	struct ipmi_device *dev;
	struct ipmi_request *kreq;
	struct ipmi_req *req = (struct ipmi_req *)data;
	struct ipmi_recv *recv = (struct ipmi_recv *)data;
	struct ipmi_addr addr;
#ifdef IPMICTL_SEND_COMMAND_32
	struct ipmi_req32 *req32 = (struct ipmi_req32 *)data;
	struct ipmi_recv32 *recv32 = (struct ipmi_recv32 *)data;
	union {
		struct ipmi_req req;
		struct ipmi_recv recv;
	} thunk32;
#endif
	int error, len;

	error = devfs_get_cdevpriv((void **)&dev);
	if (error)
		return (error);

	sc = cdev->si_drv1;

#ifdef IPMICTL_SEND_COMMAND_32
	/* Convert 32-bit structures to native. */
	switch (cmd) {
	case IPMICTL_SEND_COMMAND_32:
		req = &thunk32.req;
		req->addr = PTRIN(req32->addr);
		req->addr_len = req32->addr_len;
		req->msgid = req32->msgid;
		req->msg.netfn = req32->msg.netfn;
		req->msg.cmd = req32->msg.cmd;
		req->msg.data_len = req32->msg.data_len;
		req->msg.data = PTRIN(req32->msg.data);
		break;
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv = &thunk32.recv;
		recv->addr = PTRIN(recv32->addr);
		recv->addr_len = recv32->addr_len;
		recv->msg.data_len = recv32->msg.data_len;
		recv->msg.data = PTRIN(recv32->msg.data);
		break;
	}
#endif

	switch (cmd) {
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_SEND_COMMAND_32:
#endif
	case IPMICTL_SEND_COMMAND:
		error = copyin(req->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		if (addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
			struct ipmi_system_interface_addr *saddr =
			    (struct ipmi_system_interface_addr *)&addr;

			kreq = ipmi_alloc_request(dev, req->msgid,
			    IPMI_ADDR(req->msg.netfn, saddr->lun & 0x3),
			    req->msg.cmd, req->msg.data_len, IPMI_MAX_RX);
			error = copyin(req->msg.data, kreq->ir_request,
			    req->msg.data_len);
			if (error) {
				ipmi_free_request(kreq);
				return (error);
			}
			IPMI_LOCK(sc);
			dev->ipmi_requests++;
			error = sc->ipmi_enqueue_request(sc, kreq);
			IPMI_UNLOCK(sc);
			if (error)
				return (error);
			break;
		}

		/* Special processing for IPMB commands */
		struct ipmi_ipmb_addr *iaddr = (struct ipmi_ipmb_addr *)&addr;

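		/*
		 * The Send Message payload built below wraps the user's
		 * request in an IPMB frame:
		 *   [0] channel
		 *   [1] target slave address
		 *   [2] target netFn/LUN
		 *   [3] checksum over bytes 1-2
		 *   [4] our (requester) slave address
		 *   [5] requester LUN
		 *   [6] command
		 *   [7..] data, then a checksum over bytes 4..end
		 */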
		IPMI_ALLOC_DRIVER_REQUEST(kreq, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_SEND_MSG, req->msg.data_len + 8, IPMI_MAX_RX);
		/* Construct the SEND MSG header */
		kreq->ir_request[0] = iaddr->channel;
		kreq->ir_request[1] = iaddr->slave_addr;
		kreq->ir_request[2] = IPMI_ADDR(req->msg.netfn, iaddr->lun);
		kreq->ir_request[3] =
		    ipmi_ipmb_checksum(&kreq->ir_request[1], 2);
		kreq->ir_request[4] = dev->ipmi_address;
		kreq->ir_request[5] = IPMI_ADDR(0, dev->ipmi_lun);
		kreq->ir_request[6] = req->msg.cmd;
		/* Copy the message data */
		if (req->msg.data_len > 0) {
			error = copyin(req->msg.data, &kreq->ir_request[7],
			    req->msg.data_len);
			if (error != 0)
				return (error);
		}
		kreq->ir_request[req->msg.data_len + 7] =
		    ipmi_ipmb_checksum(&kreq->ir_request[4],
		    req->msg.data_len + 3);
		error = ipmi_submit_driver_request(sc, kreq, MAX_TIMEOUT);
		if (error != 0)
			return (error);

		kreq = ipmi_alloc_request(dev, req->msgid,
		    IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG,
		    0, IPMI_MAX_RX);
		kreq->ir_ipmb = true;
		kreq->ir_ipmb_addr = IPMI_ADDR(req->msg.netfn, 0);
		kreq->ir_ipmb_command = req->msg.cmd;
		IPMI_LOCK(sc);
		dev->ipmi_requests++;
		error = sc->ipmi_enqueue_request(sc, kreq);
		IPMI_UNLOCK(sc);
		if (error != 0)
			return (error);
		break;
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
#endif
	case IPMICTL_RECEIVE_MSG_TRUNC:
	case IPMICTL_RECEIVE_MSG:
		error = copyin(recv->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		IPMI_LOCK(sc);
		kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
		if (kreq == NULL) {
			IPMI_UNLOCK(sc);
			return (EAGAIN);
		}
		if (kreq->ir_error != 0) {
			error = kreq->ir_error;
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			IPMI_UNLOCK(sc);
			ipmi_free_request(kreq);
			return (error);
		}

		recv->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv->msgid = kreq->ir_msgid;
		if (kreq->ir_ipmb) {
			addr.channel = IPMI_IPMB_CHANNEL;
			recv->msg.netfn =
			    IPMI_REPLY_ADDR(kreq->ir_ipmb_addr) >> 2;
			recv->msg.cmd = kreq->ir_ipmb_command;
			/* Get the compcode of response */
			kreq->ir_compcode = kreq->ir_reply[6];
			/* Move the reply head past response header */
			kreq->ir_reply += 7;
			len = kreq->ir_replylen - 7;
		} else {
			addr.channel = IPMI_BMC_CHANNEL;
			recv->msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
			recv->msg.cmd = kreq->ir_command;
			len = kreq->ir_replylen + 1;
		}

		if (recv->msg.data_len < len &&
		    (cmd == IPMICTL_RECEIVE_MSG
#ifdef IPMICTL_RECEIVE_MSG_32
		    || cmd == IPMICTL_RECEIVE_MSG_32
#endif
		    )) {
			IPMI_UNLOCK(sc);
			return (EMSGSIZE);
		}
		TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
		dev->ipmi_requests--;
		IPMI_UNLOCK(sc);
		len = min(recv->msg.data_len, len);
		recv->msg.data_len = len;
		error = copyout(&addr, recv->addr, sizeof(addr));
		if (error == 0)
			error = copyout(&kreq->ir_compcode, recv->msg.data, 1);
		if (error == 0)
			error = copyout(kreq->ir_reply, recv->msg.data + 1,
			    len - 1);
		ipmi_free_request(kreq);
		if (error)
			return (error);
		break;
	case IPMICTL_SET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_address = *(int *)data;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		*(int *)data = dev->ipmi_address;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_lun = *(int *)data & 0x3;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		*(int *)data = dev->ipmi_lun;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_GETS_EVENTS_CMD:
		/*
		device_printf(sc->ipmi_dev,
		    "IPMICTL_SET_GETS_EVENTS_CMD NA\n");
		*/
		break;
	case IPMICTL_REGISTER_FOR_CMD:
	case IPMICTL_UNREGISTER_FOR_CMD:
		return (EOPNOTSUPP);
	default:
		device_printf(sc->ipmi_dev, "Unknown IOCTL %lX\n", cmd);
		return (ENOIOCTL);
	}

#ifdef IPMICTL_SEND_COMMAND_32
	/* Update changed fields in 32-bit structures. */
	switch (cmd) {
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv32->recv_type = recv->recv_type;
		recv32->msgid = recv->msgid;
		recv32->msg.netfn = recv->msg.netfn;
		recv32->msg.cmd = recv->msg.cmd;
		recv32->msg.data_len = recv->msg.data_len;
		break;
	}
#endif
	return (0);
}

/*
 * Request management.
 */

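/*
 * A request and its buffers come from a single allocation laid out as
 * [struct ipmi_request][request buffer][reply buffer]; the pointer
 * arithmetic on &req[1] below relies on that layout.
 */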
__inline void
ipmi_init_request(struct ipmi_request *req, struct ipmi_device *dev, long msgid,
    uint8_t addr, uint8_t command, size_t requestlen, size_t replylen)
{

	req->ir_owner = dev;
	req->ir_msgid = msgid;
	req->ir_addr = addr;
	req->ir_command = command;
	if (requestlen) {
		req->ir_request = (char *)&req[1];
		req->ir_requestlen = requestlen;
	}
	if (replylen) {
		req->ir_reply = (char *)&req[1] + requestlen;
		req->ir_replybuflen = replylen;
	}
}

/* Allocate a new request with request and reply buffers. */
struct ipmi_request *
ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
    uint8_t command, size_t requestlen, size_t replylen)
{
	struct ipmi_request *req;

	req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
	    M_IPMI, M_WAITOK | M_ZERO);
	ipmi_init_request(req, dev, msgid, addr, command, requestlen, replylen);
	return (req);
}

/* Free a request no longer in use. */
void
ipmi_free_request(struct ipmi_request *req)
{

	free(req, M_IPMI);
}

/* Store a processed request on the appropriate completion queue. */
void
ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
{
	struct ipmi_device *dev;

	IPMI_LOCK_ASSERT(sc);

	/*
	 * Anonymous requests (from inside the driver) always have a
	 * waiter that we awaken.
	 */
	if (req->ir_owner == NULL)
		wakeup(req);
	else {
		dev = req->ir_owner;
		TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
		selwakeup(&dev->ipmi_select);
		if (dev->ipmi_closing)
			wakeup(&dev->ipmi_requests);
	}
}

/* Perform an internal driver request. */
int
ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
    int timo)
{

	return (sc->ipmi_driver_request(sc, req, timo));
}

/*
 * Helper routine for polled system interfaces that use
 * ipmi_polled_enqueue_request() to queue requests.  This routine waits
 * until a request is pending and then returns the first one.  If the
 * driver is shutting down, it returns NULL.
 */
struct ipmi_request *
ipmi_dequeue_request(struct ipmi_softc *sc)
{
	struct ipmi_request *req;

	IPMI_LOCK_ASSERT(sc);

	while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests) &&
	    TAILQ_EMPTY(&sc->ipmi_pending_requests_highpri))
		cv_wait(&sc->ipmi_request_added, &sc->ipmi_requests_lock);
	if (sc->ipmi_detaching)
		return (NULL);

	req = TAILQ_FIRST(&sc->ipmi_pending_requests_highpri);
	if (req != NULL)
		TAILQ_REMOVE(&sc->ipmi_pending_requests_highpri, req, ir_link);
	else {
		req = TAILQ_FIRST(&sc->ipmi_pending_requests);
		TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
	}
	return (req);
}

/* Default implementation of ipmi_enqueue_request() for polled interfaces. */
int
ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
{

	IPMI_LOCK_ASSERT(sc);

	TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
	cv_signal(&sc->ipmi_request_added);
	return (0);
}

int
ipmi_polled_enqueue_request_highpri(struct ipmi_softc *sc,
    struct ipmi_request *req)
{

	IPMI_LOCK_ASSERT(sc);

	TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests_highpri, req, ir_link);
	cv_signal(&sc->ipmi_request_added);
	return (0);
}

/*
 * Watchdog event handler.
 */

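/*
 * Pat the BMC watchdog.  A completion code of 0x80 from the Reset
 * Watchdog Timer command indicates an uninitialized timer (per the
 * IPMI spec); it is mapped to ENOENT so callers can tell that case
 * apart from other failures.
 */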
static int
ipmi_reset_watchdog(struct ipmi_softc *sc)
{
	struct ipmi_request *req;
	int error;

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_RESET_WDOG, 0, 0);
	error = ipmi_submit_driver_request(sc, req, 0);
	if (error) {
		device_printf(sc->ipmi_dev, "Failed to reset watchdog\n");
	} else if (req->ir_compcode == 0x80) {
		error = ENOENT;
	} else if (req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev, "Watchdog reset returned 0x%x\n",
		    req->ir_compcode);
		error = EINVAL;
	}
	return (error);
}

static int
ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
{
	struct ipmi_request *req;
	int error;

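	/*
	 * The BMC counts down in 100 ms ticks (hence the sec * 10
	 * conversions below), so the 16-bit countdown field limits the
	 * timeout to 0xffff / 10 seconds.
	 */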
	if (sec > 0xffff / 10)
		return (EINVAL);

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_SET_WDOG, 6, 0);
	if (sec) {
		req->ir_request[0] = IPMI_SET_WD_TIMER_DONT_STOP
		    | IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = (wd_timer_actions & 0xff);
		req->ir_request[2] = min(0xff,
		    min(wd_pretimeout_countdown, (sec + 2) / 4));
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = (sec * 10) & 0xff;
		req->ir_request[5] = (sec * 10) >> 8;
	} else {
		req->ir_request[0] = IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = 0;
		req->ir_request[2] = 0;
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = 0;
		req->ir_request[5] = 0;
	}
	error = ipmi_submit_driver_request(sc, req, 0);
	if (error) {
		device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
	} else if (req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev, "Watchdog set returned 0x%x\n",
		    req->ir_compcode);
		error = EINVAL;
	}
	return (error);
}

static void
ipmi_wd_event(void *arg, unsigned int cmd, int *error)
{
	struct ipmi_softc *sc = arg;
	unsigned int timeout;
	int e;

	/* Ignore requests while disabled. */
	if (!on)
		return;

	/*
	 * To prevent infinite hangs, we don't let anyone pat or change
	 * the watchdog when we're shutting down.  (See ipmi_shutdown_event().)
	 * However, we do want to keep patting the watchdog while we are doing
	 * a coredump.
	 */
	if (wd_in_shutdown) {
		if (dumping && sc->ipmi_watchdog_active)
			ipmi_reset_watchdog(sc);
		return;
	}

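	/*
	 * watchdog(9) encodes the timeout in WD_INTERVAL as the base-2
	 * logarithm of a nanosecond count, so 1 << cmd is the requested
	 * timeout in nanoseconds; convert it to whole seconds, rounding
	 * sub-second timeouts up to one second.
	 */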
	cmd &= WD_INTERVAL;
	if (cmd > 0 && cmd <= 63) {
		timeout = ((uint64_t)1 << cmd) / 1000000000;
		if (timeout == 0)
			timeout = 1;
		if (timeout != sc->ipmi_watchdog_active ||
		    wd_timer_actions != sc->ipmi_watchdog_actions ||
		    wd_pretimeout_countdown != sc->ipmi_watchdog_pretimeout) {
			e = ipmi_set_watchdog(sc, timeout);
			if (e == 0) {
				sc->ipmi_watchdog_active = timeout;
				sc->ipmi_watchdog_actions = wd_timer_actions;
				sc->ipmi_watchdog_pretimeout =
				    wd_pretimeout_countdown;
			} else {
				(void)ipmi_set_watchdog(sc, 0);
				sc->ipmi_watchdog_active = 0;
				sc->ipmi_watchdog_actions = 0;
				sc->ipmi_watchdog_pretimeout = 0;
			}
		}
		if (sc->ipmi_watchdog_active != 0) {
			e = ipmi_reset_watchdog(sc);
			if (e == 0) {
				*error = 0;
			} else {
				(void)ipmi_set_watchdog(sc, 0);
				sc->ipmi_watchdog_active = 0;
				sc->ipmi_watchdog_actions = 0;
				sc->ipmi_watchdog_pretimeout = 0;
			}
		}
	} else if (atomic_readandclear_int(&sc->ipmi_watchdog_active) != 0) {
		sc->ipmi_watchdog_actions = 0;
		sc->ipmi_watchdog_pretimeout = 0;

		e = ipmi_set_watchdog(sc, 0);
		if (e != 0 && cmd == 0)
			*error = EOPNOTSUPP;
	}
}

static void
ipmi_shutdown_event(void *arg, int howto)
{
	struct ipmi_softc *sc = arg;

	/* Ignore event if disabled. */
	if (!on)
		return;

	/*
	 * A positive wd_shutdown_countdown re-arms the watchdog, zero
	 * disables it, and a negative value leaves the existing state
	 * alone.
	 *
	 * A system halt is a special case of shutdown where
	 * wd_shutdown_countdown is ignored and the watchdog is disabled
	 * to ensure that the system remains halted as requested.
	 *
	 * Revert to using a power cycle to ensure that the watchdog will
	 * do something useful here.  Having the watchdog send an NMI
	 * instead is useless during shutdown, and might be ignored if an
	 * NMI already triggered.
	 */

	wd_in_shutdown = true;
	if (wd_shutdown_countdown == 0 || (howto & RB_HALT) != 0) {
		/* Disable the watchdog. */
		ipmi_set_watchdog(sc, 0);
		sc->ipmi_watchdog_active = 0;
	} else if (wd_shutdown_countdown > 0) {
		/* Set the desired action and time, and reset the watchdog. */
		wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
		ipmi_set_watchdog(sc, wd_shutdown_countdown);
		sc->ipmi_watchdog_active = wd_shutdown_countdown;
		ipmi_reset_watchdog(sc);
	}
}

static void
ipmi_power_cycle(void *arg, int howto)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;

	/*
	 * Ignore everything except power cycling requests.
	 */
	if ((howto & RB_POWERCYCLE) == 0)
		return;

	device_printf(sc->ipmi_dev, "Power cycling using IPMI\n");

	/*
	 * Send a CHASSIS_CONTROL command to the CHASSIS device, subcommand 2
	 * as described in IPMI v2.0 spec section 28.3.
	 */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_CHASSIS_REQUEST, 0),
	    IPMI_CHASSIS_CONTROL, 1, 0);
	req->ir_request[0] = IPMI_CC_POWER_CYCLE;

	ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);

	if (req->ir_error != 0 || req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev,
		    "Power cycling via IPMI failed code %#x %#x\n",
		    req->ir_error, req->ir_compcode);
		return;
	}

	/*
	 * BMCs are notoriously slow, so give the BMC cycle_wait seconds
	 * for the power-down leg of the power cycle.  If that fails, fall
	 * back to the next handler in the shutdown_final chain and/or the
	 * platform failsafe.
	 */
	DELAY(cycle_wait * 1000 * 1000);
	device_printf(sc->ipmi_dev, "Power cycling via IPMI timed out\n");
}

static void
ipmi_startup(void *arg)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;
	device_t dev;
	int error, i;

	config_intrhook_disestablish(&sc->ipmi_ich);
	dev = sc->ipmi_dev;

	/* Initialize interface-independent state. */
	mtx_init(&sc->ipmi_requests_lock, "ipmi requests", NULL, MTX_DEF);
	mtx_init(&sc->ipmi_io_lock, "ipmi io", NULL, MTX_DEF);
	cv_init(&sc->ipmi_request_added, "ipmireq");
	TAILQ_INIT(&sc->ipmi_pending_requests_highpri);
	TAILQ_INIT(&sc->ipmi_pending_requests);

	/* Initialize interface-dependent state. */
	error = sc->ipmi_startup(sc);
	if (error) {
		device_printf(dev, "Failed to initialize interface: %d\n",
		    error);
		return;
	}

	/* Send a GET_DEVICE_ID request. */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_DEVICE_ID, 0, 15);

	error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
	if (error == EWOULDBLOCK) {
		device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
		return;
	} else if (error) {
		device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
		return;
	} else if (req->ir_compcode != 0) {
		device_printf(dev,
		    "Bad completion code for GET_DEVICE_ID: %d\n",
		    req->ir_compcode);
		return;
	} else if (req->ir_replylen < 5) {
		device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
		    req->ir_replylen);
		return;
	}

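	/*
	 * Decode the GET_DEVICE_ID reply: ir_reply[1] is the device
	 * revision, ir_reply[2] the firmware major revision, ir_reply[3]
	 * the BCD-encoded firmware minor revision, ir_reply[4] the
	 * BCD-encoded IPMI version, and ir_reply[5] the additional
	 * device support mask (checked later for chassis support).
	 */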
	device_printf(dev, "IPMI device rev. %d, firmware rev. %d.%d%d, "
	    "version %d.%d, device support mask %#x\n",
	    req->ir_reply[1] & 0x0f,
	    req->ir_reply[2] & 0x7f, req->ir_reply[3] >> 4,
	    req->ir_reply[3] & 0x0f,
	    req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4, req->ir_reply[5]);

	sc->ipmi_dev_support = req->ir_reply[5];

	IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_CLEAR_FLAGS, 1, 0);

	ipmi_submit_driver_request(sc, req, 0);

	/* XXX: Magic numbers */
	if (req->ir_compcode == 0xc0) {
		device_printf(dev, "Clear flags is busy\n");
	}
	if (req->ir_compcode == 0xc1) {
		device_printf(dev, "Clear flags illegal\n");
	}

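	/*
	 * Count the available channels by probing GET_CHANNEL_INFO for
	 * each channel number in turn until the BMC rejects one.
	 */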
	for (i = 0; i < 8; i++) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_CHANNEL_INFO, 1, 0);
		req->ir_request[0] = i;

		error = ipmi_submit_driver_request(sc, req, 0);

		if (error != 0 || req->ir_compcode != 0)
			break;
	}
	device_printf(dev, "Number of channels %d\n", i);

	/*
	 * Probe for watchdog, but only for backends which support
	 * polled driver requests.
	 */
	if (wd_init_enable && sc->ipmi_driver_requests_polled) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_WDOG, 0, 0);

		error = ipmi_submit_driver_request(sc, req, 0);

		if (error == 0 && req->ir_compcode == 0x00) {
			device_printf(dev, "Attached watchdog\n");
			/* Register the watchdog event handler. */
			sc->ipmi_watchdog_tag = EVENTHANDLER_REGISTER(
			    watchdog_list, ipmi_wd_event, sc, 0);
			sc->ipmi_shutdown_tag = EVENTHANDLER_REGISTER(
			    shutdown_pre_sync, ipmi_shutdown_event,
			    sc, 0);
		}
	}

	sc->ipmi_cdev = make_dev(&ipmi_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
	if (sc->ipmi_cdev == NULL) {
		device_printf(dev, "Failed to create cdev\n");
		return;
	}
	sc->ipmi_cdev->si_drv1 = sc;

	/*
	 * Set the initial watchdog state.  If desired, arm an initial
	 * watchdog on startup; if the watchdog device is disabled, clear
	 * any existing watchdog instead.
	 */
	if (on && wd_startup_countdown > 0) {
		if (ipmi_set_watchdog(sc, wd_startup_countdown) == 0 &&
		    ipmi_reset_watchdog(sc) == 0) {
			sc->ipmi_watchdog_active = wd_startup_countdown;
			sc->ipmi_watchdog_actions = wd_timer_actions;
			sc->ipmi_watchdog_pretimeout = wd_pretimeout_countdown;
		} else
			(void)ipmi_set_watchdog(sc, 0);
		ipmi_reset_watchdog(sc);
	} else if (!on)
		(void)ipmi_set_watchdog(sc, 0);

	/*
	 * Register a handler to power cycle the system via IPMI on
	 * reboot.  SHUTDOWN_PRI_LAST - 2 is used since we don't handle
	 * all the other kinds of reboots; we'll let others handle them.
	 * This is only attempted if the BMC supports the Chassis device.
	 */
	if (sc->ipmi_dev_support & IPMI_ADS_CHASSIS) {
		device_printf(dev, "Establishing power cycle handler\n");
		sc->ipmi_power_cycle_tag = EVENTHANDLER_REGISTER(shutdown_final,
		    ipmi_power_cycle, sc, SHUTDOWN_PRI_LAST - 2);
	}
}

int
ipmi_attach(device_t dev)
{
	struct ipmi_softc *sc = device_get_softc(dev);
	int error;

	if (sc->ipmi_irq_res != NULL && sc->ipmi_intr != NULL) {
		error = bus_setup_intr(dev, sc->ipmi_irq_res, INTR_TYPE_MISC,
		    NULL, sc->ipmi_intr, sc, &sc->ipmi_irq);
		if (error) {
			device_printf(dev, "can't set up interrupt\n");
			return (error);
		}
	}

	bzero(&sc->ipmi_ich, sizeof(struct intr_config_hook));
	sc->ipmi_ich.ich_func = ipmi_startup;
	sc->ipmi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->ipmi_ich) != 0) {
		device_printf(dev, "can't establish configuration hook\n");
		return (ENOMEM);
	}

	ipmi_attached = 1;
	return (0);
}

int
ipmi_detach(device_t dev)
{
	struct ipmi_softc *sc;

	sc = device_get_softc(dev);

	/* Fail if there are any open handles. */
	IPMI_LOCK(sc);
	if (sc->ipmi_opened) {
		IPMI_UNLOCK(sc);
		return (EBUSY);
	}
	IPMI_UNLOCK(sc);
	if (sc->ipmi_cdev)
		destroy_dev(sc->ipmi_cdev);

	/* Detach from watchdog handling and turn off the watchdog. */
	if (sc->ipmi_shutdown_tag)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync,
		    sc->ipmi_shutdown_tag);
	if (sc->ipmi_watchdog_tag) {
		EVENTHANDLER_DEREGISTER(watchdog_list, sc->ipmi_watchdog_tag);
		ipmi_set_watchdog(sc, 0);
	}

	/* Detach from shutdown handling for power cycle reboot. */
	if (sc->ipmi_power_cycle_tag)
		EVENTHANDLER_DEREGISTER(shutdown_final,
		    sc->ipmi_power_cycle_tag);

	/* XXX: should use shutdown callout I think. */
	/* If the backend uses a kthread, shut it down. */
	IPMI_LOCK(sc);
	sc->ipmi_detaching = 1;
	if (sc->ipmi_kthread) {
		cv_broadcast(&sc->ipmi_request_added);
		msleep(sc->ipmi_kthread, &sc->ipmi_requests_lock, 0,
		    "ipmi_wait", 0);
	}
	IPMI_UNLOCK(sc);

	/* ipmi_release_resources() also tears down the interrupt handler. */
	ipmi_release_resources(dev);
	mtx_destroy(&sc->ipmi_io_lock);
	mtx_destroy(&sc->ipmi_requests_lock);
	return (0);
}

void
ipmi_release_resources(device_t dev)
{
	struct ipmi_softc *sc;
	int i;

	sc = device_get_softc(dev);
	if (sc->ipmi_irq)
		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
	if (sc->ipmi_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, sc->ipmi_irq_rid,
		    sc->ipmi_irq_res);
	for (i = 0; i < MAX_RES; i++)
		if (sc->ipmi_io_res[i])
			bus_release_resource(dev, sc->ipmi_io_type,
			    sc->ipmi_io_rid + i, sc->ipmi_io_res[i]);
}

/* XXX: Why? */
static void
ipmi_unload(void *arg)
{
	device_t *devs;
	int count;
	int i;

	if (devclass_get_devices(devclass_find("ipmi"), &devs, &count) != 0)
		return;
	for (i = 0; i < count; i++)
		device_delete_child(device_get_parent(devs[i]), devs[i]);
	free(devs, M_TEMP);
}
SYSUNINIT(ipmi_unload, SI_SUB_DRIVERS, SI_ORDER_FIRST, ipmi_unload, NULL);

#ifdef IMPI_DEBUG
static void
dump_buf(u_char *data, int len)
{
	char buf[20];
	char line[1024];
	char temp[30];
	int count = 0;
	int i = 0;

	printf("Address %p len %d\n", data, len);
	if (len > 256)
		len = 256;
	line[0] = '\000';
	for (; len > 0; len--, data++) {
		sprintf(temp, "%02x ", *data);
		strcat(line, temp);
		if (*data >= ' ' && *data <= '~')
			buf[count] = *data;
		else
			buf[count] = '.';
		if (++count == 16) {
			buf[count] = '\000';
			count = 0;
			printf("  %3x  %s %s\n", i, line, buf);
			i += 16;
			line[0] = '\000';
		}
	}
	buf[count] = '\000';

	for (; count != 16; count++) {
		strcat(line, "   ");
	}
	printf("  %3x  %s %s\n", i, line, buf);
}
#endif