/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 IronPort Systems Inc. <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/reboot.h>
#include <sys/rman.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/watchdog.h>

#ifdef LOCAL_MODULE
#include <ipmi.h>
#include <ipmivars.h>
#else
#include <sys/ipmi.h>
#include <dev/ipmi/ipmivars.h>
#endif

#ifdef IPMICTL_SEND_COMMAND_32
#include <sys/abi_compat.h>
#endif

/*
 * Driver request structures are allocated on the stack via alloca() to
 * avoid calling malloc(), especially for the watchdog handler.
 * To avoid too much stack growth, a previously allocated structure can
 * be reused via IPMI_INIT_DRIVER_REQUEST(), but the caller should ensure
 * that there is adequate reply/request space in the original allocation.
 */
#define	IPMI_INIT_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	bzero((req), sizeof(struct ipmi_request));			\
	ipmi_init_request((req), NULL, 0, (addr), (cmd), (reqlen), (replylen))

#define	IPMI_ALLOC_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	(req) = __builtin_alloca(sizeof(struct ipmi_request) +		\
	    (reqlen) + (replylen));					\
	IPMI_INIT_DRIVER_REQUEST((req), (addr), (cmd), (reqlen),	\
	    (replylen))
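
/*
 * For example (a sketch mirroring ipmi_reset_watchdog() below), a driver
 * request lives on the caller's stack frame and must be submitted before
 * the enclosing function returns:
 *
 *	struct ipmi_request *req;
 *
 *	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
 *	    IPMI_RESET_WDOG, 0, 0);
 *	error = ipmi_submit_driver_request(sc, req, 0);
 */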

#ifdef IPMB
static int ipmi_ipmb_checksum(u_char *, int);
static int ipmi_ipmb_send_message(device_t, u_char, u_char, u_char,
	u_char, u_char *, int);
#endif

static d_ioctl_t ipmi_ioctl;
static d_poll_t ipmi_poll;
static d_open_t ipmi_open;
static void ipmi_dtor(void *arg);

int ipmi_attached = 0;

static int on = 1;
static bool wd_in_shutdown = false;
static int wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
static int wd_shutdown_countdown = 0; /* sec */
static int wd_startup_countdown = 0; /* sec */
static int wd_pretimeout_countdown = 120; /* sec */
static int cycle_wait = 10; /* sec */
static int wd_init_enable = 1;

static SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IPMI driver parameters");
SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RWTUN,
	&on, 0, "");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_init_enable, CTLFLAG_RWTUN,
	&wd_init_enable, 1, "Enable watchdog initialization");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_timer_actions, CTLFLAG_RW,
	&wd_timer_actions, 0,
	"IPMI watchdog timer actions (including pre-timeout interrupt)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_shutdown_countdown, CTLFLAG_RW,
	&wd_shutdown_countdown, 0,
	"IPMI watchdog countdown for shutdown (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_startup_countdown, CTLFLAG_RDTUN,
	&wd_startup_countdown, 0,
	"IPMI watchdog countdown initialized during startup (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_pretimeout_countdown, CTLFLAG_RW,
	&wd_pretimeout_countdown, 0,
	"IPMI watchdog pre-timeout countdown (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, cycle_wait, CTLFLAG_RWTUN,
	&cycle_wait, 0,
	"IPMI power cycle on reboot delay time (seconds)");

static struct cdevsw ipmi_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	ipmi_open,
	.d_ioctl =	ipmi_ioctl,
	.d_poll =	ipmi_poll,
	.d_name =	"ipmi",
};

static MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");

static int
ipmi_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
{
	struct ipmi_device *dev;
	struct ipmi_softc *sc;
	int error;

	if (!on)
		return (ENOENT);

	/* Initialize the per-file-descriptor data. */
	dev = malloc(sizeof(struct ipmi_device), M_IPMI, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(dev, ipmi_dtor);
	if (error) {
		free(dev, M_IPMI);
		return (error);
	}

	sc = cdev->si_drv1;
	TAILQ_INIT(&dev->ipmi_completed_requests);
	dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
	dev->ipmi_lun = IPMI_BMC_SMS_LUN;
	dev->ipmi_softc = sc;
	IPMI_LOCK(sc);
	sc->ipmi_opened++;
	IPMI_UNLOCK(sc);

	return (0);
}

static int
ipmi_poll(struct cdev *cdev, int poll_events, struct thread *td)
{
	struct ipmi_device *dev;
	struct ipmi_softc *sc;
	int revents = 0;

	if (devfs_get_cdevpriv((void **)&dev))
		return (0);

	sc = cdev->si_drv1;
	IPMI_LOCK(sc);
	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (!TAILQ_EMPTY(&dev->ipmi_completed_requests))
			revents |= poll_events & (POLLIN | POLLRDNORM);
		if (dev->ipmi_requests == 0)
			revents |= POLLERR;
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM))
			selrecord(td, &dev->ipmi_select);
	}
	IPMI_UNLOCK(sc);

	return (revents);
}

static void
ipmi_purge_completed_requests(struct ipmi_device *dev)
{
	struct ipmi_request *req;

	while (!TAILQ_EMPTY(&dev->ipmi_completed_requests)) {
		req = TAILQ_FIRST(&dev->ipmi_completed_requests);
		TAILQ_REMOVE(&dev->ipmi_completed_requests, req, ir_link);
		dev->ipmi_requests--;
		ipmi_free_request(req);
	}
}

static void
ipmi_dtor(void *arg)
{
	struct ipmi_request *req, *nreq;
	struct ipmi_device *dev;
	struct ipmi_softc *sc;

	dev = arg;
	sc = dev->ipmi_softc;

	IPMI_LOCK(sc);
	if (dev->ipmi_requests) {
		/* Throw away any pending requests for this device. */
		TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests, ir_link,
		    nreq) {
			if (req->ir_owner == dev) {
				TAILQ_REMOVE(&sc->ipmi_pending_requests, req,
				    ir_link);
				dev->ipmi_requests--;
				ipmi_free_request(req);
			}
		}

		/* Throw away any completed requests for this device. */
		ipmi_purge_completed_requests(dev);

		/*
		 * If we still have outstanding requests, they must be stuck
		 * in an interface driver, so wait for those to drain.
		 */
		dev->ipmi_closing = 1;
		while (dev->ipmi_requests > 0) {
			msleep(&dev->ipmi_requests, &sc->ipmi_requests_lock,
			    PWAIT, "ipmidrain", 0);
			ipmi_purge_completed_requests(dev);
		}
	}
	sc->ipmi_opened--;
	IPMI_UNLOCK(sc);

	/* Cleanup. */
	free(dev, M_IPMI);
}

#ifdef IPMB
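/*
 * IPMB messages are protected by simple 8-bit two's-complement
 * checksums: the protected bytes plus the checksum byte must sum to
 * zero modulo 256, which is why the negated running sum is returned.
 */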
static int
ipmi_ipmb_checksum(u_char *data, int len)
{
	u_char sum = 0;

	for (; len; len--) {
		sum += *data++;
	}
	return (-sum);
}

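/*
 * Encapsulate an IPMB frame in a Send Message command: responder
 * address, netFn/LUN, a checksum over the connection header, our
 * requester address, sequence number/LUN, the command, any data, and
 * a trailing checksum over the message body.
 */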
/* XXX: Needs work */
static int
ipmi_ipmb_send_message(device_t dev, u_char channel, u_char netfn,
    u_char command, u_char seq, u_char *data, int data_len)
{
	struct ipmi_softc *sc = device_get_softc(dev);
	struct ipmi_request *req;
	u_char slave_addr = 0x52;
	int error;

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_SEND_MSG, data_len + 8, 0);
	req->ir_request[0] = channel;
	req->ir_request[1] = slave_addr;
	req->ir_request[2] = IPMI_ADDR(netfn, 0);
	req->ir_request[3] = ipmi_ipmb_checksum(&req->ir_request[1], 2);
	req->ir_request[4] = sc->ipmi_address;
	req->ir_request[5] = IPMI_ADDR(seq, sc->ipmi_lun);
	req->ir_request[6] = command;

	bcopy(data, &req->ir_request[7], data_len);
	req->ir_request[data_len + 7] = ipmi_ipmb_checksum(&req->ir_request[4],
	    data_len + 3);

	ipmi_submit_driver_request(sc, req, 0);
	error = req->ir_error;

	return (error);
}

static int
ipmi_handle_attn(struct ipmi_softc *sc)
{
	struct ipmi_request *req;
	int error;

	device_printf(sc->ipmi_dev, "BMC has a message\n");
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_MSG_FLAGS, 0, 1);

	ipmi_submit_driver_request(sc, req, 0);

	if (req->ir_error == 0 && req->ir_compcode == 0) {
		if (req->ir_reply[0] & IPMI_MSG_BUFFER_FULL) {
			device_printf(sc->ipmi_dev, "message buffer full\n");
		}
		if (req->ir_reply[0] & IPMI_WDT_PRE_TIMEOUT) {
			device_printf(sc->ipmi_dev,
			    "watchdog about to go off\n");
		}
		if (req->ir_reply[0] & IPMI_MSG_AVAILABLE) {
			IPMI_ALLOC_DRIVER_REQUEST(req,
			    IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG, 0,
			    16);

			ipmi_submit_driver_request(sc, req, 0);

			device_printf(sc->ipmi_dev, "throw out message ");
			dump_buf(req->ir_reply, 16);
		}
	}
	error = req->ir_error;

	return (error);
}
#endif

static int
ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{
	struct ipmi_softc *sc;
	struct ipmi_device *dev;
	struct ipmi_request *kreq;
	struct ipmi_req *req = (struct ipmi_req *)data;
	struct ipmi_recv *recv = (struct ipmi_recv *)data;
	struct ipmi_addr addr;
#ifdef IPMICTL_SEND_COMMAND_32
	struct ipmi_req32 *req32 = (struct ipmi_req32 *)data;
	struct ipmi_recv32 *recv32 = (struct ipmi_recv32 *)data;
	union {
		struct ipmi_req req;
		struct ipmi_recv recv;
	} thunk32;
#endif
	int error, len;

	error = devfs_get_cdevpriv((void **)&dev);
	if (error)
		return (error);

	sc = cdev->si_drv1;

#ifdef IPMICTL_SEND_COMMAND_32
	/* Convert 32-bit structures to native. */
	switch (cmd) {
	case IPMICTL_SEND_COMMAND_32:
		req = &thunk32.req;
		req->addr = PTRIN(req32->addr);
		req->addr_len = req32->addr_len;
		req->msgid = req32->msgid;
		req->msg.netfn = req32->msg.netfn;
		req->msg.cmd = req32->msg.cmd;
		req->msg.data_len = req32->msg.data_len;
		req->msg.data = PTRIN(req32->msg.data);
		break;
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv = &thunk32.recv;
		recv->addr = PTRIN(recv32->addr);
		recv->addr_len = recv32->addr_len;
		recv->msg.data_len = recv32->msg.data_len;
		recv->msg.data = PTRIN(recv32->msg.data);
		break;
	}
#endif

	switch (cmd) {
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_SEND_COMMAND_32:
#endif
	case IPMICTL_SEND_COMMAND:
		/*
		 * XXX: Need to add proper handling of this.
		 */
		error = copyin(req->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		IPMI_LOCK(sc);
		/* Clear out old completed requests for this device. */
		/* XXX: This seems odd. */
		while ((kreq = TAILQ_FIRST(&dev->ipmi_completed_requests))) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			ipmi_free_request(kreq);
		}
		IPMI_UNLOCK(sc);

		kreq = ipmi_alloc_request(dev, req->msgid,
		    IPMI_ADDR(req->msg.netfn, 0), req->msg.cmd,
		    req->msg.data_len, IPMI_MAX_RX);
		error = copyin(req->msg.data, kreq->ir_request,
		    req->msg.data_len);
		if (error) {
			ipmi_free_request(kreq);
			return (error);
		}
		IPMI_LOCK(sc);
		dev->ipmi_requests++;
		error = sc->ipmi_enqueue_request(sc, kreq);
		IPMI_UNLOCK(sc);
		if (error)
			return (error);
		break;
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
#endif
	case IPMICTL_RECEIVE_MSG_TRUNC:
	case IPMICTL_RECEIVE_MSG:
		error = copyin(recv->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		IPMI_LOCK(sc);
		kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
		if (kreq == NULL) {
			IPMI_UNLOCK(sc);
			return (EAGAIN);
		}
		addr.channel = IPMI_BMC_CHANNEL;
		/* XXX */
		recv->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv->msgid = kreq->ir_msgid;
		recv->msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
		recv->msg.cmd = kreq->ir_command;
		error = kreq->ir_error;
		if (error) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			IPMI_UNLOCK(sc);
			ipmi_free_request(kreq);
			return (error);
		}
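		/*
		 * The message handed to userland is the completion code
		 * followed by the reply body, hence the extra byte.
		 */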
		len = kreq->ir_replylen + 1;
		if (recv->msg.data_len < len &&
		    (cmd == IPMICTL_RECEIVE_MSG
#ifdef IPMICTL_RECEIVE_MSG_32
		     || cmd == IPMICTL_RECEIVE_MSG_32
#endif
		    )) {
			IPMI_UNLOCK(sc);
			return (EMSGSIZE);
		}
		TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
		dev->ipmi_requests--;
		IPMI_UNLOCK(sc);
		len = min(recv->msg.data_len, len);
		recv->msg.data_len = len;
		error = copyout(&addr, recv->addr, sizeof(addr));
		if (error == 0)
			error = copyout(&kreq->ir_compcode, recv->msg.data, 1);
		if (error == 0)
			error = copyout(kreq->ir_reply, recv->msg.data + 1,
			    len - 1);
		ipmi_free_request(kreq);
		if (error)
			return (error);
		break;
	case IPMICTL_SET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_address = *(int *)data;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		*(int *)data = dev->ipmi_address;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_lun = *(int *)data & 0x3;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		*(int *)data = dev->ipmi_lun;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_GETS_EVENTS_CMD:
		/*
		device_printf(sc->ipmi_dev,
		    "IPMICTL_SET_GETS_EVENTS_CMD NA\n");
		*/
		break;
	case IPMICTL_REGISTER_FOR_CMD:
	case IPMICTL_UNREGISTER_FOR_CMD:
		return (EOPNOTSUPP);
	default:
		device_printf(sc->ipmi_dev, "Unknown IOCTL %lX\n", cmd);
		return (ENOIOCTL);
	}

#ifdef IPMICTL_SEND_COMMAND_32
	/* Update changed fields in 32-bit structures. */
	switch (cmd) {
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv32->recv_type = recv->recv_type;
		recv32->msgid = recv->msgid;
		recv32->msg.netfn = recv->msg.netfn;
		recv32->msg.cmd = recv->msg.cmd;
		recv32->msg.data_len = recv->msg.data_len;
		break;
	}
#endif
	return (0);
}

/*
 * Request management.
 */

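/*
 * Both buffers of a request are carved out of the same allocation,
 * laid out immediately after the request header:
 *
 *	[ struct ipmi_request | request buffer | reply buffer ]
 */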
static __inline void
ipmi_init_request(struct ipmi_request *req, struct ipmi_device *dev, long msgid,
    uint8_t addr, uint8_t command, size_t requestlen, size_t replylen)
{

	req->ir_owner = dev;
	req->ir_msgid = msgid;
	req->ir_addr = addr;
	req->ir_command = command;
	if (requestlen) {
		req->ir_request = (char *)&req[1];
		req->ir_requestlen = requestlen;
	}
	if (replylen) {
		req->ir_reply = (char *)&req[1] + requestlen;
		req->ir_replybuflen = replylen;
	}
}

/* Allocate a new request with request and reply buffers. */
struct ipmi_request *
ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
    uint8_t command, size_t requestlen, size_t replylen)
{
	struct ipmi_request *req;

	req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
	    M_IPMI, M_WAITOK | M_ZERO);
	ipmi_init_request(req, dev, msgid, addr, command, requestlen, replylen);
	return (req);
}

/* Free a request no longer in use. */
void
ipmi_free_request(struct ipmi_request *req)
{

	free(req, M_IPMI);
}

/* Store a processed request on the appropriate completion queue. */
void
ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
{
	struct ipmi_device *dev;

	IPMI_LOCK_ASSERT(sc);

	/*
	 * Anonymous requests (from inside the driver) always have a
	 * waiter that we awaken.
	 */
	if (req->ir_owner == NULL)
		wakeup(req);
	else {
		dev = req->ir_owner;
		TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
		selwakeup(&dev->ipmi_select);
		if (dev->ipmi_closing)
			wakeup(&dev->ipmi_requests);
	}
}

/* Perform an internal driver request. */
int
ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
    int timo)
{

	return (sc->ipmi_driver_request(sc, req, timo));
}

/*
 * Helper routine for polled system interfaces that use
 * ipmi_polled_enqueue_request() to queue requests. This routine
 * waits until a request is pending and then returns the first one.
 * If the driver is shutting down, it returns NULL.
 */
struct ipmi_request *
ipmi_dequeue_request(struct ipmi_softc *sc)
{
	struct ipmi_request *req;

	IPMI_LOCK_ASSERT(sc);

	while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests))
		cv_wait(&sc->ipmi_request_added, &sc->ipmi_requests_lock);
	if (sc->ipmi_detaching)
		return (NULL);

	req = TAILQ_FIRST(&sc->ipmi_pending_requests);
	TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
	return (req);
}

/* Default implementation of ipmi_enqueue_request() for polled interfaces. */
int
ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
{

	IPMI_LOCK_ASSERT(sc);

	TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
	cv_signal(&sc->ipmi_request_added);
	return (0);
}

/*
 * Watchdog event handler.
 */

static int
ipmi_reset_watchdog(struct ipmi_softc *sc)
{
	struct ipmi_request *req;
	int error;

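	/*
	 * Pat the watchdog. Per the IPMI spec, a completion code of
	 * 0x80 indicates the timer was never initialized; it is mapped
	 * to ENOENT below so the caller can decide what to do.
	 */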
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_RESET_WDOG, 0, 0);
	error = ipmi_submit_driver_request(sc, req, 0);
	if (error) {
		device_printf(sc->ipmi_dev, "Failed to reset watchdog\n");
	} else if (req->ir_compcode == 0x80) {
		error = ENOENT;
	} else if (req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev, "Watchdog reset returned 0x%x\n",
		    req->ir_compcode);
		error = EINVAL;
	}
	return (error);
}

static int
ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
{
	struct ipmi_request *req;
	int error;

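	/*
	 * The watchdog countdown is a 16-bit value in 100 ms units, so
	 * the longest representable timeout is 0xffff / 10 seconds.
	 */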
	if (sec > 0xffff / 10)
		return (EINVAL);

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_SET_WDOG, 6, 0);
	if (sec) {
		req->ir_request[0] = IPMI_SET_WD_TIMER_DONT_STOP
		    | IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = (wd_timer_actions & 0xff);
		req->ir_request[2] = min(0xff,
		    min(wd_pretimeout_countdown, (sec + 2) / 4));
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = (sec * 10) & 0xff;
		req->ir_request[5] = (sec * 10) >> 8;
	} else {
		req->ir_request[0] = IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = 0;
		req->ir_request[2] = 0;
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = 0;
		req->ir_request[5] = 0;
	}
	error = ipmi_submit_driver_request(sc, req, 0);
	if (error) {
		device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
	} else if (req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev, "Watchdog set returned 0x%x\n",
		    req->ir_compcode);
		error = EINVAL;
	}
	return (error);
}

static void
ipmi_wd_event(void *arg, unsigned int cmd, int *error)
{
	struct ipmi_softc *sc = arg;
	unsigned int timeout;
	int e;

	/* Ignore requests while disabled. */
	if (!on)
		return;

	/*
	 * To prevent infinite hangs, we don't let anyone pat or change
	 * the watchdog when we're shutting down. (See ipmi_shutdown_event().)
	 * However, we do want to keep patting the watchdog while we are doing
	 * a coredump.
	 */
	if (wd_in_shutdown) {
		if (dumping && sc->ipmi_watchdog_active)
			ipmi_reset_watchdog(sc);
		return;
	}

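	/*
	 * watchdog(9) encodes the timeout as log2 of an interval in
	 * nanoseconds, so truncate it to whole seconds, bumping any
	 * sub-second interval up to one second.
	 */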
	cmd &= WD_INTERVAL;
	if (cmd > 0 && cmd <= 63) {
		timeout = ((uint64_t)1 << cmd) / 1000000000;
		if (timeout == 0)
			timeout = 1;
		if (timeout != sc->ipmi_watchdog_active ||
		    wd_timer_actions != sc->ipmi_watchdog_actions ||
		    wd_pretimeout_countdown != sc->ipmi_watchdog_pretimeout) {
			e = ipmi_set_watchdog(sc, timeout);
			if (e == 0) {
				sc->ipmi_watchdog_active = timeout;
				sc->ipmi_watchdog_actions = wd_timer_actions;
				sc->ipmi_watchdog_pretimeout =
				    wd_pretimeout_countdown;
			} else {
				(void)ipmi_set_watchdog(sc, 0);
				sc->ipmi_watchdog_active = 0;
				sc->ipmi_watchdog_actions = 0;
				sc->ipmi_watchdog_pretimeout = 0;
			}
		}
		if (sc->ipmi_watchdog_active != 0) {
			e = ipmi_reset_watchdog(sc);
			if (e == 0) {
				*error = 0;
			} else {
				(void)ipmi_set_watchdog(sc, 0);
				sc->ipmi_watchdog_active = 0;
				sc->ipmi_watchdog_actions = 0;
				sc->ipmi_watchdog_pretimeout = 0;
			}
		}
	} else if (atomic_readandclear_int(&sc->ipmi_watchdog_active) != 0) {
		sc->ipmi_watchdog_actions = 0;
		sc->ipmi_watchdog_pretimeout = 0;

		e = ipmi_set_watchdog(sc, 0);
		if (e != 0 && cmd == 0)
			*error = EOPNOTSUPP;
	}
}

static void
ipmi_shutdown_event(void *arg, unsigned int cmd, int *error)
{
	struct ipmi_softc *sc = arg;

	/* Ignore event if disabled. */
	if (!on)
		return;

	/*
	 * A positive wd_shutdown_countdown re-arms the watchdog, zero
	 * disables it, and a negative value leaves the existing state
	 * alone.
	 *
	 * Revert to using a power cycle to ensure that the watchdog will
	 * do something useful here. Having the watchdog send an NMI
	 * instead is useless during shutdown, and might be ignored if an
	 * NMI already triggered.
	 */

	wd_in_shutdown = true;
	if (wd_shutdown_countdown == 0) {
		/* Disable the watchdog. */
		ipmi_set_watchdog(sc, 0);
		sc->ipmi_watchdog_active = 0;
	} else if (wd_shutdown_countdown > 0) {
		/* Set the desired action and time, and reset the watchdog. */
		wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
		ipmi_set_watchdog(sc, wd_shutdown_countdown);
		sc->ipmi_watchdog_active = wd_shutdown_countdown;
		ipmi_reset_watchdog(sc);
	}
}

static void
ipmi_power_cycle(void *arg, int howto)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;

	/*
	 * Ignore everything except power cycling requests.
	 */
	if ((howto & RB_POWERCYCLE) == 0)
		return;

	device_printf(sc->ipmi_dev, "Power cycling using IPMI\n");

	/*
	 * Send a CHASSIS_CONTROL command to the CHASSIS device, subcommand 2
	 * as described in IPMI v2.0 spec section 28.3.
	 */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_CHASSIS_REQUEST, 0),
	    IPMI_CHASSIS_CONTROL, 1, 0);
	req->ir_request[0] = IPMI_CC_POWER_CYCLE;

	ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);

	if (req->ir_error != 0 || req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev,
		    "Power cycling via IPMI failed code %#x %#x\n",
		    req->ir_error, req->ir_compcode);
		return;
	}

	/*
	 * BMCs are notoriously slow, so give the BMC cycle_wait seconds for
	 * the power-down leg of the power cycle. If that fails, fall back to
	 * the next handler in the shutdown_final chain and/or the platform
	 * failsafe.
	 */
	DELAY(cycle_wait * 1000 * 1000);
	device_printf(sc->ipmi_dev, "Power cycling via IPMI timed out\n");
}

static void
ipmi_startup(void *arg)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;
	device_t dev;
	int error, i;

	config_intrhook_disestablish(&sc->ipmi_ich);
	dev = sc->ipmi_dev;

	/* Initialize interface-independent state. */
	mtx_init(&sc->ipmi_requests_lock, "ipmi requests", NULL, MTX_DEF);
	mtx_init(&sc->ipmi_io_lock, "ipmi io", NULL, MTX_DEF);
	cv_init(&sc->ipmi_request_added, "ipmireq");
	TAILQ_INIT(&sc->ipmi_pending_requests);

	/* Initialize interface-dependent state. */
	error = sc->ipmi_startup(sc);
	if (error) {
		device_printf(dev, "Failed to initialize interface: %d\n",
		    error);
		return;
	}

	/* Send a GET_DEVICE_ID request. */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_DEVICE_ID, 0, 15);

	error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
	if (error == EWOULDBLOCK) {
		device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
		return;
	} else if (error) {
		device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
		return;
	} else if (req->ir_compcode != 0) {
		device_printf(dev,
		    "Bad completion code for GET_DEVICE_ID: %d\n",
		    req->ir_compcode);
		return;
	} else if (req->ir_replylen < 5) {
		device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
		    req->ir_replylen);
		return;
	}

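	/*
	 * Decode the Get Device ID reply (see the Get Device ID command
	 * in the IPMI v2.0 spec): byte 1 holds the device revision,
	 * byte 2 the firmware major revision, byte 3 the BCD-encoded
	 * firmware minor revision, byte 4 the BCD-encoded IPMI version,
	 * and byte 5 the additional device support mask.
	 */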
	device_printf(dev, "IPMI device rev. %d, firmware rev. %d.%d%d, "
	    "version %d.%d, device support mask %#x\n",
	    req->ir_reply[1] & 0x0f,
	    req->ir_reply[2] & 0x7f, req->ir_reply[3] >> 4, req->ir_reply[3] & 0x0f,
	    req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4, req->ir_reply[5]);

	sc->ipmi_dev_support = req->ir_reply[5];

	IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_CLEAR_FLAGS, 1, 0);

	ipmi_submit_driver_request(sc, req, 0);

	/* XXX: Magic numbers */
	if (req->ir_compcode == 0xc0) {
		device_printf(dev, "Clear flags is busy\n");
	}
	if (req->ir_compcode == 0xc1) {
		device_printf(dev, "Clear flags illegal\n");
	}

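	/* Probe channels until Get Channel Info fails, to count them. */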
	for (i = 0; i < 8; i++) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_CHANNEL_INFO, 1, 0);
		req->ir_request[0] = i;

		error = ipmi_submit_driver_request(sc, req, 0);

		if (error != 0 || req->ir_compcode != 0)
			break;
	}
	device_printf(dev, "Number of channels %d\n", i);

	/*
	 * Probe for watchdog, but only for backends which support
	 * polled driver requests.
	 */
	if (wd_init_enable && sc->ipmi_driver_requests_polled) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_WDOG, 0, 0);

		error = ipmi_submit_driver_request(sc, req, 0);

		if (error == 0 && req->ir_compcode == 0x00) {
			device_printf(dev, "Attached watchdog\n");
			/* Register the watchdog event handler. */
			sc->ipmi_watchdog_tag = EVENTHANDLER_REGISTER(
			    watchdog_list, ipmi_wd_event, sc, 0);
			sc->ipmi_shutdown_tag = EVENTHANDLER_REGISTER(
			    shutdown_pre_sync, ipmi_shutdown_event,
			    sc, 0);
		}
	}

	sc->ipmi_cdev = make_dev(&ipmi_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
	if (sc->ipmi_cdev == NULL) {
		device_printf(dev, "Failed to create cdev\n");
		return;
	}
	sc->ipmi_cdev->si_drv1 = sc;

	/*
	 * Set initial watchdog state. If desired, set an initial
	 * watchdog on startup. Or, if the watchdog device is
	 * disabled, clear any existing watchdog.
	 */
	if (on && wd_startup_countdown > 0) {
		wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
		if (ipmi_set_watchdog(sc, wd_startup_countdown) == 0 &&
		    ipmi_reset_watchdog(sc) == 0) {
			sc->ipmi_watchdog_active = wd_startup_countdown;
			sc->ipmi_watchdog_actions = wd_timer_actions;
			sc->ipmi_watchdog_pretimeout = wd_pretimeout_countdown;
		} else
			(void)ipmi_set_watchdog(sc, 0);
		ipmi_reset_watchdog(sc);
	} else if (!on)
		(void)ipmi_set_watchdog(sc, 0);
	/*
	 * Register a handler to power cycle the system via IPMI on
	 * shutdown. We use SHUTDOWN_PRI_LAST - 2 since we only handle
	 * power cycle reboots; all other kinds are left to later
	 * handlers. Only do this when the BMC supports the Chassis
	 * device.
	 */
	if (sc->ipmi_dev_support & IPMI_ADS_CHASSIS) {
		device_printf(dev, "Establishing power cycle handler\n");
		sc->ipmi_power_cycle_tag = EVENTHANDLER_REGISTER(shutdown_final,
		    ipmi_power_cycle, sc, SHUTDOWN_PRI_LAST - 2);
	}
}

int
ipmi_attach(device_t dev)
{
	struct ipmi_softc *sc = device_get_softc(dev);
	int error;

	if (sc->ipmi_irq_res != NULL && sc->ipmi_intr != NULL) {
		error = bus_setup_intr(dev, sc->ipmi_irq_res, INTR_TYPE_MISC,
		    NULL, sc->ipmi_intr, sc, &sc->ipmi_irq);
		if (error) {
			device_printf(dev, "can't set up interrupt\n");
			return (error);
		}
	}

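	/*
	 * Defer the remaining (potentially slow) BMC initialization to
	 * ipmi_startup() via an interrupt config hook.
	 */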
	bzero(&sc->ipmi_ich, sizeof(struct intr_config_hook));
	sc->ipmi_ich.ich_func = ipmi_startup;
	sc->ipmi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->ipmi_ich) != 0) {
		device_printf(dev, "can't establish configuration hook\n");
		return (ENOMEM);
	}

	ipmi_attached = 1;
	return (0);
}

int
ipmi_detach(device_t dev)
{
	struct ipmi_softc *sc;

	sc = device_get_softc(dev);

	/* Fail if there are any open handles. */
	IPMI_LOCK(sc);
	if (sc->ipmi_opened) {
		IPMI_UNLOCK(sc);
		return (EBUSY);
	}
	IPMI_UNLOCK(sc);
	if (sc->ipmi_cdev)
		destroy_dev(sc->ipmi_cdev);

	/* Detach from watchdog handling and turn off watchdog. */
	if (sc->ipmi_shutdown_tag)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync,
		    sc->ipmi_shutdown_tag);
	if (sc->ipmi_watchdog_tag) {
		EVENTHANDLER_DEREGISTER(watchdog_list, sc->ipmi_watchdog_tag);
		ipmi_set_watchdog(sc, 0);
	}

	/* Detach from shutdown handling for power cycle reboot. */
	if (sc->ipmi_power_cycle_tag)
		EVENTHANDLER_DEREGISTER(shutdown_final,
		    sc->ipmi_power_cycle_tag);

	/* XXX: should use shutdown callout I think. */
	/* If the backend uses a kthread, shut it down. */
	IPMI_LOCK(sc);
	sc->ipmi_detaching = 1;
	if (sc->ipmi_kthread) {
		cv_broadcast(&sc->ipmi_request_added);
		msleep(sc->ipmi_kthread, &sc->ipmi_requests_lock, 0,
		    "ipmi_wait", 0);
	}
	IPMI_UNLOCK(sc);
	if (sc->ipmi_irq)
		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);

	ipmi_release_resources(dev);
	mtx_destroy(&sc->ipmi_io_lock);
	mtx_destroy(&sc->ipmi_requests_lock);
	return (0);
}

void
ipmi_release_resources(device_t dev)
{
	struct ipmi_softc *sc;
	int i;

	sc = device_get_softc(dev);
	if (sc->ipmi_irq)
		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
	if (sc->ipmi_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, sc->ipmi_irq_rid,
		    sc->ipmi_irq_res);
	for (i = 0; i < MAX_RES; i++)
		if (sc->ipmi_io_res[i])
			bus_release_resource(dev, sc->ipmi_io_type,
			    sc->ipmi_io_rid + i, sc->ipmi_io_res[i]);
}

devclass_t ipmi_devclass;

/* XXX: Why? */
static void
ipmi_unload(void *arg)
{
	device_t *devs;
	int count;
	int i;

	if (ipmi_devclass == NULL)
		return;
	if (devclass_get_devices(ipmi_devclass, &devs, &count) != 0)
		return;
	for (i = 0; i < count; i++)
		device_delete_child(device_get_parent(devs[i]), devs[i]);
	free(devs, M_TEMP);
}
SYSUNINIT(ipmi_unload, SI_SUB_DRIVERS, SI_ORDER_FIRST, ipmi_unload, NULL);

#ifdef IPMI_DEBUG
static void
dump_buf(u_char *data, int len)
{
	char buf[20];
	char line[1024];
	char temp[30];
	int count = 0;
	int i = 0;

	printf("Address %p len %d\n", data, len);
	if (len > 256)
		len = 256;
	line[0] = '\000';
	for (; len > 0; len--, data++) {
		sprintf(temp, "%02x ", *data);
		strcat(line, temp);
		if (*data >= ' ' && *data <= '~')
			buf[count] = *data;
		else
			buf[count] = '.';
		if (++count == 16) {
			buf[count] = '\000';
			count = 0;
			printf(" %3x %s %s\n", i, line, buf);
			i += 16;
			line[0] = '\000';
		}
	}
	buf[count] = '\000';

	/* Pad the final partial line so the ASCII column lines up. */
	for (; count != 16; count++) {
		strcat(line, "   ");
	}
	printf(" %3x %s %s\n", i, line, buf);
}
#endif