1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved. 4 * Copyright (c) 2016 Jiri Pirko <[email protected]> 5 */ 6 7 #include <net/genetlink.h> 8 #include <net/sock.h> 9 #include "devl_internal.h" 10 11 struct devlink_reload_combination { 12 enum devlink_reload_action action; 13 enum devlink_reload_limit limit; 14 }; 15 16 static const struct devlink_reload_combination devlink_reload_invalid_combinations[] = { 17 { 18 /* can't reinitialize driver with no down time */ 19 .action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT, 20 .limit = DEVLINK_RELOAD_LIMIT_NO_RESET, 21 }, 22 }; 23 24 static bool 25 devlink_reload_combination_is_invalid(enum devlink_reload_action action, 26 enum devlink_reload_limit limit) 27 { 28 int i; 29 30 for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++) 31 if (devlink_reload_invalid_combinations[i].action == action && 32 devlink_reload_invalid_combinations[i].limit == limit) 33 return true; 34 return false; 35 } 36 37 static bool 38 devlink_reload_action_is_supported(struct devlink *devlink, enum devlink_reload_action action) 39 { 40 return test_bit(action, &devlink->ops->reload_actions); 41 } 42 43 static bool 44 devlink_reload_limit_is_supported(struct devlink *devlink, enum devlink_reload_limit limit) 45 { 46 return test_bit(limit, &devlink->ops->reload_limits); 47 } 48 49 static int devlink_reload_stat_put(struct sk_buff *msg, 50 enum devlink_reload_limit limit, u32 value) 51 { 52 struct nlattr *reload_stats_entry; 53 54 reload_stats_entry = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS_ENTRY); 55 if (!reload_stats_entry) 56 return -EMSGSIZE; 57 58 if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_STATS_LIMIT, limit) || 59 nla_put_u32(msg, DEVLINK_ATTR_RELOAD_STATS_VALUE, value)) 60 goto nla_put_failure; 61 nla_nest_end(msg, reload_stats_entry); 62 return 0; 63 64 nla_put_failure: 65 nla_nest_cancel(msg, reload_stats_entry); 66 return -EMSGSIZE; 67 } 68 69 static int 70 devlink_reload_stats_put(struct sk_buff *msg, struct devlink *devlink, bool is_remote) 71 { 72 struct nlattr *reload_stats_attr, *act_info, *act_stats; 73 int i, j, stat_idx; 74 u32 value; 75 76 if (!is_remote) 77 reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS); 78 else 79 reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_REMOTE_RELOAD_STATS); 80 81 if (!reload_stats_attr) 82 return -EMSGSIZE; 83 84 for (i = 0; i <= DEVLINK_RELOAD_ACTION_MAX; i++) { 85 if ((!is_remote && 86 !devlink_reload_action_is_supported(devlink, i)) || 87 i == DEVLINK_RELOAD_ACTION_UNSPEC) 88 continue; 89 act_info = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_INFO); 90 if (!act_info) 91 goto nla_put_failure; 92 93 if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_ACTION, i)) 94 goto action_info_nest_cancel; 95 act_stats = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_STATS); 96 if (!act_stats) 97 goto action_info_nest_cancel; 98 99 for (j = 0; j <= DEVLINK_RELOAD_LIMIT_MAX; j++) { 100 /* Remote stats are shown even if not locally supported. 101 * Stats of actions with unspecified limit are shown 102 * though drivers don't need to register unspecified 103 * limit. 104 */ 105 if ((!is_remote && j != DEVLINK_RELOAD_LIMIT_UNSPEC && 106 !devlink_reload_limit_is_supported(devlink, j)) || 107 devlink_reload_combination_is_invalid(i, j)) 108 continue; 109 110 stat_idx = j * __DEVLINK_RELOAD_ACTION_MAX + i; 111 if (!is_remote) 112 value = devlink->stats.reload_stats[stat_idx]; 113 else 114 value = devlink->stats.remote_reload_stats[stat_idx]; 115 if (devlink_reload_stat_put(msg, j, value)) 116 goto action_stats_nest_cancel; 117 } 118 nla_nest_end(msg, act_stats); 119 nla_nest_end(msg, act_info); 120 } 121 nla_nest_end(msg, reload_stats_attr); 122 return 0; 123 124 action_stats_nest_cancel: 125 nla_nest_cancel(msg, act_stats); 126 action_info_nest_cancel: 127 nla_nest_cancel(msg, act_info); 128 nla_put_failure: 129 nla_nest_cancel(msg, reload_stats_attr); 130 return -EMSGSIZE; 131 } 132 133 static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink, 134 enum devlink_command cmd, u32 portid, 135 u32 seq, int flags) 136 { 137 struct nlattr *dev_stats; 138 void *hdr; 139 140 hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); 141 if (!hdr) 142 return -EMSGSIZE; 143 144 if (devlink_nl_put_handle(msg, devlink)) 145 goto nla_put_failure; 146 if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_FAILED, devlink->reload_failed)) 147 goto nla_put_failure; 148 149 dev_stats = nla_nest_start(msg, DEVLINK_ATTR_DEV_STATS); 150 if (!dev_stats) 151 goto nla_put_failure; 152 153 if (devlink_reload_stats_put(msg, devlink, false)) 154 goto dev_stats_nest_cancel; 155 if (devlink_reload_stats_put(msg, devlink, true)) 156 goto dev_stats_nest_cancel; 157 158 nla_nest_end(msg, dev_stats); 159 genlmsg_end(msg, hdr); 160 return 0; 161 162 dev_stats_nest_cancel: 163 nla_nest_cancel(msg, dev_stats); 164 nla_put_failure: 165 genlmsg_cancel(msg, hdr); 166 return -EMSGSIZE; 167 } 168 169 void devlink_notify(struct devlink *devlink, enum devlink_command cmd) 170 { 171 struct sk_buff *msg; 172 int err; 173 174 WARN_ON(cmd != DEVLINK_CMD_NEW && cmd != DEVLINK_CMD_DEL); 175 WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED)); 176 177 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 178 if (!msg) 179 return; 180 181 err = devlink_nl_fill(msg, devlink, cmd, 0, 0, 0); 182 if (err) { 183 nlmsg_free(msg); 184 return; 185 } 186 187 genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), 188 msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); 189 } 190 191 int devlink_nl_cmd_get_doit(struct sk_buff *skb, struct genl_info *info) 192 { 193 struct devlink *devlink = info->user_ptr[0]; 194 struct sk_buff *msg; 195 int err; 196 197 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 198 if (!msg) 199 return -ENOMEM; 200 201 err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW, 202 info->snd_portid, info->snd_seq, 0); 203 if (err) { 204 nlmsg_free(msg); 205 return err; 206 } 207 208 return genlmsg_reply(msg, info); 209 } 210 211 static int 212 devlink_nl_cmd_get_dump_one(struct sk_buff *msg, struct devlink *devlink, 213 struct netlink_callback *cb) 214 { 215 return devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW, 216 NETLINK_CB(cb->skb).portid, 217 cb->nlh->nlmsg_seq, NLM_F_MULTI); 218 } 219 220 const struct devlink_cmd devl_cmd_get = { 221 .dump_one = devlink_nl_cmd_get_dump_one, 222 }; 223 224 static void devlink_reload_failed_set(struct devlink *devlink, 225 bool reload_failed) 226 { 227 if (devlink->reload_failed == reload_failed) 228 return; 229 devlink->reload_failed = reload_failed; 230 devlink_notify(devlink, DEVLINK_CMD_NEW); 231 } 232 233 bool devlink_is_reload_failed(const struct devlink *devlink) 234 { 235 return devlink->reload_failed; 236 } 237 EXPORT_SYMBOL_GPL(devlink_is_reload_failed); 238 239 static void 240 __devlink_reload_stats_update(struct devlink *devlink, u32 *reload_stats, 241 enum devlink_reload_limit limit, u32 actions_performed) 242 { 243 unsigned long actions = actions_performed; 244 int stat_idx; 245 int action; 246 247 for_each_set_bit(action, &actions, __DEVLINK_RELOAD_ACTION_MAX) { 248 stat_idx = limit * __DEVLINK_RELOAD_ACTION_MAX + action; 249 reload_stats[stat_idx]++; 250 } 251 devlink_notify(devlink, DEVLINK_CMD_NEW); 252 } 253 254 static void 255 devlink_reload_stats_update(struct devlink *devlink, enum devlink_reload_limit limit, 256 u32 actions_performed) 257 { 258 __devlink_reload_stats_update(devlink, devlink->stats.reload_stats, limit, 259 actions_performed); 260 } 261 262 /** 263 * devlink_remote_reload_actions_performed - Update devlink on reload actions 264 * performed which are not a direct result of devlink reload call. 265 * 266 * This should be called by a driver after performing reload actions in case it was not 267 * a result of devlink reload call. For example fw_activate was performed as a result 268 * of devlink reload triggered fw_activate on another host. 269 * The motivation for this function is to keep data on reload actions performed on this 270 * function whether it was done due to direct devlink reload call or not. 271 * 272 * @devlink: devlink 273 * @limit: reload limit 274 * @actions_performed: bitmask of actions performed 275 */ 276 void devlink_remote_reload_actions_performed(struct devlink *devlink, 277 enum devlink_reload_limit limit, 278 u32 actions_performed) 279 { 280 if (WARN_ON(!actions_performed || 281 actions_performed & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) || 282 actions_performed >= BIT(__DEVLINK_RELOAD_ACTION_MAX) || 283 limit > DEVLINK_RELOAD_LIMIT_MAX)) 284 return; 285 286 __devlink_reload_stats_update(devlink, devlink->stats.remote_reload_stats, limit, 287 actions_performed); 288 } 289 EXPORT_SYMBOL_GPL(devlink_remote_reload_actions_performed); 290 291 static struct net *devlink_netns_get(struct sk_buff *skb, 292 struct genl_info *info) 293 { 294 struct nlattr *netns_pid_attr = info->attrs[DEVLINK_ATTR_NETNS_PID]; 295 struct nlattr *netns_fd_attr = info->attrs[DEVLINK_ATTR_NETNS_FD]; 296 struct nlattr *netns_id_attr = info->attrs[DEVLINK_ATTR_NETNS_ID]; 297 struct net *net; 298 299 if (!!netns_pid_attr + !!netns_fd_attr + !!netns_id_attr > 1) { 300 NL_SET_ERR_MSG_MOD(info->extack, "multiple netns identifying attributes specified"); 301 return ERR_PTR(-EINVAL); 302 } 303 304 if (netns_pid_attr) { 305 net = get_net_ns_by_pid(nla_get_u32(netns_pid_attr)); 306 } else if (netns_fd_attr) { 307 net = get_net_ns_by_fd(nla_get_u32(netns_fd_attr)); 308 } else if (netns_id_attr) { 309 net = get_net_ns_by_id(sock_net(skb->sk), 310 nla_get_u32(netns_id_attr)); 311 if (!net) 312 net = ERR_PTR(-EINVAL); 313 } else { 314 WARN_ON(1); 315 net = ERR_PTR(-EINVAL); 316 } 317 if (IS_ERR(net)) { 318 NL_SET_ERR_MSG_MOD(info->extack, "Unknown network namespace"); 319 return ERR_PTR(-EINVAL); 320 } 321 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { 322 put_net(net); 323 return ERR_PTR(-EPERM); 324 } 325 return net; 326 } 327 328 static void devlink_reload_netns_change(struct devlink *devlink, 329 struct net *curr_net, 330 struct net *dest_net) 331 { 332 /* Userspace needs to be notified about devlink objects 333 * removed from original and entering new network namespace. 334 * The rest of the devlink objects are re-created during 335 * reload process so the notifications are generated separatelly. 336 */ 337 devlink_notify_unregister(devlink); 338 move_netdevice_notifier_net(curr_net, dest_net, 339 &devlink->netdevice_nb); 340 write_pnet(&devlink->_net, dest_net); 341 devlink_notify_register(devlink); 342 } 343 344 int devlink_reload(struct devlink *devlink, struct net *dest_net, 345 enum devlink_reload_action action, 346 enum devlink_reload_limit limit, 347 u32 *actions_performed, struct netlink_ext_ack *extack) 348 { 349 u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE]; 350 struct net *curr_net; 351 int err; 352 353 memcpy(remote_reload_stats, devlink->stats.remote_reload_stats, 354 sizeof(remote_reload_stats)); 355 356 err = devlink->ops->reload_down(devlink, !!dest_net, action, limit, extack); 357 if (err) 358 return err; 359 360 curr_net = devlink_net(devlink); 361 if (dest_net && !net_eq(dest_net, curr_net)) 362 devlink_reload_netns_change(devlink, curr_net, dest_net); 363 364 err = devlink->ops->reload_up(devlink, action, limit, actions_performed, extack); 365 devlink_reload_failed_set(devlink, !!err); 366 if (err) 367 return err; 368 369 WARN_ON(!(*actions_performed & BIT(action))); 370 /* Catch driver on updating the remote action within devlink reload */ 371 WARN_ON(memcmp(remote_reload_stats, devlink->stats.remote_reload_stats, 372 sizeof(remote_reload_stats))); 373 devlink_reload_stats_update(devlink, limit, *actions_performed); 374 return 0; 375 } 376 377 static int 378 devlink_nl_reload_actions_performed_snd(struct devlink *devlink, u32 actions_performed, 379 enum devlink_command cmd, struct genl_info *info) 380 { 381 struct sk_buff *msg; 382 void *hdr; 383 384 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 385 if (!msg) 386 return -ENOMEM; 387 388 hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &devlink_nl_family, 0, cmd); 389 if (!hdr) 390 goto free_msg; 391 392 if (devlink_nl_put_handle(msg, devlink)) 393 goto nla_put_failure; 394 395 if (nla_put_bitfield32(msg, DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED, actions_performed, 396 actions_performed)) 397 goto nla_put_failure; 398 genlmsg_end(msg, hdr); 399 400 return genlmsg_reply(msg, info); 401 402 nla_put_failure: 403 genlmsg_cancel(msg, hdr); 404 free_msg: 405 nlmsg_free(msg); 406 return -EMSGSIZE; 407 } 408 409 int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info) 410 { 411 struct devlink *devlink = info->user_ptr[0]; 412 enum devlink_reload_action action; 413 enum devlink_reload_limit limit; 414 struct net *dest_net = NULL; 415 u32 actions_performed; 416 int err; 417 418 err = devlink_resources_validate(devlink, NULL, info); 419 if (err) { 420 NL_SET_ERR_MSG_MOD(info->extack, "resources size validation failed"); 421 return err; 422 } 423 424 if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION]) 425 action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]); 426 else 427 action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT; 428 429 if (!devlink_reload_action_is_supported(devlink, action)) { 430 NL_SET_ERR_MSG_MOD(info->extack, 431 "Requested reload action is not supported by the driver"); 432 return -EOPNOTSUPP; 433 } 434 435 limit = DEVLINK_RELOAD_LIMIT_UNSPEC; 436 if (info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]) { 437 struct nla_bitfield32 limits; 438 u32 limits_selected; 439 440 limits = nla_get_bitfield32(info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]); 441 limits_selected = limits.value & limits.selector; 442 if (!limits_selected) { 443 NL_SET_ERR_MSG_MOD(info->extack, "Invalid limit selected"); 444 return -EINVAL; 445 } 446 for (limit = 0 ; limit <= DEVLINK_RELOAD_LIMIT_MAX ; limit++) 447 if (limits_selected & BIT(limit)) 448 break; 449 /* UAPI enables multiselection, but currently it is not used */ 450 if (limits_selected != BIT(limit)) { 451 NL_SET_ERR_MSG_MOD(info->extack, 452 "Multiselection of limit is not supported"); 453 return -EOPNOTSUPP; 454 } 455 if (!devlink_reload_limit_is_supported(devlink, limit)) { 456 NL_SET_ERR_MSG_MOD(info->extack, 457 "Requested limit is not supported by the driver"); 458 return -EOPNOTSUPP; 459 } 460 if (devlink_reload_combination_is_invalid(action, limit)) { 461 NL_SET_ERR_MSG_MOD(info->extack, 462 "Requested limit is invalid for this action"); 463 return -EINVAL; 464 } 465 } 466 if (info->attrs[DEVLINK_ATTR_NETNS_PID] || 467 info->attrs[DEVLINK_ATTR_NETNS_FD] || 468 info->attrs[DEVLINK_ATTR_NETNS_ID]) { 469 dest_net = devlink_netns_get(skb, info); 470 if (IS_ERR(dest_net)) 471 return PTR_ERR(dest_net); 472 } 473 474 err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack); 475 476 if (dest_net) 477 put_net(dest_net); 478 479 if (err) 480 return err; 481 /* For backward compatibility generate reply only if attributes used by user */ 482 if (!info->attrs[DEVLINK_ATTR_RELOAD_ACTION] && !info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]) 483 return 0; 484 485 return devlink_nl_reload_actions_performed_snd(devlink, actions_performed, 486 DEVLINK_CMD_RELOAD, info); 487 } 488 489 bool devlink_reload_actions_valid(const struct devlink_ops *ops) 490 { 491 const struct devlink_reload_combination *comb; 492 int i; 493 494 if (!devlink_reload_supported(ops)) { 495 if (WARN_ON(ops->reload_actions)) 496 return false; 497 return true; 498 } 499 500 if (WARN_ON(!ops->reload_actions || 501 ops->reload_actions & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) || 502 ops->reload_actions >= BIT(__DEVLINK_RELOAD_ACTION_MAX))) 503 return false; 504 505 if (WARN_ON(ops->reload_limits & BIT(DEVLINK_RELOAD_LIMIT_UNSPEC) || 506 ops->reload_limits >= BIT(__DEVLINK_RELOAD_LIMIT_MAX))) 507 return false; 508 509 for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++) { 510 comb = &devlink_reload_invalid_combinations[i]; 511 if (ops->reload_actions == BIT(comb->action) && 512 ops->reload_limits == BIT(comb->limit)) 513 return false; 514 } 515 return true; 516 } 517 518 static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink, 519 enum devlink_command cmd, u32 portid, 520 u32 seq, int flags) 521 { 522 const struct devlink_ops *ops = devlink->ops; 523 enum devlink_eswitch_encap_mode encap_mode; 524 u8 inline_mode; 525 void *hdr; 526 int err = 0; 527 u16 mode; 528 529 hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); 530 if (!hdr) 531 return -EMSGSIZE; 532 533 err = devlink_nl_put_handle(msg, devlink); 534 if (err) 535 goto nla_put_failure; 536 537 if (ops->eswitch_mode_get) { 538 err = ops->eswitch_mode_get(devlink, &mode); 539 if (err) 540 goto nla_put_failure; 541 err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode); 542 if (err) 543 goto nla_put_failure; 544 } 545 546 if (ops->eswitch_inline_mode_get) { 547 err = ops->eswitch_inline_mode_get(devlink, &inline_mode); 548 if (err) 549 goto nla_put_failure; 550 err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_INLINE_MODE, 551 inline_mode); 552 if (err) 553 goto nla_put_failure; 554 } 555 556 if (ops->eswitch_encap_mode_get) { 557 err = ops->eswitch_encap_mode_get(devlink, &encap_mode); 558 if (err) 559 goto nla_put_failure; 560 err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_ENCAP_MODE, encap_mode); 561 if (err) 562 goto nla_put_failure; 563 } 564 565 genlmsg_end(msg, hdr); 566 return 0; 567 568 nla_put_failure: 569 genlmsg_cancel(msg, hdr); 570 return err; 571 } 572 573 int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb, struct genl_info *info) 574 { 575 struct devlink *devlink = info->user_ptr[0]; 576 struct sk_buff *msg; 577 int err; 578 579 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 580 if (!msg) 581 return -ENOMEM; 582 583 err = devlink_nl_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_GET, 584 info->snd_portid, info->snd_seq, 0); 585 586 if (err) { 587 nlmsg_free(msg); 588 return err; 589 } 590 591 return genlmsg_reply(msg, info); 592 } 593 594 int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb, struct genl_info *info) 595 { 596 struct devlink *devlink = info->user_ptr[0]; 597 const struct devlink_ops *ops = devlink->ops; 598 enum devlink_eswitch_encap_mode encap_mode; 599 u8 inline_mode; 600 int err = 0; 601 u16 mode; 602 603 if (info->attrs[DEVLINK_ATTR_ESWITCH_MODE]) { 604 if (!ops->eswitch_mode_set) 605 return -EOPNOTSUPP; 606 mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]); 607 err = devlink_rate_nodes_check(devlink, mode, info->extack); 608 if (err) 609 return err; 610 err = ops->eswitch_mode_set(devlink, mode, info->extack); 611 if (err) 612 return err; 613 } 614 615 if (info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]) { 616 if (!ops->eswitch_inline_mode_set) 617 return -EOPNOTSUPP; 618 inline_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]); 619 err = ops->eswitch_inline_mode_set(devlink, inline_mode, 620 info->extack); 621 if (err) 622 return err; 623 } 624 625 if (info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]) { 626 if (!ops->eswitch_encap_mode_set) 627 return -EOPNOTSUPP; 628 encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]); 629 err = ops->eswitch_encap_mode_set(devlink, encap_mode, 630 info->extack); 631 if (err) 632 return err; 633 } 634 635 return 0; 636 } 637 638 int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn) 639 { 640 if (!req->msg) 641 return 0; 642 return nla_put_string(req->msg, DEVLINK_ATTR_INFO_SERIAL_NUMBER, sn); 643 } 644 EXPORT_SYMBOL_GPL(devlink_info_serial_number_put); 645 646 int devlink_info_board_serial_number_put(struct devlink_info_req *req, 647 const char *bsn) 648 { 649 if (!req->msg) 650 return 0; 651 return nla_put_string(req->msg, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, 652 bsn); 653 } 654 EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put); 655 656 static int devlink_info_version_put(struct devlink_info_req *req, int attr, 657 const char *version_name, 658 const char *version_value, 659 enum devlink_info_version_type version_type) 660 { 661 struct nlattr *nest; 662 int err; 663 664 if (req->version_cb) 665 req->version_cb(version_name, version_type, 666 req->version_cb_priv); 667 668 if (!req->msg) 669 return 0; 670 671 nest = nla_nest_start_noflag(req->msg, attr); 672 if (!nest) 673 return -EMSGSIZE; 674 675 err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_NAME, 676 version_name); 677 if (err) 678 goto nla_put_failure; 679 680 err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_VALUE, 681 version_value); 682 if (err) 683 goto nla_put_failure; 684 685 nla_nest_end(req->msg, nest); 686 687 return 0; 688 689 nla_put_failure: 690 nla_nest_cancel(req->msg, nest); 691 return err; 692 } 693 694 int devlink_info_version_fixed_put(struct devlink_info_req *req, 695 const char *version_name, 696 const char *version_value) 697 { 698 return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_FIXED, 699 version_name, version_value, 700 DEVLINK_INFO_VERSION_TYPE_NONE); 701 } 702 EXPORT_SYMBOL_GPL(devlink_info_version_fixed_put); 703 704 int devlink_info_version_stored_put(struct devlink_info_req *req, 705 const char *version_name, 706 const char *version_value) 707 { 708 return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED, 709 version_name, version_value, 710 DEVLINK_INFO_VERSION_TYPE_NONE); 711 } 712 EXPORT_SYMBOL_GPL(devlink_info_version_stored_put); 713 714 int devlink_info_version_stored_put_ext(struct devlink_info_req *req, 715 const char *version_name, 716 const char *version_value, 717 enum devlink_info_version_type version_type) 718 { 719 return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED, 720 version_name, version_value, 721 version_type); 722 } 723 EXPORT_SYMBOL_GPL(devlink_info_version_stored_put_ext); 724 725 int devlink_info_version_running_put(struct devlink_info_req *req, 726 const char *version_name, 727 const char *version_value) 728 { 729 return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING, 730 version_name, version_value, 731 DEVLINK_INFO_VERSION_TYPE_NONE); 732 } 733 EXPORT_SYMBOL_GPL(devlink_info_version_running_put); 734 735 int devlink_info_version_running_put_ext(struct devlink_info_req *req, 736 const char *version_name, 737 const char *version_value, 738 enum devlink_info_version_type version_type) 739 { 740 return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING, 741 version_name, version_value, 742 version_type); 743 } 744 EXPORT_SYMBOL_GPL(devlink_info_version_running_put_ext); 745 746 static int devlink_nl_driver_info_get(struct device_driver *drv, 747 struct devlink_info_req *req) 748 { 749 if (!drv) 750 return 0; 751 752 if (drv->name[0]) 753 return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME, 754 drv->name); 755 756 return 0; 757 } 758 759 static int 760 devlink_nl_info_fill(struct sk_buff *msg, struct devlink *devlink, 761 enum devlink_command cmd, u32 portid, 762 u32 seq, int flags, struct netlink_ext_ack *extack) 763 { 764 struct device *dev = devlink_to_dev(devlink); 765 struct devlink_info_req req = {}; 766 void *hdr; 767 int err; 768 769 hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); 770 if (!hdr) 771 return -EMSGSIZE; 772 773 err = -EMSGSIZE; 774 if (devlink_nl_put_handle(msg, devlink)) 775 goto err_cancel_msg; 776 777 req.msg = msg; 778 if (devlink->ops->info_get) { 779 err = devlink->ops->info_get(devlink, &req, extack); 780 if (err) 781 goto err_cancel_msg; 782 } 783 784 err = devlink_nl_driver_info_get(dev->driver, &req); 785 if (err) 786 goto err_cancel_msg; 787 788 genlmsg_end(msg, hdr); 789 return 0; 790 791 err_cancel_msg: 792 genlmsg_cancel(msg, hdr); 793 return err; 794 } 795 796 int devlink_nl_cmd_info_get_doit(struct sk_buff *skb, struct genl_info *info) 797 { 798 struct devlink *devlink = info->user_ptr[0]; 799 struct sk_buff *msg; 800 int err; 801 802 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 803 if (!msg) 804 return -ENOMEM; 805 806 err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET, 807 info->snd_portid, info->snd_seq, 0, 808 info->extack); 809 if (err) { 810 nlmsg_free(msg); 811 return err; 812 } 813 814 return genlmsg_reply(msg, info); 815 } 816 817 static int 818 devlink_nl_cmd_info_get_dump_one(struct sk_buff *msg, struct devlink *devlink, 819 struct netlink_callback *cb) 820 { 821 int err; 822 823 err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET, 824 NETLINK_CB(cb->skb).portid, 825 cb->nlh->nlmsg_seq, NLM_F_MULTI, 826 cb->extack); 827 if (err == -EOPNOTSUPP) 828 err = 0; 829 return err; 830 } 831 832 const struct devlink_cmd devl_cmd_info_get = { 833 .dump_one = devlink_nl_cmd_info_get_dump_one, 834 }; 835 836 static void __devlink_compat_running_version(struct devlink *devlink, 837 char *buf, size_t len) 838 { 839 struct devlink_info_req req = {}; 840 const struct nlattr *nlattr; 841 struct sk_buff *msg; 842 int rem, err; 843 844 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 845 if (!msg) 846 return; 847 848 req.msg = msg; 849 err = devlink->ops->info_get(devlink, &req, NULL); 850 if (err) 851 goto free_msg; 852 853 nla_for_each_attr(nlattr, (void *)msg->data, msg->len, rem) { 854 const struct nlattr *kv; 855 int rem_kv; 856 857 if (nla_type(nlattr) != DEVLINK_ATTR_INFO_VERSION_RUNNING) 858 continue; 859 860 nla_for_each_nested(kv, nlattr, rem_kv) { 861 if (nla_type(kv) != DEVLINK_ATTR_INFO_VERSION_VALUE) 862 continue; 863 864 strlcat(buf, nla_data(kv), len); 865 strlcat(buf, " ", len); 866 } 867 } 868 free_msg: 869 nlmsg_free(msg); 870 } 871 872 void devlink_compat_running_version(struct devlink *devlink, 873 char *buf, size_t len) 874 { 875 if (!devlink->ops->info_get) 876 return; 877 878 devl_lock(devlink); 879 if (devl_is_registered(devlink)) 880 __devlink_compat_running_version(devlink, buf, len); 881 devl_unlock(devlink); 882 } 883