/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef __DLB2_RESOURCE_H
#define __DLB2_RESOURCE_H

#include "dlb2_user.h"

#include "dlb2_hw_types.h"
#include "dlb2_osdep_types.h"

/**
 * dlb2_resource_init() - initialize the device
 * @hw: pointer to struct dlb2_hw.
 *
 * This function initializes the device's software state (pointed to by the hw
 * argument) and programs global scheduling QoS registers. This function should
 * be called during driver initialization.
 *
 * The dlb2_hw struct must be unique per DLB 2.0 device and persist until the
 * device is reset.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 */
int dlb2_resource_init(struct dlb2_hw *hw);

/**
 * dlb2_resource_free() - free device state memory
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function frees software state pointed to by dlb2_hw. This function
 * should be called when resetting the device or unloading the driver.
 */
void dlb2_resource_free(struct dlb2_hw *hw);

/**
 * dlb2_resource_reset() - reset in-use resources to their initial state
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function resets in-use resources, and makes them available for use.
 * All resources go back to their owning function, whether a PF or a VF.
 */
void dlb2_resource_reset(struct dlb2_hw *hw);

/**
 * dlb2_hw_create_sched_domain() - create a scheduling domain
 * @hw: dlb2_hw handle for a particular device.
 * @args: scheduling domain creation arguments.
 * @resp: response structure.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function creates a scheduling domain containing the resources specified
 * in args. The individual resources (queues, ports, credits) can be configured
 * after creating a scheduling domain.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the domain ID.
 *
 * resp->id contains a virtual ID if vdev_request is true.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, or the requested domain name
 *          is already in use.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
                                struct dlb2_create_sched_domain_args *args,
                                struct dlb2_cmd_response *resp,
                                bool vdev_request,
                                unsigned int vdev_id);

/**
 * dlb2_hw_create_ldb_queue() - create a load-balanced queue
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: queue creation arguments.
 * @resp: response structure.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function creates a load-balanced queue.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the queue ID.
 *
 * resp->id contains a virtual ID if vdev_request is true.
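 *
 * Example (illustrative sketch only; hw and domain_id are assumed to come
 * from the caller, and the queue parameters are left at their defaults):
 *
 *	struct dlb2_create_ldb_queue_args args = {0};
 *	struct dlb2_cmd_response resp = {0};
 *	int ret;
 *
 *	ret = dlb2_hw_create_ldb_queue(hw, domain_id, &args, &resp, false, 0);
 *	if (ret)
 *		... resp.status holds the detailed dlb2_error code ...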
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, the domain is not configured,
 *          the domain has already been started, or the requested queue name is
 *          already in use.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_create_ldb_queue(struct dlb2_hw *hw,
                             u32 domain_id,
                             struct dlb2_create_ldb_queue_args *args,
                             struct dlb2_cmd_response *resp,
                             bool vdev_request,
                             unsigned int vdev_id);

/**
 * dlb2_hw_create_dir_queue() - create a directed queue
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: queue creation arguments.
 * @resp: response structure.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function creates a directed queue.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the queue ID.
 *
 * resp->id contains a virtual ID if vdev_request is true.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, the domain is not configured,
 *          or the domain has already been started.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_create_dir_queue(struct dlb2_hw *hw,
                             u32 domain_id,
                             struct dlb2_create_dir_queue_args *args,
                             struct dlb2_cmd_response *resp,
                             bool vdev_request,
                             unsigned int vdev_id);

/**
 * dlb2_hw_create_dir_port() - create a directed port
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: port creation arguments.
 * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
 * @resp: response structure.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function creates a directed port.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the port ID.
 *
 * resp->id contains a virtual ID if vdev_request is true.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
 *          pointer address is not properly aligned, the domain is not
 *          configured, or the domain has already been started.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_create_dir_port(struct dlb2_hw *hw,
                            u32 domain_id,
                            struct dlb2_create_dir_port_args *args,
                            uintptr_t cq_dma_base,
                            struct dlb2_cmd_response *resp,
                            bool vdev_request,
                            unsigned int vdev_id);

/**
 * dlb2_hw_create_ldb_port() - create a load-balanced port
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: port creation arguments.
 * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
 * @resp: response structure.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function creates a load-balanced port.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the port ID.
 *
 * resp->id contains a virtual ID if vdev_request is true.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
 *          pointer address is not properly aligned, the domain is not
 *          configured, or the domain has already been started.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,
                            u32 domain_id,
                            struct dlb2_create_ldb_port_args *args,
                            uintptr_t cq_dma_base,
                            struct dlb2_cmd_response *resp,
                            bool vdev_request,
                            unsigned int vdev_id);

/**
 * dlb2_hw_start_domain() - start a scheduling domain
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: start domain arguments.
 * @resp: response structure.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function starts a scheduling domain, which allows applications to send
 * traffic through it. Once a domain is started, its resources can no longer be
 * configured (besides QID remapping and port enable/disable).
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - The domain is not configured, or the domain is already started.
 */
int dlb2_hw_start_domain(struct dlb2_hw *hw,
                         u32 domain_id,
                         struct dlb2_start_domain_args *args,
                         struct dlb2_cmd_response *resp,
                         bool vdev_request,
                         unsigned int vdev_id);

/**
 * dlb2_hw_map_qid() - map a load-balanced queue to a load-balanced port
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: map QID arguments.
 * @resp: response structure.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function configures the DLB to schedule QEs from the specified queue
 * to the specified port. Each load-balanced port can be mapped to up to 8
 * queues; each load-balanced queue can potentially map to all the
 * load-balanced ports.
 *
 * A successful return does not necessarily mean the mapping was configured. If
 * this function is unable to immediately map the queue to the port, it will
 * add the requested operation to a per-port list of pending map/unmap
 * operations, and (if it's not already running) launch a kernel thread that
 * periodically attempts to process all pending operations. In a sense, this is
 * an asynchronous function.
 *
 * This asynchronicity creates two views of the state of hardware: the actual
 * hardware state and the requested state (as if every request completed
 * immediately). If there are any pending map/unmap operations, the requested
 * state will differ from the actual state. All validation is performed with
 * respect to the pending state; for instance, if there are 8 pending map
 * operations for port X, a request for a 9th will fail because a load-balanced
 * port can only map up to 8 queues.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
 *          the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_map_qid(struct dlb2_hw *hw,
                    u32 domain_id,
                    struct dlb2_map_qid_args *args,
                    struct dlb2_cmd_response *resp,
                    bool vdev_request,
                    unsigned int vdev_id);

/**
 * dlb2_hw_unmap_qid() - unmap a load-balanced queue from a load-balanced port
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: unmap QID arguments.
 * @resp: response structure.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function configures the DLB to stop scheduling QEs from the specified
 * queue to the specified port.
 *
 * A successful return does not necessarily mean the mapping was removed. If
 * this function is unable to immediately unmap the queue from the port, it
 * will add the requested operation to a per-port list of pending map/unmap
 * operations, and (if it's not already running) launch a kernel thread that
 * periodically attempts to process all pending operations. See
 * dlb2_hw_map_qid() for more details.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
 *          the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_unmap_qid(struct dlb2_hw *hw,
                      u32 domain_id,
                      struct dlb2_unmap_qid_args *args,
                      struct dlb2_cmd_response *resp,
                      bool vdev_request,
                      unsigned int vdev_id);

/**
 * dlb2_finish_unmap_qid_procedures() - finish any pending unmap procedures
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function attempts to finish any outstanding unmap procedures.
 * This function should be called by the kernel thread responsible for
 * finishing map/unmap procedures.
 *
 * Return:
 * Returns the number of procedures that weren't completed.
 */
unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw);

/**
 * dlb2_finish_map_qid_procedures() - finish any pending map procedures
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function attempts to finish any outstanding map procedures.
 * This function should be called by the kernel thread responsible for
 * finishing map/unmap procedures.
 *
 * Return:
 * Returns the number of procedures that weren't completed.
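 *
 * Example (illustrative sketch of the background thread's loop; the polling
 * interval and the msleep() helper are assumptions, not part of this API):
 *
 *	while (dlb2_finish_unmap_qid_procedures(hw) > 0 ||
 *	       dlb2_finish_map_qid_procedures(hw) > 0)
 *		msleep(1);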
 */
unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw);

/**
 * dlb2_hw_enable_ldb_port() - enable a load-balanced port for scheduling
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: port enable arguments.
 * @resp: response structure.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function configures the DLB to schedule QEs to a load-balanced port.
 * Ports are enabled by default.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - The port ID is invalid or the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_enable_ldb_port(struct dlb2_hw *hw,
                            u32 domain_id,
                            struct dlb2_enable_ldb_port_args *args,
                            struct dlb2_cmd_response *resp,
                            bool vdev_request,
                            unsigned int vdev_id);

/**
 * dlb2_hw_disable_ldb_port() - disable a load-balanced port for scheduling
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: port disable arguments.
 * @resp: response structure.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function configures the DLB to stop scheduling QEs to a load-balanced
 * port. Ports are enabled by default.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - The port ID is invalid or the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_disable_ldb_port(struct dlb2_hw *hw,
                             u32 domain_id,
                             struct dlb2_disable_ldb_port_args *args,
                             struct dlb2_cmd_response *resp,
                             bool vdev_request,
                             unsigned int vdev_id);

/**
 * dlb2_hw_enable_dir_port() - enable a directed port for scheduling
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: port enable arguments.
 * @resp: response structure.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function configures the DLB to schedule QEs to a directed port.
 * Ports are enabled by default.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - The port ID is invalid or the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_enable_dir_port(struct dlb2_hw *hw,
                            u32 domain_id,
                            struct dlb2_enable_dir_port_args *args,
                            struct dlb2_cmd_response *resp,
                            bool vdev_request,
                            unsigned int vdev_id);

/**
 * dlb2_hw_disable_dir_port() - disable a directed port for scheduling
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: port disable arguments.
 * @resp: response structure.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function configures the DLB to stop scheduling QEs to a directed port.
 * Ports are enabled by default.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - The port ID is invalid or the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_disable_dir_port(struct dlb2_hw *hw,
                             u32 domain_id,
                             struct dlb2_disable_dir_port_args *args,
                             struct dlb2_cmd_response *resp,
                             bool vdev_request,
                             unsigned int vdev_id);

/**
 * dlb2_configure_ldb_cq_interrupt() - configure load-balanced CQ for
 *                                     interrupts
 * @hw: dlb2_hw handle for a particular device.
 * @port_id: load-balanced port ID.
 * @vector: interrupt vector ID. Should be 0 for MSI or compressed MSI-X mode,
 *          else a value up to 64.
 * @mode: interrupt type (DLB2_CQ_ISR_MODE_MSI or DLB2_CQ_ISR_MODE_MSIX)
 * @vf: If the port is VF-owned, the VF's ID. This is used for translating the
 *      virtual port ID to a physical port ID. Ignored if mode is not MSI.
 * @owner_vf: the VF to route the interrupt to. Ignored if mode is not MSI.
 * @threshold: the minimum CQ depth at which the interrupt can fire. Must be
 *             greater than 0.
 *
 * This function configures the DLB registers for a load-balanced CQ's
 * interrupts. This doesn't enable the CQ's interrupt; that can be done with
 * dlb2_arm_cq_interrupt() or through an interrupt arm QE.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - The port ID is invalid.
 */
int dlb2_configure_ldb_cq_interrupt(struct dlb2_hw *hw,
                                    int port_id,
                                    int vector,
                                    int mode,
                                    unsigned int vf,
                                    unsigned int owner_vf,
                                    u16 threshold);

/**
 * dlb2_configure_dir_cq_interrupt() - configure directed CQ for interrupts
 * @hw: dlb2_hw handle for a particular device.
 * @port_id: directed port ID.
 * @vector: interrupt vector ID. Should be 0 for MSI or compressed MSI-X mode,
 *          else a value up to 64.
 * @mode: interrupt type (DLB2_CQ_ISR_MODE_MSI or DLB2_CQ_ISR_MODE_MSIX)
 * @vf: If the port is VF-owned, the VF's ID. This is used for translating the
 *      virtual port ID to a physical port ID. Ignored if mode is not MSI.
 * @owner_vf: the VF to route the interrupt to. Ignored if mode is not MSI.
 * @threshold: the minimum CQ depth at which the interrupt can fire. Must be
 *             greater than 0.
 *
 * This function configures the DLB registers for a directed CQ's interrupts.
 * This doesn't enable the CQ's interrupt; that can be done with
 * dlb2_arm_cq_interrupt() or through an interrupt arm QE.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - The port ID is invalid.
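 *
 * Example (illustrative sketch; the compressed MSI-X configuration and the
 * threshold of 1 are values chosen for the example, not requirements):
 *
 *	int ret;
 *
 *	ret = dlb2_configure_dir_cq_interrupt(hw, port_id, 0,
 *					      DLB2_CQ_ISR_MODE_MSIX,
 *					      0, 0, 1);
 *	if (!ret)
 *		ret = dlb2_arm_cq_interrupt(hw, port_id, false, false, 0);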
 */
int dlb2_configure_dir_cq_interrupt(struct dlb2_hw *hw,
                                    int port_id,
                                    int vector,
                                    int mode,
                                    unsigned int vf,
                                    unsigned int owner_vf,
                                    u16 threshold);

/**
 * dlb2_enable_ingress_error_alarms() - enable ingress error alarm interrupts
 * @hw: dlb2_hw handle for a particular device.
 */
void dlb2_enable_ingress_error_alarms(struct dlb2_hw *hw);

/**
 * dlb2_disable_ingress_error_alarms() - disable ingress error alarm interrupts
 * @hw: dlb2_hw handle for a particular device.
 */
void dlb2_disable_ingress_error_alarms(struct dlb2_hw *hw);

/**
 * dlb2_set_msix_mode() - configure the device's MSI-X mode
 * @hw: dlb2_hw handle for a particular device.
 * @mode: MSI-X mode (DLB2_MSIX_MODE_PACKED or DLB2_MSIX_MODE_COMPRESSED)
 *
 * This function configures the hardware to use either packed or compressed
 * mode. This function should not be called if using MSI interrupts.
 */
void dlb2_set_msix_mode(struct dlb2_hw *hw, int mode);

/**
 * dlb2_ack_msix_interrupt() - ack an MSI-X interrupt
 * @hw: dlb2_hw handle for a particular device.
 * @vector: interrupt vector.
 *
 * Note: Only needed for PF service interrupts (vector 0). CQ interrupts are
 * acked in dlb2_ack_compressed_cq_intr().
 */
void dlb2_ack_msix_interrupt(struct dlb2_hw *hw, int vector);

/**
 * dlb2_arm_cq_interrupt() - arm a CQ's interrupt
 * @hw: dlb2_hw handle for a particular device.
 * @port_id: port ID
 * @is_ldb: true for a load-balanced port, false for a directed port
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function arms the CQ's interrupt. The CQ must be configured prior to
 * calling this function.
 *
 * The function does no parameter validation; that is the caller's
 * responsibility.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return: returns 0 upon success, <0 otherwise.
 *
 * EINVAL - Invalid port ID.
 */
int dlb2_arm_cq_interrupt(struct dlb2_hw *hw,
                          int port_id,
                          bool is_ldb,
                          bool vdev_request,
                          unsigned int vdev_id);

/**
 * dlb2_read_compressed_cq_intr_status() - read compressed CQ interrupt status
 * @hw: dlb2_hw handle for a particular device.
 * @ldb_interrupts: 2-entry array of u32 bitmaps
 * @dir_interrupts: 4-entry array of u32 bitmaps
 *
 * This function can be called from a compressed CQ interrupt handler to
 * determine which CQ interrupts have fired. The caller should take appropriate
 * action (such as waking threads blocked on a CQ's interrupt) and then ack the
 * interrupts with dlb2_ack_compressed_cq_intr().
 */
void dlb2_read_compressed_cq_intr_status(struct dlb2_hw *hw,
                                         u32 *ldb_interrupts,
                                         u32 *dir_interrupts);

/**
 * dlb2_ack_compressed_cq_intr() - ack compressed CQ interrupts
 * @hw: dlb2_hw handle for a particular device.
 * @ldb_interrupts: 2-entry array of u32 bitmaps
 * @dir_interrupts: 4-entry array of u32 bitmaps
 *
 * This function ACKs compressed CQ interrupts. Its arguments should be the
 * same ones passed to dlb2_read_compressed_cq_intr_status().
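 *
 * Example (illustrative sketch of a compressed CQ interrupt handler; waking
 * the threads blocked on the flagged CQs is left as a placeholder):
 *
 *	u32 ldb_intrs[2], dir_intrs[4];
 *
 *	dlb2_read_compressed_cq_intr_status(hw, ldb_intrs, dir_intrs);
 *	... wake threads blocked on the CQs whose bits are set ...
 *	dlb2_ack_compressed_cq_intr(hw, ldb_intrs, dir_intrs);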
 */
void dlb2_ack_compressed_cq_intr(struct dlb2_hw *hw,
                                 u32 *ldb_interrupts,
                                 u32 *dir_interrupts);

/**
 * dlb2_read_vf_intr_status() - read the VF interrupt status register
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function can be called from a VF's interrupt handler to determine
 * which interrupts have fired. The first 31 bits correspond to CQ interrupt
 * vectors, and the final bit is for the PF->VF mailbox interrupt vector.
 *
 * Return:
 * Returns a bit vector indicating which interrupt vectors are active.
 */
u32 dlb2_read_vf_intr_status(struct dlb2_hw *hw);

/**
 * dlb2_ack_vf_intr_status() - ack VF interrupts
 * @hw: dlb2_hw handle for a particular device.
 * @interrupts: 32-bit bitmap
 *
 * This function ACKs a VF's interrupts. Its interrupts argument should be the
 * value returned by dlb2_read_vf_intr_status().
 */
void dlb2_ack_vf_intr_status(struct dlb2_hw *hw, u32 interrupts);

/**
 * dlb2_ack_vf_msi_intr() - ack VF MSI interrupt
 * @hw: dlb2_hw handle for a particular device.
 * @interrupts: 32-bit bitmap
 *
 * This function clears the VF's MSI interrupt pending register. Its interrupts
 * argument should contain the MSI vectors to ACK. For example, if MSI MME is
 * in mode 0, then only bit 0 should ever be set.
 */
void dlb2_ack_vf_msi_intr(struct dlb2_hw *hw, u32 interrupts);

/**
 * dlb2_ack_pf_mbox_int() - ack PF->VF mailbox interrupt
 * @hw: dlb2_hw handle for a particular device.
 *
 * When done processing the PF mailbox request, this function unsets
 * the PF's mailbox ISR register.
 */
void dlb2_ack_pf_mbox_int(struct dlb2_hw *hw);

/**
 * dlb2_read_vdev_to_pf_int_bitvec() - return a bit vector of all requesting
 *                                     vdevs
 * @hw: dlb2_hw handle for a particular device.
 *
 * When the vdev->PF ISR fires, this function can be called to determine which
 * vdev(s) are requesting service. This bitvector must be passed to
 * dlb2_ack_vdev_to_pf_int() when processing is complete for all requesting
 * vdevs.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns a bit vector indicating which VFs (0-15) have requested service.
 */
u32 dlb2_read_vdev_to_pf_int_bitvec(struct dlb2_hw *hw);

/**
 * dlb2_ack_vdev_mbox_int() - ack processed vdev->PF mailbox interrupt
 * @hw: dlb2_hw handle for a particular device.
 * @bitvec: bit vector returned by dlb2_read_vdev_to_pf_int_bitvec()
 *
 * When done processing all VF mailbox requests, this function unsets the VF's
 * mailbox ISR register.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 */
void dlb2_ack_vdev_mbox_int(struct dlb2_hw *hw, u32 bitvec);

/**
 * dlb2_read_vf_flr_int_bitvec() - return a bit vector of all VFs requesting
 *                                 FLR
 * @hw: dlb2_hw handle for a particular device.
 *
 * When the VF FLR ISR fires, this function can be called to determine which
 * VF(s) are requesting FLRs. This bitvector must be passed to
 * dlb2_ack_vf_flr_int() when processing is complete for all requesting VFs.
 *
 * Return:
 * Returns a bit vector indicating which VFs (0-15) have requested FLRs.
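 *
 * Example (illustrative sketch of a vdev->PF service routine; the per-VF
 * handling is only outlined):
 *
 *	u32 flr_bitvec = dlb2_read_vf_flr_int_bitvec(hw);
 *	u32 mbox_bitvec = dlb2_read_vdev_to_pf_int_bitvec(hw);
 *
 *	... service each requesting VF, e.g. via dlb2_reset_vdev() ...
 *
 *	dlb2_ack_vf_flr_int(hw, flr_bitvec);
 *	dlb2_ack_vdev_mbox_int(hw, mbox_bitvec);
 *	dlb2_ack_vdev_to_pf_int(hw, mbox_bitvec, flr_bitvec);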
 */
u32 dlb2_read_vf_flr_int_bitvec(struct dlb2_hw *hw);

/**
 * dlb2_ack_vf_flr_int() - ack processed VF FLR interrupt(s)
 * @hw: dlb2_hw handle for a particular device.
 * @bitvec: bit vector returned by dlb2_read_vf_flr_int_bitvec()
 *
 * When done processing all VF FLR requests, this function unsets the VF's FLR
 * ISR register.
 */
void dlb2_ack_vf_flr_int(struct dlb2_hw *hw, u32 bitvec);

/**
 * dlb2_ack_vdev_to_pf_int() - ack processed VF mbox and FLR interrupt(s)
 * @hw: dlb2_hw handle for a particular device.
 * @mbox_bitvec: bit vector returned by dlb2_read_vdev_to_pf_int_bitvec()
 * @flr_bitvec: bit vector returned by dlb2_read_vf_flr_int_bitvec()
 *
 * When done processing all VF requests, this function communicates to the
 * hardware that processing is complete.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 */
void dlb2_ack_vdev_to_pf_int(struct dlb2_hw *hw,
                             u32 mbox_bitvec,
                             u32 flr_bitvec);

/**
 * dlb2_process_wdt_interrupt() - process watchdog timer interrupts
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function reads the watchdog timer interrupt cause registers to
 * determine which port(s) had a watchdog timeout, and notifies the
 * application(s) that own the port(s).
 */
void dlb2_process_wdt_interrupt(struct dlb2_hw *hw);

/**
 * dlb2_process_alarm_interrupt() - process an alarm interrupt
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function reads and logs the alarm syndrome, then acks the interrupt.
 * This function should be called from the alarm interrupt handler when
 * interrupt vector DLB2_INT_ALARM fires.
 */
void dlb2_process_alarm_interrupt(struct dlb2_hw *hw);

/**
 * dlb2_process_ingress_error_interrupt() - process ingress error interrupts
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function reads the alarm syndrome, logs it, notifies user-space, and
 * acks the interrupt. This function should be called from the alarm interrupt
 * handler when interrupt vector DLB2_INT_INGRESS_ERROR fires.
 *
 * Return:
 * Returns true if an ingress error interrupt occurred, false otherwise
 */
bool dlb2_process_ingress_error_interrupt(struct dlb2_hw *hw);

/**
 * dlb2_get_group_sequence_numbers() - return a group's number of SNs per queue
 * @hw: dlb2_hw handle for a particular device.
 * @group_id: sequence number group ID.
 *
 * This function returns the configured number of sequence numbers per queue
 * for the specified group.
 *
 * Return:
 * Returns -EINVAL if group_id is invalid, else the group's SNs per queue.
 */
int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw,
                                    unsigned int group_id);

/**
 * dlb2_get_group_sequence_number_occupancy() - return a group's in-use slots
 * @hw: dlb2_hw handle for a particular device.
 * @group_id: sequence number group ID.
 *
 * This function returns the group's number of in-use slots (i.e. load-balanced
 * queues using the specified group).
 *
 * Return:
 * Returns -EINVAL if group_id is invalid, else the group's occupancy.
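 *
 * Example (illustrative sketch: only resize a group's SN allocation while no
 * queue is using it; the group ID of 0 and the value 64 are arbitrary):
 *
 *	int ret;
 *
 *	if (dlb2_get_group_sequence_number_occupancy(hw, 0) == 0)
 *		ret = dlb2_set_group_sequence_numbers(hw, 0, 64);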
 */
int dlb2_get_group_sequence_number_occupancy(struct dlb2_hw *hw,
                                             unsigned int group_id);

/**
 * dlb2_set_group_sequence_numbers() - assign a group's number of SNs per queue
 * @hw: dlb2_hw handle for a particular device.
 * @group_id: sequence number group ID.
 * @val: requested amount of sequence numbers per queue.
 *
 * This function configures the group's number of sequence numbers per queue.
 * val can be a power-of-two between 32 and 1024, inclusive. This setting can
 * be configured until the first ordered load-balanced queue is configured, at
 * which point the configuration is locked.
 *
 * Return:
 * Returns 0 upon success; -EINVAL if group_id or val is invalid, -EPERM if an
 * ordered queue is configured.
 */
int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,
                                    unsigned int group_id,
                                    unsigned long val);

/**
 * dlb2_reset_domain() - reset a scheduling domain
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function resets and frees a DLB 2.0 scheduling domain and its associated
 * resources.
 *
 * Pre-condition: the driver must ensure software has stopped sending QEs
 * through this domain's producer ports before invoking this function, or
 * undefined behavior will result.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, -1 otherwise.
 *
 * EINVAL - Invalid domain ID, or the domain is not configured.
 * EFAULT - Internal error. (Possibly caused if the pre-condition is not met.)
 * ETIMEDOUT - Hardware component didn't reset in the expected time.
 */
int dlb2_reset_domain(struct dlb2_hw *hw,
                      u32 domain_id,
                      bool vdev_request,
                      unsigned int vdev_id);

/**
 * dlb2_ldb_port_owned_by_domain() - query whether a port is owned by a domain
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @port_id: port ID.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function returns whether a load-balanced port is owned by a specified
 * domain.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 if false, 1 if true, <0 otherwise.
 *
 * EINVAL - Invalid domain or port ID, or the domain is not configured.
 */
int dlb2_ldb_port_owned_by_domain(struct dlb2_hw *hw,
                                  u32 domain_id,
                                  u32 port_id,
                                  bool vdev_request,
                                  unsigned int vdev_id);

/**
 * dlb2_dir_port_owned_by_domain() - query whether a port is owned by a domain
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @port_id: port ID.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function returns whether a directed port is owned by a specified
 * domain.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 if false, 1 if true, <0 otherwise.
 *
 * EINVAL - Invalid domain or port ID, or the domain is not configured.
 */
int dlb2_dir_port_owned_by_domain(struct dlb2_hw *hw,
                                  u32 domain_id,
                                  u32 port_id,
                                  bool vdev_request,
                                  unsigned int vdev_id);

/**
 * dlb2_hw_get_num_resources() - query the PCI function's available resources
 * @hw: dlb2_hw handle for a particular device.
 * @arg: pointer to resource counts.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function returns the number of available resources for the PF or for a
 * VF.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, -EINVAL if vdev_request is true and vdev_id is
 * invalid.
 */
int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
                              struct dlb2_get_num_resources_args *arg,
                              bool vdev_request,
                              unsigned int vdev_id);

/**
 * dlb2_hw_get_num_used_resources() - query the PCI function's used resources
 * @hw: dlb2_hw handle for a particular device.
 * @arg: pointer to resource counts.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function returns the number of resources in use by the PF or a VF. It
 * fills in the fields that arg points to, except the following:
 * - max_contiguous_atomic_inflights
 * - max_contiguous_hist_list_entries
 * - max_contiguous_ldb_credits
 * - max_contiguous_dir_credits
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, -EINVAL if vdev_request is true and vdev_id is
 * invalid.
 */
int dlb2_hw_get_num_used_resources(struct dlb2_hw *hw,
                                   struct dlb2_get_num_resources_args *arg,
                                   bool vdev_request,
                                   unsigned int vdev_id);

/**
 * dlb2_send_async_vdev_to_pf_msg() - (vdev only) send a mailbox message to
 *                                    the PF
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function sends a VF->PF mailbox message. It is asynchronous, so it
 * returns once the message is sent but potentially before the PF has processed
 * the message. The caller must call dlb2_vdev_to_pf_complete() to determine
 * when the PF has finished processing the request.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 */
void dlb2_send_async_vdev_to_pf_msg(struct dlb2_hw *hw);

/**
 * dlb2_vdev_to_pf_complete() - check the status of an asynchronous mailbox
 *                              request
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function returns a boolean indicating whether the PF has finished
 * processing a VF->PF mailbox request. It should only be called after sending
 * an asynchronous request with dlb2_send_async_vdev_to_pf_msg().
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 */
bool dlb2_vdev_to_pf_complete(struct dlb2_hw *hw);

/**
 * dlb2_vf_flr_complete() - check the status of a VF FLR
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function returns a boolean indicating whether the PF has finished
 * executing the VF FLR. It should only be called after setting the VF's FLR
 * bit.
 */
bool dlb2_vf_flr_complete(struct dlb2_hw *hw);

/**
 * dlb2_send_async_pf_to_vdev_msg() - (PF only) send a mailbox message to a
 *                                    vdev
 * @hw: dlb2_hw handle for a particular device.
 * @vdev_id: vdev ID.
 *
 * This function sends a PF->vdev mailbox message. It is asynchronous, so it
 * returns once the message is sent but potentially before the vdev has
 * processed the message. The caller must call dlb2_pf_to_vdev_complete() to
 * determine when the vdev has finished processing the request.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 */
void dlb2_send_async_pf_to_vdev_msg(struct dlb2_hw *hw, unsigned int vdev_id);

/**
 * dlb2_pf_to_vdev_complete() - check the status of an asynchronous mailbox
 *                              request
 * @hw: dlb2_hw handle for a particular device.
 * @vdev_id: vdev ID.
 *
 * This function returns a boolean indicating whether the vdev has finished
 * processing a PF->vdev mailbox request. It should only be called after
 * sending an asynchronous request with dlb2_send_async_pf_to_vdev_msg().
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 */
bool dlb2_pf_to_vdev_complete(struct dlb2_hw *hw, unsigned int vdev_id);

/**
 * dlb2_pf_read_vf_mbox_req() - (PF only) read a VF->PF mailbox request
 * @hw: dlb2_hw handle for a particular device.
 * @vf_id: VF ID.
 * @data: pointer to message data.
 * @len: size, in bytes, of the data array.
 *
 * This function copies one of the PF's VF->PF mailboxes into the array pointed
 * to by data.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * EINVAL - len >= DLB2_VF2PF_REQ_BYTES.
 */
int dlb2_pf_read_vf_mbox_req(struct dlb2_hw *hw,
                             unsigned int vf_id,
                             void *data,
                             int len);

/**
 * dlb2_pf_read_vf_mbox_resp() - (PF only) read a VF->PF mailbox response
 * @hw: dlb2_hw handle for a particular device.
 * @vf_id: VF ID.
 * @data: pointer to message data.
 * @len: size, in bytes, of the data array.
 *
 * This function copies one of the PF's VF->PF mailboxes into the array pointed
 * to by data.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * EINVAL - len >= DLB2_VF2PF_RESP_BYTES.
 */
int dlb2_pf_read_vf_mbox_resp(struct dlb2_hw *hw,
                              unsigned int vf_id,
                              void *data,
                              int len);

/**
 * dlb2_pf_write_vf_mbox_resp() - (PF only) write a PF->VF mailbox response
 * @hw: dlb2_hw handle for a particular device.
 * @vf_id: VF ID.
 * @data: pointer to message data.
 * @len: size, in bytes, of the data array.
 *
 * This function copies the user-provided message data into one of the PF's
 * PF->VF mailboxes.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * EINVAL - len >= DLB2_PF2VF_RESP_BYTES.
 */
int dlb2_pf_write_vf_mbox_resp(struct dlb2_hw *hw,
                               unsigned int vf_id,
                               void *data,
                               int len);

/**
 * dlb2_pf_write_vf_mbox_req() - (PF only) write a PF->VF mailbox request
 * @hw: dlb2_hw handle for a particular device.
 * @vf_id: VF ID.
 * @data: pointer to message data.
 * @len: size, in bytes, of the data array.
 *
 * This function copies the user-provided message data into one of the PF's
 * PF->VF mailboxes.
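 *
 * Example (illustrative sketch of the PF->vdev request flow; vf_id and the
 * request payload are assumed to come from the caller, and the buffer size
 * constant is the one referenced in the error description below):
 *
 *	u8 req[DLB2_PF2VF_REQ_BYTES] = {0};
 *
 *	... fill in req ...
 *	dlb2_pf_write_vf_mbox_req(hw, vf_id, req, sizeof(req));
 *	dlb2_send_async_pf_to_vdev_msg(hw, vf_id);
 *	while (!dlb2_pf_to_vdev_complete(hw, vf_id))
 *		... wait or poll ...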
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * EINVAL - len >= DLB2_PF2VF_REQ_BYTES.
 */
int dlb2_pf_write_vf_mbox_req(struct dlb2_hw *hw,
                              unsigned int vf_id,
                              void *data,
                              int len);

/**
 * dlb2_vf_read_pf_mbox_resp() - (VF only) read a PF->VF mailbox response
 * @hw: dlb2_hw handle for a particular device.
 * @data: pointer to message data.
 * @len: size, in bytes, of the data array.
 *
 * This function copies the VF's PF->VF mailbox into the array pointed to by
 * data.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * EINVAL - len >= DLB2_PF2VF_RESP_BYTES.
 */
int dlb2_vf_read_pf_mbox_resp(struct dlb2_hw *hw, void *data, int len);

/**
 * dlb2_vf_read_pf_mbox_req() - (VF only) read a PF->VF mailbox request
 * @hw: dlb2_hw handle for a particular device.
 * @data: pointer to message data.
 * @len: size, in bytes, of the data array.
 *
 * This function copies the VF's PF->VF mailbox into the array pointed to by
 * data.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * EINVAL - len >= DLB2_PF2VF_REQ_BYTES.
 */
int dlb2_vf_read_pf_mbox_req(struct dlb2_hw *hw, void *data, int len);

/**
 * dlb2_vf_write_pf_mbox_req() - (VF only) write a VF->PF mailbox request
 * @hw: dlb2_hw handle for a particular device.
 * @data: pointer to message data.
 * @len: size, in bytes, of the data array.
 *
 * This function copies the user-provided message data into the VF's VF->PF
 * mailbox.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * EINVAL - len >= DLB2_VF2PF_REQ_BYTES.
 */
int dlb2_vf_write_pf_mbox_req(struct dlb2_hw *hw, void *data, int len);

/**
 * dlb2_vf_write_pf_mbox_resp() - (VF only) write a VF->PF mailbox response
 * @hw: dlb2_hw handle for a particular device.
 * @data: pointer to message data.
 * @len: size, in bytes, of the data array.
 *
 * This function copies the user-provided message data into the VF's VF->PF
 * mailbox.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * EINVAL - len >= DLB2_VF2PF_RESP_BYTES.
 */
int dlb2_vf_write_pf_mbox_resp(struct dlb2_hw *hw, void *data, int len);

/**
 * dlb2_reset_vdev() - reset the hardware owned by a virtual device
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 *
 * This function resets the hardware owned by a vdev, by resetting the vdev's
 * domains one by one.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 */
int dlb2_reset_vdev(struct dlb2_hw *hw, unsigned int id);

/**
 * dlb2_vdev_is_locked() - check whether the vdev's resources are locked
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 *
 * This function returns whether or not the vdev's resource assignments are
 * locked. If locked, no resources can be added to or subtracted from the
 * group.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 */
bool dlb2_vdev_is_locked(struct dlb2_hw *hw, unsigned int id);

/**
 * dlb2_lock_vdev() - lock the vdev's resources
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 *
 * This function sets a flag indicating that the vdev is using its resources.
 * When the vdev is locked, its resource assignment cannot be changed.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 */
void dlb2_lock_vdev(struct dlb2_hw *hw, unsigned int id);

/**
 * dlb2_unlock_vdev() - unlock the vdev's resources
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 *
 * This function unlocks the vdev's resource assignment, allowing it to be
 * modified.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 */
void dlb2_unlock_vdev(struct dlb2_hw *hw, unsigned int id);

/**
 * dlb2_update_vdev_sched_domains() - update the domains assigned to a vdev
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 * @num: number of scheduling domains to assign to this vdev
 *
 * This function assigns num scheduling domains to the specified vdev. If the
 * vdev already has domains assigned, this existing assignment is adjusted
 * accordingly.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * Errors:
 * EINVAL - id is invalid, or the requested number of resources is
 *          unavailable.
 * EPERM - The vdev's resource assignment is locked and cannot be changed.
 */
int dlb2_update_vdev_sched_domains(struct dlb2_hw *hw, u32 id, u32 num);

/**
 * dlb2_update_vdev_ldb_queues() - update the LDB queues assigned to a vdev
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 * @num: number of LDB queues to assign to this vdev
 *
 * This function assigns num LDB queues to the specified vdev. If the vdev
 * already has LDB queues assigned, this existing assignment is adjusted
 * accordingly.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * Errors:
 * EINVAL - id is invalid, or the requested number of resources is
 *          unavailable.
 * EPERM - The vdev's resource assignment is locked and cannot be changed.
 */
int dlb2_update_vdev_ldb_queues(struct dlb2_hw *hw, u32 id, u32 num);

/**
 * dlb2_update_vdev_ldb_ports() - update the LDB ports assigned to a vdev
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 * @num: number of LDB ports to assign to this vdev
 *
 * This function assigns num LDB ports to the specified vdev. If the vdev
 * already has LDB ports assigned, this existing assignment is adjusted
 * accordingly.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * Errors:
 * EINVAL - id is invalid, or the requested number of resources is
 *          unavailable.
 * EPERM - The vdev's resource assignment is locked and cannot be changed.
 */
int dlb2_update_vdev_ldb_ports(struct dlb2_hw *hw, u32 id, u32 num);

/**
 * dlb2_update_vdev_ldb_cos_ports() - update the LDB ports assigned to a vdev
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 * @cos: class-of-service ID
 * @num: number of LDB ports to assign to this vdev
 *
 * This function assigns num LDB ports from class-of-service cos to the
 * specified vdev. If the vdev already has LDB ports from this class-of-service
 * assigned, this existing assignment is adjusted accordingly.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * Errors:
 * EINVAL - id is invalid, or the requested number of resources is
 *          unavailable.
 * EPERM - The vdev's resource assignment is locked and cannot be changed.
 */
int dlb2_update_vdev_ldb_cos_ports(struct dlb2_hw *hw,
                                   u32 id,
                                   u32 cos,
                                   u32 num);

/**
 * dlb2_update_vdev_dir_ports() - update the DIR ports assigned to a vdev
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 * @num: number of DIR ports to assign to this vdev
 *
 * This function assigns num DIR ports to the specified vdev. If the vdev
 * already has DIR ports assigned, this existing assignment is adjusted
 * accordingly.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * Errors:
 * EINVAL - id is invalid, or the requested number of resources is
 *          unavailable.
 * EPERM - The vdev's resource assignment is locked and cannot be changed.
 */
int dlb2_update_vdev_dir_ports(struct dlb2_hw *hw, u32 id, u32 num);

/**
 * dlb2_update_vdev_ldb_credits() - update the vdev's assigned LDB credits
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 * @num: number of LDB credits to assign to this vdev
 *
 * This function assigns num LDB credits to the specified vdev. If the vdev
 * already has LDB credits assigned, this existing assignment is adjusted
 * accordingly. vdevs are assigned a contiguous chunk of credits, so this
 * function may fail if a sufficiently large contiguous chunk is not available.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * Errors:
 * EINVAL - id is invalid, or the requested number of resources is
 *          unavailable.
 * EPERM - The vdev's resource assignment is locked and cannot be changed.
 */
int dlb2_update_vdev_ldb_credits(struct dlb2_hw *hw, u32 id, u32 num);

/**
 * dlb2_update_vdev_dir_credits() - update the vdev's assigned DIR credits
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 * @num: number of DIR credits to assign to this vdev
 *
 * This function assigns num DIR credits to the specified vdev. If the vdev
 * already has DIR credits assigned, this existing assignment is adjusted
 * accordingly. vdevs are assigned a contiguous chunk of credits, so this
 * function may fail if a sufficiently large contiguous chunk is not available.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * Errors:
 * EINVAL - id is invalid, or the requested number of resources is
 *          unavailable.
 * EPERM - The vdev's resource assignment is locked and cannot be changed.
 */
int dlb2_update_vdev_dir_credits(struct dlb2_hw *hw, u32 id, u32 num);

/**
 * dlb2_update_vdev_hist_list_entries() - update the vdev's assigned HL entries
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 * @num: number of history list entries to assign to this vdev
 *
 * This function assigns num history list entries to the specified vdev. If the
 * vdev already has history list entries assigned, this existing assignment is
 * adjusted accordingly. vdevs are assigned a contiguous chunk of entries, so
 * this function may fail if a sufficiently large contiguous chunk is not
 * available.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * Errors:
 * EINVAL - id is invalid, or the requested number of resources is
 *          unavailable.
 * EPERM - The vdev's resource assignment is locked and cannot be changed.
 */
int dlb2_update_vdev_hist_list_entries(struct dlb2_hw *hw, u32 id, u32 num);

/**
 * dlb2_update_vdev_atomic_inflights() - update the vdev's atomic inflights
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 * @num: number of atomic inflights to assign to this vdev
 *
 * This function assigns num atomic inflights to the specified vdev. If the
 * vdev already has atomic inflights assigned, this existing assignment is
 * adjusted accordingly. vdevs are assigned a contiguous chunk of entries, so
 * this function may fail if a sufficiently large contiguous chunk is not
 * available.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * Errors:
 * EINVAL - id is invalid, or the requested number of resources is
 *          unavailable.
 * EPERM - The vdev's resource assignment is locked and cannot be changed.
 */
int dlb2_update_vdev_atomic_inflights(struct dlb2_hw *hw, u32 id, u32 num);

/**
 * dlb2_reset_vdev_resources() - reassign the vdev's resources to the PF
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 *
 * This function takes any resources currently assigned to the vdev and
 * reassigns them to the PF.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * Errors:
 * EINVAL - id is invalid.
 * EPERM - The vdev's resource assignment is locked and cannot be changed.
 */
int dlb2_reset_vdev_resources(struct dlb2_hw *hw, unsigned int id);

/**
 * dlb2_notify_vf() - send an alarm to a VF
 * @hw: dlb2_hw handle for a particular device.
 * @vf_id: VF ID
 * @notification: notification to send (as defined in dlb2_mbox.h)
 *
 * This function sends a notification (as defined in dlb2_mbox.h) to a VF.
 *
 * Return:
 * Returns 0 upon success, <0 if the VF doesn't ACK the PF->VF interrupt.
 */
int dlb2_notify_vf(struct dlb2_hw *hw,
                   unsigned int vf_id,
                   u32 notification);

/**
 * dlb2_vdev_in_use() - query whether a virtual device is in use
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual device ID
 *
 * This function sends a mailbox request to the vdev to query whether the vdev
 * is in use.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 for false, 1 for true, and <0 if the mailbox request times out or
 * an internal error occurs.
 */
int dlb2_vdev_in_use(struct dlb2_hw *hw, unsigned int id);

/**
 * dlb2_clr_pmcsr_disable() - power on bulk of DLB 2.0 logic
 * @hw: dlb2_hw handle for a particular device.
 *
 * Clearing the PMCSR must be done at initialization to make the device fully
 * operational.
 */
void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw);

/**
 * dlb2_hw_get_ldb_queue_depth() - returns the depth of a load-balanced queue
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: queue depth args
 * @resp: response structure.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function returns the depth of a load-balanced queue.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the depth.
 *
 * Errors:
 * EINVAL - Invalid domain ID or queue ID.
 */
int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,
                                u32 domain_id,
                                struct dlb2_get_ldb_queue_depth_args *args,
                                struct dlb2_cmd_response *resp,
                                bool vdev_request,
                                unsigned int vdev_id);

/**
 * dlb2_hw_get_dir_queue_depth() - returns the depth of a directed queue
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: queue depth args
 * @resp: response structure.
 * @vdev_request: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_request is true, this contains the vdev's ID.
 *
 * This function returns the depth of a directed queue.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the depth.
 *
 * Errors:
 * EINVAL - Invalid domain ID or queue ID.
 */
int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,
                                u32 domain_id,
                                struct dlb2_get_dir_queue_depth_args *args,
                                struct dlb2_cmd_response *resp,
                                bool vdev_request,
                                unsigned int vdev_id);

enum dlb2_virt_mode {
        DLB2_VIRT_NONE,
        DLB2_VIRT_SRIOV,
        DLB2_VIRT_SIOV,

        /* NUM_DLB2_VIRT_MODES must be last */
        NUM_DLB2_VIRT_MODES,
};

/**
 * dlb2_hw_set_virt_mode() - set the device's virtualization mode
 * @hw: dlb2_hw handle for a particular device.
 * @mode: either none, SR-IOV, or Scalable IOV.
 *
 * This function sets the virtualization mode of the device. This controls
 * whether the device uses a software or hardware mailbox.

enum dlb2_virt_mode {
	DLB2_VIRT_NONE,
	DLB2_VIRT_SRIOV,
	DLB2_VIRT_SIOV,

	/* NUM_DLB2_VIRT_MODES must be last */
	NUM_DLB2_VIRT_MODES,
};

/**
 * dlb2_hw_set_virt_mode() - set the device's virtualization mode
 * @hw: dlb2_hw handle for a particular device.
 * @mode: either none, SR-IOV, or Scalable IOV.
 *
 * This function sets the virtualization mode of the device. This controls
 * whether the device uses a software or hardware mailbox.
 *
 * This should be called by the PF driver when either SR-IOV or Scalable IOV is
 * selected as the virtualization mechanism, and by the VF/VDEV driver during
 * initialization after recognizing itself as an SR-IOV or Scalable IOV device.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 *
 * Errors:
 * EINVAL - Invalid mode.
 */
int dlb2_hw_set_virt_mode(struct dlb2_hw *hw, enum dlb2_virt_mode mode);

/**
 * dlb2_hw_get_virt_mode() - get the device's virtualization mode
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function gets the virtualization mode of the device.
 */
enum dlb2_virt_mode dlb2_hw_get_virt_mode(struct dlb2_hw *hw);

/**
 * dlb2_hw_get_ldb_port_phys_id() - get a physical port ID from its virt ID
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual port ID.
 * @vdev_id: vdev ID.
 *
 * Return:
 * Returns >= 0 upon success, -1 otherwise.
 */
s32 dlb2_hw_get_ldb_port_phys_id(struct dlb2_hw *hw,
				 u32 id,
				 unsigned int vdev_id);

/**
 * dlb2_hw_get_dir_port_phys_id() - get a physical port ID from its virt ID
 * @hw: dlb2_hw handle for a particular device.
 * @id: virtual port ID.
 * @vdev_id: vdev ID.
 *
 * Return:
 * Returns >= 0 upon success, -1 otherwise.
 */
s32 dlb2_hw_get_dir_port_phys_id(struct dlb2_hw *hw,
				 u32 id,
				 unsigned int vdev_id);

/**
 * dlb2_hw_register_sw_mbox() - register a software mailbox
 * @hw: dlb2_hw handle for a particular device.
 * @vdev_id: vdev ID.
 * @vdev2pf_mbox: pointer to a 4KB memory page used for vdev->PF communication.
 * @pf2vdev_mbox: pointer to a 4KB memory page used for PF->vdev communication.
 * @pf2vdev_inject: callback function for injecting a PF->vdev interrupt.
 * @inject_arg: user argument for pf2vdev_inject callback.
 *
 * When Scalable IOV is enabled, the VDCM must register a software mailbox for
 * every virtual device during vdev creation.
 *
 * This function directs the driver to use the software mailbox at the provided
 * pointers instead of the device's hardware mailbox. When the driver calls
 * mailbox functions like dlb2_pf_write_vf_mbox_req(), the request goes to the
 * software mailbox instead of the hardware one. This is used in Scalable IOV
 * virtualization.
 */
void dlb2_hw_register_sw_mbox(struct dlb2_hw *hw,
			      unsigned int vdev_id,
			      u32 *vdev2pf_mbox,
			      u32 *pf2vdev_mbox,
			      void (*pf2vdev_inject)(void *),
			      void *inject_arg);

/**
 * dlb2_hw_unregister_sw_mbox() - unregister a software mailbox
 * @hw: dlb2_hw handle for a particular device.
 * @vdev_id: vdev ID.
 *
 * This function notifies the driver to stop using a previously registered
 * software mailbox.
 */
void dlb2_hw_unregister_sw_mbox(struct dlb2_hw *hw, unsigned int vdev_id);
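
/*
 * Example: an illustrative sketch of how a VDCM might register a software
 * mailbox when creating a Scalable IOV vdev. The page-allocation helper
 * my_alloc_4kb_page(), the my_inject_irq() callback, and my_irq_ctx are
 * hypothetical caller-provided names, not part of this API.
 *
 *	u32 *vdev2pf = my_alloc_4kb_page();
 *	u32 *pf2vdev = my_alloc_4kb_page();
 *
 *	dlb2_hw_register_sw_mbox(hw, vdev_id, vdev2pf, pf2vdev,
 *				 my_inject_irq, my_irq_ctx);
 *
 * and unregister it when the vdev is destroyed:
 *
 *	dlb2_hw_unregister_sw_mbox(hw, vdev_id);
 */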

/**
 * dlb2_hw_setup_cq_ims_entry() - setup a CQ's IMS entry
 * @hw: dlb2_hw handle for a particular device.
 * @vdev_id: vdev ID.
 * @virt_cq_id: virtual CQ ID.
 * @is_ldb: CQ is load-balanced.
 * @addr_lo: least-significant 32 bits of address.
 * @data: 32 data bits.
 *
 * This sets up the CQ's IMS entry with the provided address and data values.
 * This function should only be called if the device is configured for Scalable
 * IOV virtualization. The upper 32 address bits are fixed in hardware and thus
 * not needed.
 */
void dlb2_hw_setup_cq_ims_entry(struct dlb2_hw *hw,
				unsigned int vdev_id,
				u32 virt_cq_id,
				bool is_ldb,
				u32 addr_lo,
				u32 data);

/**
 * dlb2_hw_clear_cq_ims_entry() - clear a CQ's IMS entry
 * @hw: dlb2_hw handle for a particular device.
 * @vdev_id: vdev ID.
 * @virt_cq_id: virtual CQ ID.
 * @is_ldb: CQ is load-balanced.
 *
 * This clears the CQ's IMS entry, reverting it to its reset state.
 */
void dlb2_hw_clear_cq_ims_entry(struct dlb2_hw *hw,
				unsigned int vdev_id,
				u32 virt_cq_id,
				bool is_ldb);

/**
 * dlb2_hw_register_pasid() - register a vdev's PASID
 * @hw: dlb2_hw handle for a particular device.
 * @vdev_id: vdev ID.
 * @pasid: the vdev's PASID.
 *
 * This function stores the user-supplied PASID, and uses it when configuring
 * the vdev's CQs.
 *
 * Return:
 * Returns >= 0 upon success, -1 otherwise.
 */
int dlb2_hw_register_pasid(struct dlb2_hw *hw,
			   unsigned int vdev_id,
			   unsigned int pasid);

/**
 * dlb2_hw_pending_port_unmaps() - returns the number of unmap operations in
 *	progress.
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: number of unmaps in progress args
 * @resp: response structure.
 * @vf_request: indicates whether this request came from a VF.
 * @vf_id: If vf_request is true, this contains the VF's ID.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the number of unmaps in progress.
 *
 * Errors:
 * EINVAL - Invalid port ID.
 */
int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
				u32 domain_id,
				struct dlb2_pending_port_unmaps_args *args,
				struct dlb2_cmd_response *resp,
				bool vf_request,
				unsigned int vf_id);

/**
 * dlb2_hw_get_cos_bandwidth() - returns the percent of bandwidth allocated
 *	to a port class-of-service.
 * @hw: dlb2_hw handle for a particular device.
 * @cos_id: class-of-service ID.
 *
 * Return:
 * Returns -EINVAL if cos_id is invalid, else the class' bandwidth allocation.
 */
int dlb2_hw_get_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id);

/**
 * dlb2_hw_set_cos_bandwidth() - set a bandwidth allocation percentage for a
 *	port class-of-service.
 * @hw: dlb2_hw handle for a particular device.
 * @cos_id: class-of-service ID.
 * @bandwidth: class-of-service bandwidth.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - Invalid cos ID, bandwidth is greater than 100, or bandwidth would
 *	    cause the total bandwidth across all classes of service to exceed
 *	    100%.
 */
int dlb2_hw_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bandwidth);
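
/*
 * Example: an illustrative sketch of dividing port bandwidth across classes
 * of service. The assumption of four classes with IDs 0-3 and the 40/30/20/10
 * split are placeholders; the documented constraint is only that the total
 * across all classes must not exceed 100%.
 *
 *	static const u8 cos_bw[] = { 40, 30, 20, 10 };
 *	u32 cos_id;
 *
 *	for (cos_id = 0; cos_id < 4; cos_id++)
 *		if (dlb2_hw_set_cos_bandwidth(hw, cos_id, cos_bw[cos_id]))
 *			return -EINVAL;
 */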

enum dlb2_wd_tmo {
	/* 40s watchdog timeout */
	DLB2_WD_TMO_40S,
	/* 10s watchdog timeout */
	DLB2_WD_TMO_10S,
	/* 1s watchdog timeout */
	DLB2_WD_TMO_1S,

	/* Must be last */
	NUM_DLB2_WD_TMOS,
};

/**
 * dlb2_hw_enable_wd_timer() - enable the CQ watchdog timers with a
 *	caller-specified timeout.
 * @hw: dlb2_hw handle for a particular device.
 * @tmo: watchdog timeout.
 *
 * This function should be called during device initialization and after reset.
 * The watchdog timer interrupt must also be enabled per-CQ, using either
 * dlb2_hw_enable_dir_cq_wd_int() or dlb2_hw_enable_ldb_cq_wd_int().
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - Invalid timeout.
 */
int dlb2_hw_enable_wd_timer(struct dlb2_hw *hw, enum dlb2_wd_tmo tmo);

/**
 * dlb2_hw_enable_dir_cq_wd_int() - enable the CQ watchdog interrupt on an
 *	individual CQ.
 * @hw: dlb2_hw handle for a particular device.
 * @id: port ID.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - Invalid directed port ID.
 */
int dlb2_hw_enable_dir_cq_wd_int(struct dlb2_hw *hw,
				 u32 id,
				 bool vdev_req,
				 unsigned int vdev_id);

/**
 * dlb2_hw_enable_ldb_cq_wd_int() - enable the CQ watchdog interrupt on an
 *	individual CQ.
 * @hw: dlb2_hw handle for a particular device.
 * @id: port ID.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - Invalid load-balanced port ID.
 */
int dlb2_hw_enable_ldb_cq_wd_int(struct dlb2_hw *hw,
				 u32 id,
				 bool vdev_req,
				 unsigned int vdev_id);

/**
 * dlb2_hw_enable_sparse_ldb_cq_mode() - enable sparse mode for load-balanced
 *	ports.
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function must be called prior to configuring scheduling domains.
 */
void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw);

/**
 * dlb2_hw_enable_sparse_dir_cq_mode() - enable sparse mode for directed ports.
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function must be called prior to configuring scheduling domains.
 */
void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw);

/**
 * dlb2_hw_set_qe_arbiter_weights() - program QE arbiter weights
 * @hw: dlb2_hw handle for a particular device.
 * @weight: 8-entry array of arbiter weights.
 *
 * weight[N] programs priority N's weight. In cases where the 8 priorities are
 * reduced to 4 bins, the mapping is:
 * - weight[1] programs bin 0
 * - weight[3] programs bin 1
 * - weight[5] programs bin 2
 * - weight[7] programs bin 3
 */
void dlb2_hw_set_qe_arbiter_weights(struct dlb2_hw *hw, u8 weight[8]);

/**
 * dlb2_hw_set_qid_arbiter_weights() - program QID arbiter weights
 * @hw: dlb2_hw handle for a particular device.
 * @weight: 8-entry array of arbiter weights.
 *
 * weight[N] programs priority N's weight. In cases where the 8 priorities are
 * reduced to 4 bins, the mapping is:
 * - weight[1] programs bin 0
 * - weight[3] programs bin 1
 * - weight[5] programs bin 2
 * - weight[7] programs bin 3
 */
void dlb2_hw_set_qid_arbiter_weights(struct dlb2_hw *hw, u8 weight[8]);
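
/*
 * Example: an illustrative sketch of programming the arbiter weights. The
 * specific values are arbitrary placeholders; per the mapping above, when the
 * 8 priorities are reduced to 4 bins only entries 1, 3, 5 and 7 take effect
 * (programming bins 0-3 respectively).
 *
 *	u8 weights[8] = { 0, 8, 0, 4, 0, 2, 0, 1 };
 *
 *	dlb2_hw_set_qe_arbiter_weights(hw, weights);
 *	dlb2_hw_set_qid_arbiter_weights(hw, weights);
 */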

/**
 * dlb2_hw_ldb_cq_interrupt_enabled() - Check if the interrupt is enabled
 * @hw: dlb2_hw handle for a particular device.
 * @port_id: physical load-balanced port ID.
 *
 * This function returns whether the load-balanced CQ interrupt is enabled.
 */
int dlb2_hw_ldb_cq_interrupt_enabled(struct dlb2_hw *hw, int port_id);

/**
 * dlb2_hw_ldb_cq_interrupt_set_mode() - Program the CQ interrupt mode
 * @hw: dlb2_hw handle for a particular device.
 * @port_id: physical load-balanced port ID.
 * @mode: interrupt type (DLB2_CQ_ISR_MODE_{DIS, MSI, MSIX, ADI})
 *
 * This function can be used to disable (MODE_DIS) and re-enable the
 * load-balanced CQ's interrupt. It should only be called after the interrupt
 * has been configured with dlb2_configure_ldb_cq_interrupt().
 */
void dlb2_hw_ldb_cq_interrupt_set_mode(struct dlb2_hw *hw,
				       int port_id,
				       int mode);

/**
 * dlb2_hw_dir_cq_interrupt_enabled() - Check if the interrupt is enabled
 * @hw: dlb2_hw handle for a particular device.
 * @port_id: physical directed port ID.
 *
 * This function returns whether the directed CQ interrupt is enabled.
 */
int dlb2_hw_dir_cq_interrupt_enabled(struct dlb2_hw *hw, int port_id);

/**
 * dlb2_hw_dir_cq_interrupt_set_mode() - Program the CQ interrupt mode
 * @hw: dlb2_hw handle for a particular device.
 * @port_id: physical directed port ID.
 * @mode: interrupt type (DLB2_CQ_ISR_MODE_{DIS, MSI, MSIX, ADI})
 *
 * This function can be used to disable (MODE_DIS) and re-enable the
 * directed CQ's interrupt. It should only be called after the interrupt
 * has been configured with dlb2_configure_dir_cq_interrupt().
 */
void dlb2_hw_dir_cq_interrupt_set_mode(struct dlb2_hw *hw,
				       int port_id,
				       int mode);

#endif /* __DLB2_RESOURCE_H */
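
/*
 * Example: an illustrative sketch of temporarily masking a load-balanced CQ's
 * interrupt and then restoring it. It assumes the interrupt was previously
 * configured with dlb2_configure_ldb_cq_interrupt() and that MSI-X is the
 * interrupt type in use; the quiesce step is caller-specific.
 *
 *	if (dlb2_hw_ldb_cq_interrupt_enabled(hw, port_id)) {
 *		dlb2_hw_ldb_cq_interrupt_set_mode(hw, port_id,
 *						  DLB2_CQ_ISR_MODE_DIS);
 *
 *		... quiesce the CQ ...
 *
 *		dlb2_hw_ldb_cq_interrupt_set_mode(hw, port_id,
 *						  DLB2_CQ_ISR_MODE_MSIX);
 *	}
 */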