/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>

struct irq_affinity;

struct virtio_shm_region {
	u64 addr;
	u64 len;
};

typedef void vq_callback_t(struct virtqueue *);

/**
 * struct virtqueue_info - Info for a virtqueue passed to find_vqs().
 * @name: virtqueue description. Used mainly for debugging, NULL for
 *	a virtqueue unused by the driver.
 * @callback: A callback to invoke on a used buffer notification.
 *	NULL for a virtqueue that does not need a callback.
 * @ctx: A flag indicating whether to maintain an extra context per
 *	virtqueue.
 */
struct virtqueue_info {
	const char *name;
	vq_callback_t *callback;
	bool ctx;
};

/**
 * struct virtio_config_ops - operations for configuring a virtio device
 * Note: Do not assume that a transport implements the operations that
 *	get/set a value as simple reads/writes! Generally speaking,
 *	any of @get/@set, @get_status/@set_status, or @get_features/
 *	@finalize_features are NOT safe to be called from an atomic
 *	context.
 * @get: read the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to write the field value into.
 *	len: the length of the buffer
 * @set: write the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to read the field value from.
 *	len: the length of the buffer
 * @generation: config generation counter (optional)
 *	vdev: the virtio_device
 *	Returns the config generation counter
 * @get_status: read the status byte
 *	vdev: the virtio_device
 *	Returns the status byte
 * @set_status: write the status byte
 *	vdev: the virtio_device
 *	status: the new status byte
 * @reset: reset the device
 *	vdev: the virtio device
 *	After this, status and feature negotiation must be done again.
 *	Device must not be reset from its vq/config callbacks, or in
 *	parallel with being added/removed.
 * @find_vqs: find virtqueues and instantiate them.
 *	vdev: the virtio_device
 *	nvqs: the number of virtqueues to find
 *	vqs: on success, includes new virtqueues
 *	vqs_info: array of virtqueue info structures
 *	Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
 * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
 *	The function guarantees that all memory operations on the
 *	queue before it are visible to the vring_interrupt() that is
 *	called after it.
 *	vdev: the virtio_device
 * @get_features: get the array of feature bits for this device.
 *	vdev: the virtio_device
 *	Returns the first 64 feature bits (all we currently need).
 * @finalize_features: confirm what device features we'll be using.
 *	vdev: the virtio_device
 *	This sends the driver feature bits to the device: it can change
 *	the vdev->features bits if it wants.
 *	Note that despite the name this can be called any number of
 *	times.
 *	Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *	vdev: the virtio_device
 *	This returns a pointer to the bus name a la pci_name from which
 *	the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
 * @get_shm_region: get a shared memory region based on the index.
 * @disable_vq_and_reset: reset a queue individually (optional).
 *	vq: the virtqueue
 *	Returns 0 on success or error status
 *	disable_vq_and_reset will guarantee that the callbacks are disabled
 *	and synchronized.
 *	Except for the callback, the caller should guarantee that the vring
 *	is not accessed by any virtqueue functions.
 * @enable_vq_after_reset: re-enable a previously reset queue
 *	vq: the virtqueue
 *	Returns 0 on success or error status
 *	If disable_vq_and_reset is set, then enable_vq_after_reset must also
 *	be set.
 * @create_avq: create admin virtqueue resource.
 * @destroy_avq: destroy admin virtqueue resource.
 */
struct virtio_config_ops {
	void (*get)(struct virtio_device *vdev, unsigned offset,
		    void *buf, unsigned len);
	void (*set)(struct virtio_device *vdev, unsigned offset,
		    const void *buf, unsigned len);
	u32 (*generation)(struct virtio_device *vdev);
	u8 (*get_status)(struct virtio_device *vdev);
	void (*set_status)(struct virtio_device *vdev, u8 status);
	void (*reset)(struct virtio_device *vdev);
	int (*find_vqs)(struct virtio_device *vdev, unsigned int nvqs,
			struct virtqueue *vqs[],
			struct virtqueue_info vqs_info[],
			struct irq_affinity *desc);
	void (*del_vqs)(struct virtio_device *);
	void (*synchronize_cbs)(struct virtio_device *);
	u64 (*get_features)(struct virtio_device *vdev);
	int (*finalize_features)(struct virtio_device *vdev);
	const char *(*bus_name)(struct virtio_device *vdev);
	int (*set_vq_affinity)(struct virtqueue *vq,
			       const struct cpumask *cpu_mask);
	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
						 int index);
	bool (*get_shm_region)(struct virtio_device *vdev,
			       struct virtio_shm_region *region, u8 id);
	int (*disable_vq_and_reset)(struct virtqueue *vq);
	int (*enable_vq_after_reset)(struct virtqueue *vq);
	int (*create_avq)(struct virtio_device *vdev);
	void (*destroy_avq)(struct virtio_device *vdev);
};
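
/*
 * Illustrative sketch: a transport typically provides a static ops
 * table and points its virtio_device at it.  The my_transport_*()
 * helpers below are hypothetical and only show the wiring; a real
 * transport implements as many of the optional ops as it supports.
 *
 *	static const struct virtio_config_ops my_transport_config_ops = {
 *		.get			= my_transport_get,
 *		.set			= my_transport_set,
 *		.generation		= my_transport_generation,
 *		.get_status		= my_transport_get_status,
 *		.set_status		= my_transport_set_status,
 *		.reset			= my_transport_reset,
 *		.find_vqs		= my_transport_find_vqs,
 *		.del_vqs		= my_transport_del_vqs,
 *		.get_features		= my_transport_get_features,
 *		.finalize_features	= my_transport_finalize_features,
 *	};
 */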

/* If driver didn't advertise the feature, it will never appear. */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
					 unsigned int fbit);

/**
 * __virtio_test_bit - helper to test feature bits. For use by transports.
 *	Devices should normally use virtio_has_feature,
 *	which includes more checks.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
				     unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	return vdev->features & BIT_ULL(fbit);
}

/**
 * __virtio_set_bit - helper to set feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_set_bit(struct virtio_device *vdev,
				    unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features |= BIT_ULL(fbit);
}

/**
 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_clear_bit(struct virtio_device *vdev,
				      unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features &= ~BIT_ULL(fbit);
}

/**
 * virtio_has_feature - helper to determine if this device has this feature.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool virtio_has_feature(const struct virtio_device *vdev,
				      unsigned int fbit)
{
	if (fbit < VIRTIO_TRANSPORT_F_START)
		virtio_check_driver_offered_feature(vdev, fbit);

	return __virtio_test_bit(vdev, fbit);
}

/**
 * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 */
static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
	/*
	 * Note the reverse polarity of the quirk feature (compared to most
	 * other features), this is for compatibility with legacy systems.
	 */
	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}
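
/*
 * Illustrative sketch: a driver checks a device-specific feature bit
 * only after it has negotiated features.  VIRTIO_BLK_F_RO (a virtio-blk
 * feature) is used purely as an example of a driver-offered bit, and
 * "disk" stands for the driver's own state.
 *
 *	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
 *		set_disk_ro(disk, true);
 */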

static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		    struct virtqueue *vqs[],
		    struct virtqueue_info vqs_info[],
		    struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, vqs_info, desc);
}

static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
					vq_callback_t *c, const char *n)
{
	struct virtqueue_info vqs_info[] = {
		{ n, c },
	};
	struct virtqueue *vq;
	int err = virtio_find_vqs(vdev, 1, &vq, vqs_info, NULL);

	if (err < 0)
		return ERR_PTR(err);
	return vq;
}

/**
 * virtio_synchronize_cbs - synchronize with virtqueue callbacks
 * @dev: the virtio device
 */
static inline
void virtio_synchronize_cbs(struct virtio_device *dev)
{
	if (dev->config->synchronize_cbs) {
		dev->config->synchronize_cbs(dev);
	} else {
		/*
		 * A best effort fallback to synchronize with
		 * interrupts, preemption and softirq disabled
		 * regions. See comment above synchronize_rcu().
		 */
		synchronize_rcu();
	}
}

/**
 * virtio_device_ready - enable vq use in probe function
 * @dev: the virtio device
 *
 * Driver must call this to use vqs in the probe function.
 *
 * Note: vqs are enabled automatically after probe returns.
 */
static inline
void virtio_device_ready(struct virtio_device *dev)
{
	unsigned status = dev->config->get_status(dev);

	WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	/*
	 * The virtio_synchronize_cbs() makes sure vring_interrupt()
	 * will see the driver specific setup if it sees vq->broken
	 * as false (even if the notifications come before DRIVER_OK).
	 */
	virtio_synchronize_cbs(dev);
	__virtio_unbreak_device(dev);
#endif
	/*
	 * The transport should ensure the visibility of vq->broken
	 * before setting DRIVER_OK. See the comments for the transport
	 * specific set_status() method.
	 *
	 * A well-behaved device will only notify a virtqueue after
	 * DRIVER_OK; this means that, by the time the device sees
	 * DRIVER_OK, it should also "see" the coherent memory write by
	 * the driver that set vq->broken to false, so the subsequent
	 * vring_interrupt() in the driver will see vq->broken as false
	 * and we won't lose any notification.
	 */
	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
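
/*
 * Illustrative sketch: typical probe-time ordering.  Virtqueues are
 * found first, driver setup happens, and virtio_device_ready() is
 * called before the device is used.  my_probe(), my_rx_done() and
 * my_tx_done() are hypothetical names.
 *
 *	static int my_probe(struct virtio_device *vdev)
 *	{
 *		struct virtqueue_info vqs_info[] = {
 *			{ "rx", my_rx_done },
 *			{ "tx", my_tx_done },
 *		};
 *		struct virtqueue *vqs[2];
 *		int err;
 *
 *		err = virtio_find_vqs(vdev, 2, vqs, vqs_info, NULL);
 *		if (err)
 *			return err;
 *
 *		virtio_device_ready(vdev);
 *		return 0;
 *	}
 */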

static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
	if (!vdev->config->bus_name)
		return "virtio";
	return vdev->config->bus_name(vdev);
}

/**
 * virtqueue_set_affinity - set the affinity for a virtqueue
 * @vq: the virtqueue
 * @cpu_mask: the cpu mask
 *
 * Note that this function is best-effort: the affinity hint may not be
 * set, depending on config op support, the IRQ type and IRQ sharing.
 */
static inline
int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;

	if (vdev->config->set_vq_affinity)
		return vdev->config->set_vq_affinity(vq, cpu_mask);
	return 0;
}

static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
			   struct virtio_shm_region *region, u8 id)
{
	if (!vdev->config->get_shm_region)
		return false;
	return vdev->config->get_shm_region(vdev, region, id);
}

static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
		virtio_legacy_is_little_endian();
}

/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}

static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}

static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}

#define virtio_to_cpu(vdev, x)	\
	_Generic((x),	\
		__u8: (x),	\
		__virtio16: virtio16_to_cpu((vdev), (x)),	\
		__virtio32: virtio32_to_cpu((vdev), (x)),	\
		__virtio64: virtio64_to_cpu((vdev), (x))	\
	)

#define cpu_to_virtio(vdev, x, m)	\
	_Generic((m),	\
		__u8: (x),	\
		__virtio16: cpu_to_virtio16((vdev), (x)),	\
		__virtio32: cpu_to_virtio32((vdev), (x)),	\
		__virtio64: cpu_to_virtio64((vdev), (x))	\
	)

#define __virtio_native_type(structname, member)	\
	typeof(virtio_to_cpu(NULL, ((structname*)0)->member))
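
/*
 * Illustrative sketch: converting device-endian values.  A legacy
 * device uses guest-native endianness while a VIRTIO_F_VERSION_1
 * device is always little endian; the accessors above hide that
 * difference.  "offset" is a placeholder for a config field offset.
 *
 *	__virtio16 wire;
 *	u16 host;
 *
 *	vdev->config->get(vdev, offset, &wire, sizeof(wire));
 *	host = virtio16_to_cpu(vdev, wire);
 *	wire = cpu_to_virtio16(vdev, host);
 */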

/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr)	\
	do {	\
		typeof(((structname*)0)->member) virtio_cread_v;	\
	\
		might_sleep();	\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
	\
		switch (sizeof(virtio_cread_v)) {	\
		case 1:	\
		case 2:	\
		case 4:	\
			vdev->config->get((vdev),	\
					  offsetof(structname, member),	\
					  &virtio_cread_v,	\
					  sizeof(virtio_cread_v));	\
			break;	\
		default:	\
			__virtio_cread_many((vdev),	\
					    offsetof(structname, member), \
					    &virtio_cread_v,	\
					    1,	\
					    sizeof(virtio_cread_v));	\
			break;	\
		}	\
		*(ptr) = virtio_to_cpu(vdev, virtio_cread_v);	\
	} while(0)

/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr)	\
	do {	\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
	\
		might_sleep();	\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
	\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,	\
				  sizeof(virtio_cwrite_v));	\
	} while(0)

/*
 * Nothing virtio-specific about these, but let's worry about generalizing
 * these later.
 */
#define virtio_le_to_cpu(x)	\
	_Generic((x),	\
		__u8: (u8)(x),	\
		__le16: (u16)le16_to_cpu(x),	\
		__le32: (u32)le32_to_cpu(x),	\
		__le64: (u64)le64_to_cpu(x)	\
	)

#define virtio_cpu_to_le(x, m)	\
	_Generic((m),	\
		__u8: (x),	\
		__le16: cpu_to_le16(x),	\
		__le32: cpu_to_le32(x),	\
		__le64: cpu_to_le64(x)	\
	)

/* LE (e.g. modern) Config space accessors. */
#define virtio_cread_le(vdev, structname, member, ptr)	\
	do {	\
		typeof(((structname*)0)->member) virtio_cread_v;	\
	\
		might_sleep();	\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
	\
		switch (sizeof(virtio_cread_v)) {	\
		case 1:	\
		case 2:	\
		case 4:	\
			vdev->config->get((vdev),	\
					  offsetof(structname, member),	\
					  &virtio_cread_v,	\
					  sizeof(virtio_cread_v));	\
			break;	\
		default:	\
			__virtio_cread_many((vdev),	\
					    offsetof(structname, member), \
					    &virtio_cread_v,	\
					    1,	\
					    sizeof(virtio_cread_v));	\
			break;	\
		}	\
		*(ptr) = virtio_le_to_cpu(virtio_cread_v);	\
	} while(0)

#define virtio_cwrite_le(vdev, structname, member, ptr)	\
	do {	\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
	\
		might_sleep();	\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
	\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,	\
				  sizeof(virtio_cwrite_v));	\
	} while(0)

/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
				       unsigned int offset,
				       void *buf, size_t count, size_t bytes)
{
	u32 old, gen = vdev->config->generation ?
		vdev->config->generation(vdev) : 0;
	int i;

	might_sleep();
	do {
		old = gen;

		for (i = 0; i < count; i++)
			vdev->config->get(vdev, offset + bytes * i,
					  buf + i * bytes, bytes);

		gen = vdev->config->generation ?
			vdev->config->generation(vdev) : 0;
	} while (gen != old);
}
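
/*
 * Illustrative sketch: virtio_cread() of a field wider than 4 bytes
 * goes through __virtio_cread_many(), so the read is retried until the
 * config generation is stable and a torn value is never returned.
 * struct virtio_blk_config and its "capacity" member come from
 * <uapi/linux/virtio_blk.h>.
 *
 *	u64 capacity;
 *
 *	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
 */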

static inline void virtio_cread_bytes(struct virtio_device *vdev,
				      unsigned int offset,
				      void *buf, size_t len)
{
	__virtio_cread_many(vdev, offset, buf, len, 1);
}

static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
	u8 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return ret;
}

static inline void virtio_cwrite8(struct virtio_device *vdev,
				  unsigned int offset, u8 val)
{
	might_sleep();
	vdev->config->set(vdev, offset, &val, sizeof(val));
}

static inline u16 virtio_cread16(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio16 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio16_to_cpu(vdev, ret);
}

static inline void virtio_cwrite16(struct virtio_device *vdev,
				   unsigned int offset, u16 val)
{
	__virtio16 v;

	might_sleep();
	v = cpu_to_virtio16(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u32 virtio_cread32(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio32 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio32_to_cpu(vdev, ret);
}

static inline void virtio_cwrite32(struct virtio_device *vdev,
				   unsigned int offset, u32 val)
{
	__virtio32 v;

	might_sleep();
	v = cpu_to_virtio32(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u64 virtio_cread64(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio64 ret;

	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
	return virtio64_to_cpu(vdev, ret);
}

static inline void virtio_cwrite64(struct virtio_device *vdev,
				   unsigned int offset, u64 val)
{
	__virtio64 v;

	might_sleep();
	v = cpu_to_virtio64(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

/* Conditional config space accessors. */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
	({	\
		int _r = 0;	\
		if (!virtio_has_feature(vdev, fbit))	\
			_r = -ENOENT;	\
		else	\
			virtio_cread((vdev), structname, member, ptr);	\
		_r;	\
	})

/* Conditional config space accessors. */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)	\
	({	\
		int _r = 0;	\
		if (!virtio_has_feature(vdev, fbit))	\
			_r = -ENOENT;	\
		else	\
			virtio_cread_le((vdev), structname, member, ptr); \
		_r;	\
	})

#endif /* _LINUX_VIRTIO_CONFIG_H */