/* SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Copyright (C) 2005 David Brownell
 */

#ifndef __LINUX_SPI_H
#define __LINUX_SPI_H

#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/gpio/consumer.h>
#include <linux/ptp_clock_kernel.h>

struct dma_chan;
struct property_entry;
struct spi_controller;
struct spi_transfer;
struct spi_controller_mem_ops;

/*
 * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
 * and SPI infrastructure.
 */
extern struct bus_type spi_bus_type;

/**
 * struct spi_statistics - statistics for spi transfers
 * @lock: lock protecting this structure
 *
 * @messages: number of spi-messages handled
 * @transfers: number of spi_transfers handled
 * @errors: number of errors during spi_transfer
 * @timedout: number of timeouts during spi_transfer
 *
 * @spi_sync: number of times spi_sync is used
 * @spi_sync_immediate:
 *	number of times spi_sync is executed immediately
 *	in calling context without queuing and scheduling
 * @spi_async: number of times spi_async is used
 *
 * @bytes: number of bytes transferred to/from device
 * @bytes_tx: number of bytes sent to device
 * @bytes_rx: number of bytes received from device
 *
 * @transfer_bytes_histo:
 *	transfer bytes histogram
 *
 * @transfers_split_maxsize:
 *	number of transfers that have been split because of
 *	maxsize limit
 */
struct spi_statistics {
	spinlock_t		lock; /* lock for the whole structure */

	unsigned long		messages;
	unsigned long		transfers;
	unsigned long		errors;
	unsigned long		timedout;

	unsigned long		spi_sync;
	unsigned long		spi_sync_immediate;
	unsigned long		spi_async;

	unsigned long long	bytes;
	unsigned long long	bytes_rx;
	unsigned long long	bytes_tx;

#define SPI_STATISTICS_HISTO_SIZE 17
	unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];

	unsigned long transfers_split_maxsize;
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr);

/* Atomically add @count to @stats->field, taking the statistics lock
 * with interrupts disabled (safe from any context).
 */
#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count)	\
	do {							\
		unsigned long flags;				\
		spin_lock_irqsave(&(stats)->lock, flags);	\
		(stats)->field += count;			\
		spin_unlock_irqrestore(&(stats)->lock, flags);	\
	} while (0)

#define SPI_STATISTICS_INCREMENT_FIELD(stats, field)	\
	SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)

/**
 * struct spi_delay - SPI delay information
 * @value: Value for the delay
 * @unit: Unit for the delay
 */
struct spi_delay {
#define SPI_DELAY_UNIT_USECS	0
#define SPI_DELAY_UNIT_NSECS	1
#define SPI_DELAY_UNIT_SCK	2
	u16	value;
	u8	unit;
};

extern int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer);
extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);

/**
 * struct spi_device - Controller side proxy for an SPI slave device
 * @dev: Driver model representation of the device.
 * @controller: SPI controller used with the device.
 * @master: Copy of controller, for backwards compatibility.
 * @max_speed_hz: Maximum clock rate to be used with this chip
 *	(on this board); may be changed by the device's driver.
 *	The spi_transfer.speed_hz can override this for each transfer.
 * @chip_select: Chipselect, distinguishing chips handled by @controller.
 * @mode: The spi mode defines how data is clocked out and in.
 *	This may be changed by the device's driver.
 *	The "active low" default for chipselect mode can be overridden
 *	(by specifying SPI_CS_HIGH) as can the "MSB first" default for
 *	each word in a transfer (by specifying SPI_LSB_FIRST).
 * @bits_per_word: Data transfers involve one or more words; word sizes
 *	like eight or 12 bits are common.  In-memory wordsizes are
 *	powers of two bytes (e.g. 20 bit samples use 32 bits).
 *	This may be changed by the device's driver, or left at the
 *	default (0) indicating protocol words are eight bit bytes.
 *	The spi_transfer.bits_per_word can override this for each transfer.
 * @rt: Make the pump thread real time priority.
 * @irq: Negative, or the number passed to request_irq() to receive
 *	interrupts from this device.
 * @controller_state: Controller's runtime state
 * @controller_data: Board-specific definitions for controller, such as
 *	FIFO initialization parameters; from board_info.controller_data
 * @modalias: Name of the driver to use with this device, or an alias
 *	for that name.  This appears in the sysfs "modalias" attribute
 *	for driver coldplugging, and in uevents used for hotplugging
 * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when
 *	not using a GPIO line) use cs_gpiod in new drivers by opting in on
 *	the spi_master.
 * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
 *	not using a GPIO line)
 * @word_delay: delay to be inserted between consecutive
 *	words of a transfer
 *
 * @statistics: statistics for the spi_device
 *
 * A @spi_device is used to interchange data between an SPI slave
 * (usually a discrete chip) and CPU memory.
 *
 * In @dev, the platform_data is used to hold information about this
 * device that's meaningful to the device's protocol driver, but not
 * to its controller.  One example might be an identifier for a chip
 * variant with slightly different functionality; another might be
 * information about how this particular board wires the chip's pins.
 */
struct spi_device {
	struct device		dev;
	struct spi_controller	*controller;
	struct spi_controller	*master;	/* compatibility layer */
	u32			max_speed_hz;
	u8			chip_select;
	u8			bits_per_word;
	bool			rt;
	u32			mode;
#define	SPI_CPHA	0x01			/* clock phase */
#define	SPI_CPOL	0x02			/* clock polarity */
#define	SPI_MODE_0	(0|0)			/* (original MicroWire) */
#define	SPI_MODE_1	(0|SPI_CPHA)
#define	SPI_MODE_2	(SPI_CPOL|0)
#define	SPI_MODE_3	(SPI_CPOL|SPI_CPHA)
#define	SPI_CS_HIGH	0x04			/* chipselect active high? */
#define	SPI_LSB_FIRST	0x08			/* per-word bits-on-wire */
#define	SPI_3WIRE	0x10			/* SI/SO signals shared */
#define	SPI_LOOP	0x20			/* loopback mode */
#define	SPI_NO_CS	0x40			/* 1 dev/bus, no chipselect */
#define	SPI_READY	0x80			/* slave pulls low to pause */
#define	SPI_TX_DUAL	0x100			/* transmit with 2 wires */
#define	SPI_TX_QUAD	0x200			/* transmit with 4 wires */
#define	SPI_RX_DUAL	0x400			/* receive with 2 wires */
#define	SPI_RX_QUAD	0x800			/* receive with 4 wires */
#define	SPI_CS_WORD	0x1000			/* toggle cs after each word */
#define	SPI_TX_OCTAL	0x2000			/* transmit with 8 wires */
#define	SPI_RX_OCTAL	0x4000			/* receive with 8 wires */
#define	SPI_3WIRE_HIZ	0x8000			/* high impedance turnaround */
	int			irq;
	void			*controller_state;
	void			*controller_data;
	char			modalias[SPI_NAME_SIZE];
	const char		*driver_override;
	int			cs_gpio;	/* LEGACY: chip select gpio */
	struct gpio_desc	*cs_gpiod;	/* chip select gpio desc */
	struct spi_delay	word_delay;	/* inter-word delay */

	/* the statistics */
	struct spi_statistics	statistics;

	/*
	 * likely need more hooks for more protocol options affecting how
	 * the controller talks to each chip, like:
	 *  - memory packing (12 bit samples into low bits, others zeroed)
	 *  - priority
	 *  - chipselect delays
	 *  - ...
	 */
};

/* Convert a driver-model device back to its enclosing spi_device. */
static inline struct spi_device *to_spi_device(struct device *dev)
{
	return dev ? container_of(dev, struct spi_device, dev) : NULL;
}

/* most drivers won't need to care about device refcounting */
static inline struct spi_device *spi_dev_get(struct spi_device *spi)
{
	return (spi && get_device(&spi->dev)) ? spi : NULL;
}

/* Drop a reference taken by spi_dev_get(); NULL-safe. */
static inline void spi_dev_put(struct spi_device *spi)
{
	if (spi)
		put_device(&spi->dev);
}

/* ctldata is for the bus_controller driver's runtime state */
static inline void *spi_get_ctldata(struct spi_device *spi)
{
	return spi->controller_state;
}

static inline void spi_set_ctldata(struct spi_device *spi, void *state)
{
	spi->controller_state = state;
}

/* device driver data */

static inline void spi_set_drvdata(struct spi_device *spi, void *data)
{
	dev_set_drvdata(&spi->dev, data);
}

static inline void *spi_get_drvdata(struct spi_device *spi)
{
	return dev_get_drvdata(&spi->dev);
}

struct spi_message;
struct spi_transfer;

/**
 * struct spi_driver - Host side "protocol" driver
 * @id_table: List of SPI devices supported by this driver
 * @probe: Binds this driver to the spi device.  Drivers can verify
 *	that the device is actually present, and may need to configure
 *	characteristics (such as bits_per_word) which weren't needed for
 *	the initial configuration done during system setup.
 * @remove: Unbinds this driver from the spi device
 * @shutdown: Standard shutdown callback used during system state
 *	transitions such as powerdown/halt and kexec
 * @driver: SPI device drivers should initialize the name and owner
 *	field of this structure.
 *
 * This represents the kind of device driver that uses SPI messages to
 * interact with the hardware at the other end of a SPI link.  It's called
 * a "protocol" driver because it works through messages rather than talking
 * directly to SPI hardware (which is what the underlying SPI controller
 * driver does to pass those messages).  These protocols are defined in the
 * specification for the device(s) supported by the driver.
 *
 * As a rule, those device protocols represent the lowest level interface
 * supported by a driver, and it will support upper level interfaces too.
 * Examples of such upper levels include frameworks like MTD, networking,
 * MMC, RTC, filesystem character device nodes, and hardware monitoring.
 */
struct spi_driver {
	const struct spi_device_id *id_table;
	int			(*probe)(struct spi_device *spi);
	int			(*remove)(struct spi_device *spi);
	void			(*shutdown)(struct spi_device *spi);
	struct device_driver	driver;
};

/* Convert a driver-model driver back to its enclosing spi_driver. */
static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
{
	return drv ? container_of(drv, struct spi_driver, driver) : NULL;
}

extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv);

/**
 * spi_unregister_driver - reverse effect of spi_register_driver
 * @sdrv: the driver to unregister
 * Context: can sleep
 */
static inline void spi_unregister_driver(struct spi_driver *sdrv)
{
	if (sdrv)
		driver_unregister(&sdrv->driver);
}

/* use a define to avoid include chaining to get THIS_MODULE */
#define spi_register_driver(driver) \
	__spi_register_driver(THIS_MODULE, driver)

/**
 * module_spi_driver() - Helper macro for registering a SPI driver
 * @__spi_driver: spi_driver struct
 *
 * Helper macro for SPI drivers which do not do anything special in module
 * init/exit.  This eliminates a lot of boilerplate.
 * Each module may only
 * use this macro once, and calling it replaces module_init() and module_exit()
 */
#define module_spi_driver(__spi_driver) \
	module_driver(__spi_driver, spi_register_driver, \
			spi_unregister_driver)

/**
 * struct spi_controller - interface to SPI master or slave controller
 * @dev: device interface to this driver
 * @list: link with the global spi_controller list
 * @bus_num: board-specific (and often SOC-specific) identifier for a
 *	given SPI controller.
 * @num_chipselect: chipselects are used to distinguish individual
 *	SPI slaves, and are numbered from zero to num_chipselects.
 *	each slave has a chipselect signal, but it's common that not
 *	every chipselect is connected to a slave.
 * @dma_alignment: SPI controller constraint on DMA buffers alignment.
 * @mode_bits: flags understood by this controller driver
 * @bits_per_word_mask: A mask indicating which values of bits_per_word are
 *	supported by the driver. Bit n indicates that a bits_per_word n+1 is
 *	supported. If set, the SPI core will reject any transfer with an
 *	unsupported bits_per_word. If not set, this value is simply ignored,
 *	and it's up to the individual driver to perform any validation.
 * @min_speed_hz: Lowest supported transfer speed
 * @max_speed_hz: Highest supported transfer speed
 * @flags: other constraints relevant to this driver
 * @slave: indicates that this is an SPI slave controller
 * @max_transfer_size: function that returns the max transfer size for
 *	a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
 * @max_message_size: function that returns the max message size for
 *	a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
 * @io_mutex: mutex for physical bus access
 * @bus_lock_spinlock: spinlock for SPI bus locking
 * @bus_lock_mutex: mutex for exclusion of multiple callers
 * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
 * @setup: updates the device mode and clocking records used by a
 *	device's SPI controller; protocol code may call this.  This
 *	must fail if an unrecognized or unsupported mode is requested.
 *	It's always safe to call this unless transfers are pending on
 *	the device whose settings are being modified.
 * @set_cs_timing: optional hook for SPI devices to request SPI master
 *	controller for configuring specific CS setup time, hold time and
 *	inactive delay in terms of clock counts
 * @transfer: adds a message to the controller's transfer queue.
 * @cleanup: frees controller-specific state
 * @can_dma: determine whether this controller supports DMA
 * @queued: whether this controller is providing an internal message queue
 * @kworker: thread struct for message pump
 * @kworker_task: pointer to task for message pump kworker thread
 * @pump_messages: work struct for scheduling work to the message pump
 * @queue_lock: spinlock to synchronise access to message queue
 * @queue: message queue
 * @idling: the device is entering idle state
 * @cur_msg: the currently in-flight message
 * @cur_msg_prepared: spi_prepare_message was called for the currently
 *	in-flight message
 * @cur_msg_mapped: message has been mapped for DMA
 * @xfer_completion: used by core transfer_one_message()
 * @busy: message pump is busy
 * @running: message pump is running
 * @rt: whether this queue is set to run as a realtime task
 * @auto_runtime_pm: the core should ensure a runtime PM reference is held
 *	while the hardware is prepared, using the parent
 *	device for the spidev
 * @max_dma_len: Maximum length of a DMA transfer for the device.
 * @prepare_transfer_hardware: a message will soon arrive from the queue
 *	so the subsystem requests the driver to prepare the transfer hardware
 *	by issuing this call
 * @transfer_one_message: the subsystem calls the driver to transfer a single
 *	message while queuing transfers that arrive in the meantime. When the
 *	driver is finished with this message, it must call
 *	spi_finalize_current_message() so the subsystem can issue the next
 *	message
 * @unprepare_transfer_hardware: there are currently no more messages on the
 *	queue so the subsystem notifies the driver that it may relax the
 *	hardware by issuing this call
 *
 * @set_cs: set the logic level of the chip select line.  May be called
 *	from interrupt context.
 * @prepare_message: set up the controller to transfer a single message,
 *	for example doing DMA mapping.  Called from threaded
 *	context.
 * @transfer_one: transfer a single spi_transfer.
 *	- return 0 if the transfer is finished,
 *	- return 1 if the transfer is still in progress. When
 *	  the driver is finished with this transfer it must
 *	  call spi_finalize_current_transfer() so the subsystem
 *	  can issue the next transfer. Note: transfer_one and
 *	  transfer_one_message are mutually exclusive; when both
 *	  are set, the generic subsystem does not call your
 *	  transfer_one callback.
 * @handle_err: the subsystem calls the driver to handle an error that occurs
 *	in the generic implementation of transfer_one_message().
 * @mem_ops: optimized/dedicated operations for interactions with SPI memory.
 *	This field is optional and should only be implemented if the
 *	controller has native support for memory like operations.
 * @unprepare_message: undo any work done by prepare_message().
 * @slave_abort: abort the ongoing transfer request on an SPI slave controller
 * @cs_setup: delay to be introduced by the controller after CS is asserted
 * @cs_hold: delay to be introduced by the controller before CS is deasserted
 * @cs_inactive: delay to be introduced by the controller after CS is
 *	deasserted. If @cs_change_delay is used from @spi_transfer, then the
 *	two delays will be added up.
 * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per
 *	CS number. Any individual value may be -ENOENT for CS lines that
 *	are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods
 *	in new drivers.
 * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS
 *	number. Any individual value may be NULL for CS lines that
 *	are not GPIOs (driven by the SPI controller itself).
 * @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab
 *	GPIO descriptors rather than using global GPIO numbers grabbed by the
 *	driver. This will fill in @cs_gpiods and @cs_gpios should not be used,
 *	and SPI devices will have the cs_gpiod assigned rather than cs_gpio.
 * @statistics: statistics for the spi_controller
 * @dma_tx: DMA transmit channel
 * @dma_rx: DMA receive channel
 * @dummy_rx: dummy receive buffer for full-duplex devices
 * @dummy_tx: dummy transmit buffer for full-duplex devices
 * @fw_translate_cs: If the boot firmware uses a different numbering scheme
 *	than Linux expects, this optional hook can be used to translate
 *	between the two.
 * @ptp_sts_supported: If the driver sets this to true, it must provide a
 *	time snapshot in @spi_transfer->ptp_sts as close as possible to the
 *	moment in time when @spi_transfer->ptp_sts_word_pre and
 *	@spi_transfer->ptp_sts_word_post were transmitted.
 *	If the driver does not set this, the SPI core takes the snapshot as
 *	close to the driver hand-over as possible.
 *
 * Each SPI controller can communicate with one or more @spi_device
 * children.  These make a small bus, sharing MOSI, MISO and SCK signals
 * but not chip select signals.  Each device may be configured to use a
 * different clock rate, since those shared signals are ignored unless
 * the chip is selected.
 *
 * The driver for an SPI controller manages access to those devices through
 * a queue of spi_message transactions, copying data between CPU memory and
 * an SPI slave device.  For each such message it queues, it calls the
 * message's completion function when the transaction completes.
 */
struct spi_controller {
	struct device	dev;

	struct list_head list;

	/* other than negative (== assign one dynamically), bus_num is fully
	 * board-specific.  usually that simplifies to being SOC-specific.
	 * example: one SOC has three SPI controllers, numbered 0..2,
	 * and one board's schematics might show it using SPI-2.  software
	 * would normally use bus_num=2 for that controller.
	 */
	s16			bus_num;

	/* chipselects will be integral to many controllers; some others
	 * might use board-specific GPIOs.
	 */
	u16			num_chipselect;

	/* some SPI controllers pose alignment requirements on DMAable
	 * buffers; let protocol drivers know about these requirements.
	 */
	u16			dma_alignment;

	/* spi_device.mode flags understood by this controller driver */
	u32			mode_bits;

	/* bitmask of supported bits_per_word for transfers */
	u32			bits_per_word_mask;
#define SPI_BPW_MASK(bits) BIT((bits) - 1)
#define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1)

	/* limits on transfer speed */
	u32			min_speed_hz;
	u32			max_speed_hz;

	/* other constraints relevant to this driver */
	u16			flags;
#define SPI_CONTROLLER_HALF_DUPLEX	BIT(0)	/* can't do full duplex */
#define SPI_CONTROLLER_NO_RX		BIT(1)	/* can't do buffer read */
#define SPI_CONTROLLER_NO_TX		BIT(2)	/* can't do buffer write */
#define SPI_CONTROLLER_MUST_RX		BIT(3)	/* requires rx */
#define SPI_CONTROLLER_MUST_TX		BIT(4)	/* requires tx */

#define SPI_MASTER_GPIO_SS		BIT(5)	/* GPIO CS must select slave */

	/* flag indicating this is an SPI slave controller */
	bool			slave;

	/*
	 * on some hardware transfer / message size may be constrained
	 * the limit may depend on device transfer settings
	 */
	size_t (*max_transfer_size)(struct spi_device *spi);
	size_t (*max_message_size)(struct spi_device *spi);

	/* I/O mutex */
	struct mutex		io_mutex;

	/* lock and mutex for SPI bus locking */
	spinlock_t		bus_lock_spinlock;
	struct mutex		bus_lock_mutex;

	/* flag indicating that the SPI bus is locked for exclusive use */
	bool			bus_lock_flag;

	/* Setup mode and clock, etc (spi driver may call many times).
	 *
	 * IMPORTANT:  this may be called when transfers to another
	 * device are active.  DO NOT UPDATE SHARED REGISTERS in ways
	 * which could break those transfers.
	 */
	int			(*setup)(struct spi_device *spi);

	/*
	 * set_cs_timing() method is for SPI controllers that support
	 * configuring CS timing.
	 *
	 * This hook allows SPI client drivers to request SPI controllers
	 * to configure specific CS timing through spi_set_cs_timing() after
	 * spi_setup().
	 */
	int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup,
			     struct spi_delay *hold, struct spi_delay *inactive);

	/* bidirectional bulk transfers
	 *
	 * + The transfer() method may not sleep; its main role is
	 *   just to add the message to the queue.
	 * + For now there's no remove-from-queue operation, or
	 *   any other request management
	 * + To a given spi_device, message queueing is pure fifo
	 *
	 * + The controller's main job is to process its message queue,
	 *   selecting a chip (for masters), then transferring data
	 * + If there are multiple spi_device children, the i/o queue
	 *   arbitration algorithm is unspecified (round robin, fifo,
	 *   priority, reservations, preemption, etc)
	 *
	 * + Chipselect stays active during the entire message
	 *   (unless modified by spi_transfer.cs_change != 0).
	 * + The message transfers use clock and SPI mode parameters
	 *   previously established by setup() for this device
	 */
	int			(*transfer)(struct spi_device *spi,
						struct spi_message *mesg);

	/* called on release() to free memory provided by spi_controller */
	void			(*cleanup)(struct spi_device *spi);

	/*
	 * Used to enable core support for DMA handling, if can_dma()
	 * exists and returns true then the transfer will be mapped
	 * prior to transfer_one() being called.  The driver should
	 * not modify or store xfer and dma_tx and dma_rx must be set
	 * while the device is prepared.
	 */
	bool			(*can_dma)(struct spi_controller *ctlr,
					   struct spi_device *spi,
					   struct spi_transfer *xfer);

	/*
	 * These hooks are for drivers that want to use the generic
	 * controller transfer queueing mechanism. If these are used, the
	 * transfer() function above must NOT be specified by the driver.
	 * Over time we expect SPI drivers to be phased over to this API.
	 */
	bool				queued;
	struct kthread_worker		kworker;
	struct task_struct		*kworker_task;
	struct kthread_work		pump_messages;
	spinlock_t			queue_lock;
	struct list_head		queue;
	struct spi_message		*cur_msg;
	bool				idling;
	bool				busy;
	bool				running;
	bool				rt;
	bool				auto_runtime_pm;
	bool				cur_msg_prepared;
	bool				cur_msg_mapped;
	struct completion		xfer_completion;
	size_t				max_dma_len;

	int (*prepare_transfer_hardware)(struct spi_controller *ctlr);
	int (*transfer_one_message)(struct spi_controller *ctlr,
				    struct spi_message *mesg);
	int (*unprepare_transfer_hardware)(struct spi_controller *ctlr);
	int (*prepare_message)(struct spi_controller *ctlr,
			       struct spi_message *message);
	int (*unprepare_message)(struct spi_controller *ctlr,
				 struct spi_message *message);
	int (*slave_abort)(struct spi_controller *ctlr);

	/*
	 * These hooks are for drivers that use a generic implementation
	 * of transfer_one_message() provided by the core.
	 */
	void (*set_cs)(struct spi_device *spi, bool enable);
	int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi,
			    struct spi_transfer *transfer);
	void (*handle_err)(struct spi_controller *ctlr,
			   struct spi_message *message);

	/* Optimized handlers for SPI memory-like operations. */
	const struct spi_controller_mem_ops *mem_ops;

	/* CS delays */
	struct spi_delay	cs_setup;
	struct spi_delay	cs_hold;
	struct spi_delay	cs_inactive;

	/* gpio chip select */
	int			*cs_gpios;
	struct gpio_desc	**cs_gpiods;
	bool			use_gpio_descriptors;

	/* statistics */
	struct spi_statistics	statistics;

	/* DMA channels for use with core dmaengine helpers */
	struct dma_chan		*dma_tx;
	struct dma_chan		*dma_rx;

	/* dummy data for full duplex devices */
	void			*dummy_rx;
	void			*dummy_tx;

	int (*fw_translate_cs)(struct spi_controller *ctlr, unsigned cs);

	/*
	 * Driver sets this field to indicate it is able to snapshot SPI
	 * transfers (needed e.g. for reading the time of POSIX clocks)
	 */
	bool			ptp_sts_supported;

	/* Interrupt enable state during PTP system timestamping */
	unsigned long		irq_flags;
};

/* driver data accessors for the controller's class device */
static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
{
	return dev_get_drvdata(&ctlr->dev);
}

static inline void spi_controller_set_devdata(struct spi_controller *ctlr,
					      void *data)
{
	dev_set_drvdata(&ctlr->dev, data);
}

/* Take a reference on the controller's class device; NULL-safe. */
static inline struct spi_controller *spi_controller_get(struct spi_controller *ctlr)
{
	if (!ctlr || !get_device(&ctlr->dev))
		return NULL;
	return ctlr;
}

/* Drop a reference taken by spi_controller_get(); NULL-safe. */
static inline void spi_controller_put(struct spi_controller *ctlr)
{
	if (ctlr)
		put_device(&ctlr->dev);
}

/* True only when slave support is compiled in AND this controller is one. */
static inline bool spi_controller_is_slave(struct spi_controller *ctlr)
{
	return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
}

/* PM calls that need to be issued by the driver */
extern int spi_controller_suspend(struct spi_controller *ctlr);
extern int spi_controller_resume(struct spi_controller *ctlr);

/* Calls the driver make to interact with the message queue */
extern struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr);
extern void spi_finalize_current_message(struct spi_controller *ctlr);
extern void spi_finalize_current_transfer(struct spi_controller *ctlr);

/* Helper calls for driver to timestamp transfer */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    const void *tx, bool irqs_off);
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     const void *tx, bool irqs_off);

/* the spi driver core manages memory for the spi_controller classdev */
extern struct spi_controller *__spi_alloc_controller(struct device *host,
						     unsigned int size,
						     bool slave);

static inline struct spi_controller *spi_alloc_master(struct device *host,
						      unsigned int size)
{
	return __spi_alloc_controller(host, size, false);
}

static inline struct spi_controller *spi_alloc_slave(struct device *host,
						     unsigned int size)
{
	if (!IS_ENABLED(CONFIG_SPI_SLAVE))
		return NULL;

	return __spi_alloc_controller(host, size, true);
}

extern int spi_register_controller(struct spi_controller *ctlr);
extern int devm_spi_register_controller(struct device *dev,
					struct spi_controller *ctlr);
extern void spi_unregister_controller(struct spi_controller *ctlr);

extern struct spi_controller *spi_busnum_to_master(u16 busnum);

/*
 * SPI resource management while processing a SPI message
 */

typedef void (*spi_res_release_t)(struct spi_controller *ctlr,
				  struct spi_message *msg,
				  void *res);

/**
 * struct spi_res - spi resource management structure
 * @entry: list entry
 * @release: release code called prior to freeing this resource
 * @data: extra data allocated for the specific use-case
 *
 * this is based on ideas from devres, but focused on life-cycle
 * management during spi_message processing
 */
struct spi_res {
	struct list_head	entry;
	spi_res_release_t	release;
	unsigned long long	data[];	/* guarantee ull alignment */
};

extern void *spi_res_alloc(struct spi_device *spi,
			   spi_res_release_t release,
			   size_t size, gfp_t gfp);
extern void spi_res_add(struct spi_message *message, void *res);
extern void spi_res_free(void *res);

extern void spi_res_release(struct spi_controller *ctlr,
			    struct spi_message *message);

/*---------------------------------------------------------------------------*/

/*
 * I/O INTERFACE between SPI controller and protocol drivers
 *
 * Protocol drivers use a queue of spi_messages, each transferring data
 * between the controller and memory buffers.
 *
 * The spi_messages themselves consist of a series of read+write transfer
 * segments.  Those segments always read the same number of bits as they
 * write; but one or the other is easily ignored by passing a null buffer
 * pointer.  (This is unlike most types of I/O API, because SPI hardware
 * is full duplex.)
 *
 * NOTE:  Allocation of spi_transfer and spi_message memory is entirely
 * up to the protocol driver, which guarantees the integrity of both (as
 * well as the data buffers) for as long as the message is queued.
 */

/**
 * struct spi_transfer - a read/write buffer pair
 * @tx_buf: data to be written (dma-safe memory), or NULL
 * @rx_buf: data to be read (dma-safe memory), or NULL
 * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
 * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
 * @tx_nbits: number of bits used for writing. If 0 the default
 *	(SPI_NBITS_SINGLE) is used.
 * @rx_nbits: number of bits used for reading. If 0 the default
 *	(SPI_NBITS_SINGLE) is used.
 * @len: size of rx and tx buffers (in bytes)
 * @speed_hz: Select a speed other than the device default for this
 *	transfer. If 0 the default (from @spi_device) is used.
787 * @bits_per_word: select a bits_per_word other than the device default 788 * for this transfer. If 0 the default (from @spi_device) is used. 789 * @cs_change: affects chipselect after this transfer completes 790 * @cs_change_delay: delay between cs deassert and assert when 791 * @cs_change is set and @spi_transfer is not the last in @spi_message 792 * @delay: delay to be introduced after this transfer before 793 * (optionally) changing the chipselect status, then starting 794 * the next transfer or completing this @spi_message. 795 * @delay_usecs: microseconds to delay after this transfer before 796 * (optionally) changing the chipselect status, then starting 797 * the next transfer or completing this @spi_message. 798 * @word_delay: inter word delay to be introduced after each word size 799 * (set by bits_per_word) transmission. 800 * @effective_speed_hz: the effective SCK-speed that was used to 801 * transfer this transfer. Set to 0 if the spi bus driver does 802 * not support it. 803 * @transfer_list: transfers are sequenced through @spi_message.transfers 804 * @tx_sg: Scatterlist for transmit, currently not for client use 805 * @rx_sg: Scatterlist for receive, currently not for client use 806 * @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset 807 * within @tx_buf for which the SPI device is requesting that the time 808 * snapshot for this transfer begins. Upon completing the SPI transfer, 809 * this value may have changed compared to what was requested, depending 810 * on the available snapshotting resolution (DMA transfer, 811 * @ptp_sts_supported is false, etc). 812 * @ptp_sts_word_post: See @ptp_sts_word_post. The two can be equal (meaning 813 * that a single byte should be snapshotted). 814 * If the core takes care of the timestamp (if @ptp_sts_supported is false 815 * for this controller), it will set @ptp_sts_word_pre to 0, and 816 * @ptp_sts_word_post to the length of the transfer. 
This is done 817 * purposefully (instead of setting to spi_transfer->len - 1) to denote 818 * that a transfer-level snapshot taken from within the driver may still 819 * be of higher quality. 820 * @ptp_sts: Pointer to a memory location held by the SPI slave device where a 821 * PTP system timestamp structure may lie. If drivers use PIO or their 822 * hardware has some sort of assist for retrieving exact transfer timing, 823 * they can (and should) assert @ptp_sts_supported and populate this 824 * structure using the ptp_read_system_*ts helper functions. 825 * The timestamp must represent the time at which the SPI slave device has 826 * processed the word, i.e. the "pre" timestamp should be taken before 827 * transmitting the "pre" word, and the "post" timestamp after receiving 828 * transmit confirmation from the controller for the "post" word. 829 * @timestamped_pre: Set by the SPI controller driver to denote it has acted 830 * upon the @ptp_sts request. Not set when the SPI core has taken care of 831 * the task. SPI device drivers are free to print a warning if this comes 832 * back unset and they need the better resolution. 833 * @timestamped_post: See above. The reason why both exist is that these 834 * booleans are also used to keep state in the core SPI logic. 835 * 836 * SPI transfers always write the same number of bytes as they read. 837 * Protocol drivers should always provide @rx_buf and/or @tx_buf. 838 * In some cases, they may also want to provide DMA addresses for 839 * the data being transferred; that may reduce overhead, when the 840 * underlying driver uses dma. 841 * 842 * If the transmit buffer is null, zeroes will be shifted out 843 * while filling @rx_buf. If the receive buffer is null, the data 844 * shifted in will be discarded. Only "len" bytes shift out (or in). 845 * It's an error to try to shift out a partial word. 
(For example, by 846 * shifting out three bytes with word size of sixteen or twenty bits; 847 * the former uses two bytes per word, the latter uses four bytes.) 848 * 849 * In-memory data values are always in native CPU byte order, translated 850 * from the wire byte order (big-endian except with SPI_LSB_FIRST). So 851 * for example when bits_per_word is sixteen, buffers are 2N bytes long 852 * (@len = 2N) and hold N sixteen bit words in CPU byte order. 853 * 854 * When the word size of the SPI transfer is not a power-of-two multiple 855 * of eight bits, those in-memory words include extra bits. In-memory 856 * words are always seen by protocol drivers as right-justified, so the 857 * undefined (rx) or unused (tx) bits are always the most significant bits. 858 * 859 * All SPI transfers start with the relevant chipselect active. Normally 860 * it stays selected until after the last transfer in a message. Drivers 861 * can affect the chipselect signal using cs_change. 862 * 863 * (i) If the transfer isn't the last one in the message, this flag is 864 * used to make the chipselect briefly go inactive in the middle of the 865 * message. Toggling chipselect in this way may be needed to terminate 866 * a chip command, letting a single spi_message perform all of a group of 867 * chip transactions together. 868 * 869 * (ii) When the transfer is the last one in the message, the chip may 870 * stay selected until the next transfer. On multi-device SPI busses 871 * with nothing blocking messages going to other devices, this is just 872 * a performance hint; starting a message to another device deselects 873 * this one. But in other cases, this can be used to ensure correctness. 874 * Some devices need protocol transactions to be built from a series of 875 * spi_message submissions, where the content of one message is determined 876 * by the results of previous messages and where the whole transaction 877 * ends when the chipselect goes inactive.
878 * 879 * When SPI can transfer in 1x,2x or 4x. It can get this transfer information 880 * from device through @tx_nbits and @rx_nbits. In Bi-direction, these 881 * two should both be set. User can set transfer mode with SPI_NBITS_SINGLE(1x) 882 * SPI_NBITS_DUAL(2x) and SPI_NBITS_QUAD(4x) to support these three transfer. 883 * 884 * The code that submits an spi_message (and its spi_transfers) 885 * to the lower layers is responsible for managing its memory. 886 * Zero-initialize every field you don't set up explicitly, to 887 * insulate against future API updates. After you submit a message 888 * and its transfers, ignore them until its completion callback. 889 */ 890 struct spi_transfer { 891 /* it's ok if tx_buf == rx_buf (right?) 892 * for MicroWire, one buffer must be null 893 * buffers must work with dma_*map_single() calls, unless 894 * spi_message.is_dma_mapped reports a pre-existing mapping 895 */ 896 const void *tx_buf; 897 void *rx_buf; 898 unsigned len; 899 900 dma_addr_t tx_dma; 901 dma_addr_t rx_dma; 902 struct sg_table tx_sg; 903 struct sg_table rx_sg; 904 905 unsigned cs_change:1; 906 unsigned tx_nbits:3; 907 unsigned rx_nbits:3; 908 #define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */ 909 #define SPI_NBITS_DUAL 0x02 /* 2bits transfer */ 910 #define SPI_NBITS_QUAD 0x04 /* 4bits transfer */ 911 u8 bits_per_word; 912 u16 delay_usecs; 913 struct spi_delay delay; 914 struct spi_delay cs_change_delay; 915 struct spi_delay word_delay; 916 u32 speed_hz; 917 918 u32 effective_speed_hz; 919 920 unsigned int ptp_sts_word_pre; 921 unsigned int ptp_sts_word_post; 922 923 struct ptp_system_timestamp *ptp_sts; 924 925 bool timestamped_pre; 926 bool timestamped_post; 927 928 struct list_head transfer_list; 929 }; 930 931 /** 932 * struct spi_message - one multi-segment SPI transaction 933 * @transfers: list of transfer segments in this transaction 934 * @spi: SPI device to which the transaction is queued 935 * @is_dma_mapped: if true, the caller provided both dma 
and cpu virtual 936 * addresses for each transfer buffer 937 * @complete: called to report transaction completions 938 * @context: the argument to complete() when it's called 939 * @frame_length: the total number of bytes in the message 940 * @actual_length: the total number of bytes that were transferred in all 941 * successful segments 942 * @status: zero for success, else negative errno 943 * @queue: for use by whichever driver currently owns the message 944 * @state: for use by whichever driver currently owns the message 945 * @resources: for resource management when the spi message is processed 946 * 947 * A @spi_message is used to execute an atomic sequence of data transfers, 948 * each represented by a struct spi_transfer. The sequence is "atomic" 949 * in the sense that no other spi_message may use that SPI bus until that 950 * sequence completes. On some systems, many such sequences can execute as 951 * a single programmed DMA transfer. On all systems, these messages are 952 * queued, and might complete after transactions to other devices. Messages 953 * sent to a given spi_device are always executed in FIFO order. 954 * 955 * The code that submits an spi_message (and its spi_transfers) 956 * to the lower layers is responsible for managing its memory. 957 * Zero-initialize every field you don't set up explicitly, to 958 * insulate against future API updates. After you submit a message 959 * and its transfers, ignore them until its completion callback. 960 */ 961 struct spi_message { 962 struct list_head transfers; 963 964 struct spi_device *spi; 965 966 unsigned is_dma_mapped:1; 967 968 /* REVISIT: we might want a flag affecting the behavior of the 969 * last transfer ... allowing things like "read 16 bit length L" 970 * immediately followed by "read L bytes". Basically imposing 971 * a specific message scheduling algorithm.
972 * 973 * Some controller drivers (message-at-a-time queue processing) 974 * could provide that as their default scheduling algorithm. But 975 * others (with multi-message pipelines) could need a flag to 976 * tell them about such special cases. 977 */ 978 979 /* completion is reported through a callback */ 980 void (*complete)(void *context); 981 void *context; 982 unsigned frame_length; 983 unsigned actual_length; 984 int status; 985 986 /* for optional use by whatever driver currently owns the 987 * spi_message ... between calls to spi_async and then later 988 * complete(), that's the spi_controller controller driver. 989 */ 990 struct list_head queue; 991 void *state; 992 993 /* list of spi_res resources when the spi message is processed */ 994 struct list_head resources; 995 }; 996 997 static inline void spi_message_init_no_memset(struct spi_message *m) 998 { 999 INIT_LIST_HEAD(&m->transfers); 1000 INIT_LIST_HEAD(&m->resources); 1001 } 1002 1003 static inline void spi_message_init(struct spi_message *m) 1004 { 1005 memset(m, 0, sizeof *m); 1006 spi_message_init_no_memset(m); 1007 } 1008 1009 static inline void 1010 spi_message_add_tail(struct spi_transfer *t, struct spi_message *m) 1011 { 1012 list_add_tail(&t->transfer_list, &m->transfers); 1013 } 1014 1015 static inline void 1016 spi_transfer_del(struct spi_transfer *t) 1017 { 1018 list_del(&t->transfer_list); 1019 } 1020 1021 static inline int 1022 spi_transfer_delay_exec(struct spi_transfer *t) 1023 { 1024 struct spi_delay d; 1025 1026 if (t->delay_usecs) { 1027 d.value = t->delay_usecs; 1028 d.unit = SPI_DELAY_UNIT_USECS; 1029 return spi_delay_exec(&d, NULL); 1030 } 1031 1032 return spi_delay_exec(&t->delay, t); 1033 } 1034 1035 /** 1036 * spi_message_init_with_transfers - Initialize spi_message and append transfers 1037 * @m: spi_message to be initialized 1038 * @xfers: An array of spi transfers 1039 * @num_xfers: Number of items in the xfer array 1040 * 1041 * This function initializes the given
spi_message and adds each spi_transfer in 1042 * the given array to the message. 1043 */ 1044 static inline void 1045 spi_message_init_with_transfers(struct spi_message *m, 1046 struct spi_transfer *xfers, unsigned int num_xfers) 1047 { 1048 unsigned int i; 1049 1050 spi_message_init(m); 1051 for (i = 0; i < num_xfers; ++i) 1052 spi_message_add_tail(&xfers[i], m); 1053 } 1054 1055 /* It's fine to embed message and transaction structures in other data 1056 * structures so long as you don't free them while they're in use. 1057 */ 1058 1059 static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags) 1060 { 1061 struct spi_message *m; 1062 1063 m = kzalloc(sizeof(struct spi_message) 1064 + ntrans * sizeof(struct spi_transfer), 1065 flags); 1066 if (m) { 1067 unsigned i; 1068 struct spi_transfer *t = (struct spi_transfer *)(m + 1); 1069 1070 spi_message_init_no_memset(m); 1071 for (i = 0; i < ntrans; i++, t++) 1072 spi_message_add_tail(t, m); 1073 } 1074 return m; 1075 } 1076 1077 static inline void spi_message_free(struct spi_message *m) 1078 { 1079 kfree(m); 1080 } 1081 1082 extern int spi_set_cs_timing(struct spi_device *spi, 1083 struct spi_delay *setup, 1084 struct spi_delay *hold, 1085 struct spi_delay *inactive); 1086 1087 extern int spi_setup(struct spi_device *spi); 1088 extern int spi_async(struct spi_device *spi, struct spi_message *message); 1089 extern int spi_async_locked(struct spi_device *spi, 1090 struct spi_message *message); 1091 extern int spi_slave_abort(struct spi_device *spi); 1092 1093 static inline size_t 1094 spi_max_message_size(struct spi_device *spi) 1095 { 1096 struct spi_controller *ctlr = spi->controller; 1097 1098 if (!ctlr->max_message_size) 1099 return SIZE_MAX; 1100 return ctlr->max_message_size(spi); 1101 } 1102 1103 static inline size_t 1104 spi_max_transfer_size(struct spi_device *spi) 1105 { 1106 struct spi_controller *ctlr = spi->controller; 1107 size_t tr_max = SIZE_MAX; 1108 size_t msg_max = 
spi_max_message_size(spi); 1109 1110 if (ctlr->max_transfer_size) 1111 tr_max = ctlr->max_transfer_size(spi); 1112 1113 /* transfer size limit must not be greater than message size limit */ 1114 return min(tr_max, msg_max); 1115 } 1116 1117 /** 1118 * spi_is_bpw_supported - Check if bits per word is supported 1119 * @spi: SPI device 1120 * @bpw: Bits per word 1121 * 1122 * This function checks to see if the SPI controller supports @bpw. 1123 * 1124 * Returns: 1125 * True if @bpw is supported, false otherwise. 1126 */ 1127 static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw) 1128 { 1129 u32 bpw_mask = spi->master->bits_per_word_mask; 1130 1131 if (bpw == 8 || (bpw <= 32 && bpw_mask & SPI_BPW_MASK(bpw))) 1132 return true; 1133 1134 return false; 1135 } 1136 1137 /*---------------------------------------------------------------------------*/ 1138 1139 /* SPI transfer replacement methods which make use of spi_res */ 1140 1141 struct spi_replaced_transfers; 1142 typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr, 1143 struct spi_message *msg, 1144 struct spi_replaced_transfers *res); 1145 /** 1146 * struct spi_replaced_transfers - structure describing the spi_transfer 1147 * replacements that have occurred 1148 * so that they can get reverted 1149 * @release: some extra release code to get executed prior to 1150 * releasing this structure 1151 * @extradata: pointer to some extra data if requested or NULL 1152 * @replaced_transfers: transfers that have been replaced and which need 1153 * to get restored 1154 * @replaced_after: the transfer after which the @replaced_transfers 1155 * are to get re-inserted 1156 * @inserted: number of transfers inserted 1157 * @inserted_transfers: array of spi_transfers of array-size @inserted, 1158 * that have been replacing replaced_transfers 1159 * 1160 * note: that @extradata will point to @inserted_transfers[@inserted] 1161 * if some extra allocation is requested, so alignment will be the same 1162 *
as for spi_transfers 1163 */ 1164 struct spi_replaced_transfers { 1165 spi_replaced_release_t release; 1166 void *extradata; 1167 struct list_head replaced_transfers; 1168 struct list_head *replaced_after; 1169 size_t inserted; 1170 struct spi_transfer inserted_transfers[]; 1171 }; 1172 1173 extern struct spi_replaced_transfers *spi_replace_transfers( 1174 struct spi_message *msg, 1175 struct spi_transfer *xfer_first, 1176 size_t remove, 1177 size_t insert, 1178 spi_replaced_release_t release, 1179 size_t extradatasize, 1180 gfp_t gfp); 1181 1182 /*---------------------------------------------------------------------------*/ 1183 1184 /* SPI transfer transformation methods */ 1185 1186 extern int spi_split_transfers_maxsize(struct spi_controller *ctlr, 1187 struct spi_message *msg, 1188 size_t maxsize, 1189 gfp_t gfp); 1190 1191 /*---------------------------------------------------------------------------*/ 1192 1193 /* All these synchronous SPI transfer routines are utilities layered 1194 * over the core async transfer primitive. Here, "synchronous" means 1195 * they will sleep uninterruptibly until the async transfer completes. 1196 */ 1197 1198 extern int spi_sync(struct spi_device *spi, struct spi_message *message); 1199 extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message); 1200 extern int spi_bus_lock(struct spi_controller *ctlr); 1201 extern int spi_bus_unlock(struct spi_controller *ctlr); 1202 1203 /** 1204 * spi_sync_transfer - synchronous SPI data transfer 1205 * @spi: device with which data will be exchanged 1206 * @xfers: An array of spi_transfers 1207 * @num_xfers: Number of items in the xfer array 1208 * Context: can sleep 1209 * 1210 * Does a synchronous SPI data transfer of the given spi_transfer array. 1211 * 1212 * For more specific semantics see spi_sync(). 1213 * 1214 * Return: zero on success, else a negative error code.
1215 */ 1216 static inline int 1217 spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers, 1218 unsigned int num_xfers) 1219 { 1220 struct spi_message msg; 1221 1222 spi_message_init_with_transfers(&msg, xfers, num_xfers); 1223 1224 return spi_sync(spi, &msg); 1225 } 1226 1227 /** 1228 * spi_write - SPI synchronous write 1229 * @spi: device to which data will be written 1230 * @buf: data buffer 1231 * @len: data buffer size 1232 * Context: can sleep 1233 * 1234 * This function writes the buffer @buf. 1235 * Callable only from contexts that can sleep. 1236 * 1237 * Return: zero on success, else a negative error code. 1238 */ 1239 static inline int 1240 spi_write(struct spi_device *spi, const void *buf, size_t len) 1241 { 1242 struct spi_transfer t = { 1243 .tx_buf = buf, 1244 .len = len, 1245 }; 1246 1247 return spi_sync_transfer(spi, &t, 1); 1248 } 1249 1250 /** 1251 * spi_read - SPI synchronous read 1252 * @spi: device from which data will be read 1253 * @buf: data buffer 1254 * @len: data buffer size 1255 * Context: can sleep 1256 * 1257 * This function reads the buffer @buf. 1258 * Callable only from contexts that can sleep. 1259 * 1260 * Return: zero on success, else a negative error code. 1261 */ 1262 static inline int 1263 spi_read(struct spi_device *spi, void *buf, size_t len) 1264 { 1265 struct spi_transfer t = { 1266 .rx_buf = buf, 1267 .len = len, 1268 }; 1269 1270 return spi_sync_transfer(spi, &t, 1); 1271 } 1272 1273 /* this copies txbuf and rxbuf data; for small transfers only! */ 1274 extern int spi_write_then_read(struct spi_device *spi, 1275 const void *txbuf, unsigned n_tx, 1276 void *rxbuf, unsigned n_rx); 1277 1278 /** 1279 * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read 1280 * @spi: device with which data will be exchanged 1281 * @cmd: command to be written before data is read back 1282 * Context: can sleep 1283 * 1284 * Callable only from contexts that can sleep. 
1285 * 1286 * Return: the (unsigned) eight bit number returned by the 1287 * device, or else a negative error code. 1288 */ 1289 static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) 1290 { 1291 ssize_t status; 1292 u8 result; 1293 1294 status = spi_write_then_read(spi, &cmd, 1, &result, 1); 1295 1296 /* return negative errno or unsigned value */ 1297 return (status < 0) ? status : result; 1298 } 1299 1300 /** 1301 * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read 1302 * @spi: device with which data will be exchanged 1303 * @cmd: command to be written before data is read back 1304 * Context: can sleep 1305 * 1306 * The number is returned in wire-order, which is at least sometimes 1307 * big-endian. 1308 * 1309 * Callable only from contexts that can sleep. 1310 * 1311 * Return: the (unsigned) sixteen bit number returned by the 1312 * device, or else a negative error code. 1313 */ 1314 static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) 1315 { 1316 ssize_t status; 1317 u16 result; 1318 1319 status = spi_write_then_read(spi, &cmd, 1, &result, 2); 1320 1321 /* return negative errno or unsigned value */ 1322 return (status < 0) ? status : result; 1323 } 1324 1325 /** 1326 * spi_w8r16be - SPI synchronous 8 bit write followed by 16 bit big-endian read 1327 * @spi: device with which data will be exchanged 1328 * @cmd: command to be written before data is read back 1329 * Context: can sleep 1330 * 1331 * This function is similar to spi_w8r16, with the exception that it will 1332 * convert the read 16 bit data word from big-endian to native endianness. 1333 * 1334 * Callable only from contexts that can sleep. 1335 * 1336 * Return: the (unsigned) sixteen bit number returned by the device in cpu 1337 * endianness, or else a negative error code. 
1338 */ 1339 static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) 1340 1341 { 1342 ssize_t status; 1343 __be16 result; 1344 1345 status = spi_write_then_read(spi, &cmd, 1, &result, 2); 1346 if (status < 0) 1347 return status; 1348 1349 return be16_to_cpu(result); 1350 } 1351 1352 /*---------------------------------------------------------------------------*/ 1353 1354 /* 1355 * INTERFACE between board init code and SPI infrastructure. 1356 * 1357 * No SPI driver ever sees these SPI device table segments, but 1358 * it's how the SPI core (or adapters that get hotplugged) grows 1359 * the driver model tree. 1360 * 1361 * As a rule, SPI devices can't be probed. Instead, board init code 1362 * provides a table listing the devices which are present, with enough 1363 * information to bind and set up the device's driver. There's basic 1364 * support for nonstatic configurations too; enough to handle adding 1365 * parport adapters, or microcontrollers acting as USB-to-SPI bridges. 1366 */ 1367 1368 /** 1369 * struct spi_board_info - board-specific template for a SPI device 1370 * @modalias: Initializes spi_device.modalias; identifies the driver. 1371 * @platform_data: Initializes spi_device.platform_data; the particular 1372 * data stored there is driver-specific. 1373 * @properties: Additional device properties for the device. 1374 * @controller_data: Initializes spi_device.controller_data; some 1375 * controllers need hints about hardware setup, e.g. for DMA. 1376 * @irq: Initializes spi_device.irq; depends on how the board is wired. 1377 * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits 1378 * from the chip datasheet and board-specific signal quality issues. 1379 * @bus_num: Identifies which spi_controller parents the spi_device; unused 1380 * by spi_new_device(), and otherwise depends on board wiring. 1381 * @chip_select: Initializes spi_device.chip_select; depends on how 1382 * the board is wired. 
1383 * @mode: Initializes spi_device.mode; based on the chip datasheet, board 1384 * wiring (some devices support both 3WIRE and standard modes), and 1385 * possibly presence of an inverter in the chipselect path. 1386 * 1387 * When adding new SPI devices to the device tree, these structures serve 1388 * as a partial device template. They hold information which can't always 1389 * be determined by drivers. Information that probe() can establish (such 1390 * as the default transfer wordsize) is not included here. 1391 * 1392 * These structures are used in two places. Their primary role is to 1393 * be stored in tables of board-specific device descriptors, which are 1394 * declared early in board initialization and then used (much later) to 1395 * populate a controller's device tree after that controller's driver 1396 * initializes. A secondary (and atypical) role is as a parameter to 1397 * spi_new_device() call, which happens after those controller drivers 1398 * are active in some dynamic board configuration models. 1399 */ 1400 struct spi_board_info { 1401 /* the device name and module name are coupled, like platform_bus; 1402 * "modalias" is normally the driver name. 1403 * 1404 * platform_data goes to spi_device.dev.platform_data, 1405 * controller_data goes to spi_device.controller_data, 1406 * device properties are copied and attached to spi_device, 1407 * irq is copied too 1408 */ 1409 char modalias[SPI_NAME_SIZE]; 1410 const void *platform_data; 1411 const struct property_entry *properties; 1412 void *controller_data; 1413 int irq; 1414 1415 /* slower signaling on noisy or low voltage boards */ 1416 u32 max_speed_hz; 1417 1418 1419 /* bus_num is board specific and matches the bus_num of some 1420 * spi_controller that will probably be registered later. 1421 * 1422 * chip_select reflects how this chip is wired to that master; 1423 * it's less than num_chipselect.
1424 */ 1425 u16 bus_num; 1426 u16 chip_select; 1427 1428 /* mode becomes spi_device.mode, and is essential for chips 1429 * where the default of SPI_CS_HIGH = 0 is wrong. 1430 */ 1431 u32 mode; 1432 1433 /* ... may need additional spi_device chip config data here. 1434 * avoid stuff protocol drivers can set; but include stuff 1435 * needed to behave without being bound to a driver: 1436 * - quirks like clock rate mattering when not selected 1437 */ 1438 }; 1439 1440 #ifdef CONFIG_SPI 1441 extern int 1442 spi_register_board_info(struct spi_board_info const *info, unsigned n); 1443 #else 1444 /* board init code may ignore whether SPI is configured or not */ 1445 static inline int 1446 spi_register_board_info(struct spi_board_info const *info, unsigned n) 1447 { return 0; } 1448 #endif 1449 1450 /* If you're hotplugging an adapter with devices (parport, usb, etc) 1451 * use spi_new_device() to describe each device. You can also call 1452 * spi_unregister_device() to start making that device vanish, but 1453 * normally that would be handled by spi_unregister_controller(). 1454 * 1455 * You can also use spi_alloc_device() and spi_add_device() to use a two 1456 * stage registration sequence for each spi_device. This gives the caller 1457 * some more control over the spi_device structure before it is registered, 1458 * but requires that caller to initialize fields that would otherwise 1459 * be defined using the board info. 
1460 */ 1461 extern struct spi_device * 1462 spi_alloc_device(struct spi_controller *ctlr); 1463 1464 extern int 1465 spi_add_device(struct spi_device *spi); 1466 1467 extern struct spi_device * 1468 spi_new_device(struct spi_controller *, struct spi_board_info *); 1469 1470 extern void spi_unregister_device(struct spi_device *spi); 1471 1472 extern const struct spi_device_id * 1473 spi_get_device_id(const struct spi_device *sdev); 1474 1475 static inline bool 1476 spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer) 1477 { 1478 return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers); 1479 } 1480 1481 /* OF support code */ 1482 #if IS_ENABLED(CONFIG_OF) 1483 1484 /* must call put_device() when done with returned spi_device device */ 1485 extern struct spi_device * 1486 of_find_spi_device_by_node(struct device_node *node); 1487 1488 #else 1489 1490 static inline struct spi_device * 1491 of_find_spi_device_by_node(struct device_node *node) 1492 { 1493 return NULL; 1494 } 1495 1496 #endif /* IS_ENABLED(CONFIG_OF) */ 1497 1498 /* Compatibility layer */ 1499 #define spi_master spi_controller 1500 1501 #define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX 1502 #define SPI_MASTER_NO_RX SPI_CONTROLLER_NO_RX 1503 #define SPI_MASTER_NO_TX SPI_CONTROLLER_NO_TX 1504 #define SPI_MASTER_MUST_RX SPI_CONTROLLER_MUST_RX 1505 #define SPI_MASTER_MUST_TX SPI_CONTROLLER_MUST_TX 1506 1507 #define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr) 1508 #define spi_master_set_devdata(_ctlr, _data) \ 1509 spi_controller_set_devdata(_ctlr, _data) 1510 #define spi_master_get(_ctlr) spi_controller_get(_ctlr) 1511 #define spi_master_put(_ctlr) spi_controller_put(_ctlr) 1512 #define spi_master_suspend(_ctlr) spi_controller_suspend(_ctlr) 1513 #define spi_master_resume(_ctlr) spi_controller_resume(_ctlr) 1514 1515 #define spi_register_master(_ctlr) spi_register_controller(_ctlr) 1516 #define devm_spi_register_master(_dev, _ctlr) \ 1517 
devm_spi_register_controller(_dev, _ctlr) 1518 #define spi_unregister_master(_ctlr) spi_unregister_controller(_ctlr) 1519 1520 #endif /* __LINUX_SPI_H */ 1521