/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/include/linux/clk.h
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *  Copyright (C) 2011-2012 Linaro Ltd <[email protected]>
 */
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct device;
struct clk;
struct device_node;
struct of_phandle_args;

/**
 * DOC: clk notifier callback types
 *
 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
 *     to indicate that the rate change will proceed.  Drivers must
 *     immediately terminate any operations that will be affected by the
 *     rate change.  Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
 *     NOTIFY_STOP or NOTIFY_BAD.
 *
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
 *     after PRE_RATE_CHANGE.  In this case, all registered notifiers on
 *     the clk will be called with ABORT_RATE_CHANGE.  Callbacks must
 *     always return NOTIFY_DONE or NOTIFY_OK.
 *
 * POST_RATE_CHANGE - called after the clk rate change has successfully
 *     completed.  Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
 *
 */
#define PRE_RATE_CHANGE			BIT(0)
#define POST_RATE_CHANGE		BIT(1)
#define ABORT_RATE_CHANGE		BIT(2)

/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: an srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk.  Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
	struct clk			*clk;
	struct srcu_notifier_head	notifier_head;
	struct list_head		node;
};

/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future.  For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
	struct clk		*clk;
	unsigned long		old_rate;
	unsigned long		new_rate;
};

/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_() API calls as
 * a convenience to consumers which require multiple clks.  This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
	const char		*id;
	struct clk		*clk;
};

#ifdef CONFIG_COMMON_CLK

/**
 * clk_notifier_register - register a clock rate-change notifier callback
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * ProTip: debugging across notifier chains can be frustrating. Make sure that
 * your notifier callback function prints a nice big warning in case of
 * failure.
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);

/**
 * clk_notifier_unregister - unregister a clock rate-change notifier callback
 * @clk: clock whose rate we are no longer interested in
 * @nb: notifier block which will be unregistered
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);

/**
 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
 *		      for a clock source.
 * @clk: clock source
 *
 * This gets the clock source accuracy expressed in ppb.
 * A perfect clock returns 0.
 */
long clk_get_accuracy(struct clk *clk);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_phase(struct clk *clk, int degrees);

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * -EERROR.
 */
int clk_get_phase(struct clk *clk);

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
 * success, -EERROR otherwise.
149 */ 150 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den); 151 152 /** 153 * clk_get_duty_cycle - return the duty cycle ratio of a clock signal 154 * @clk: clock signal source 155 * @scale: scaling factor to be applied to represent the ratio as an integer 156 * 157 * Returns the duty cycle ratio multiplied by the scale provided, otherwise 158 * returns -EERROR. 159 */ 160 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale); 161 162 /** 163 * clk_is_match - check if two clk's point to the same hardware clock 164 * @p: clk compared against q 165 * @q: clk compared against p 166 * 167 * Returns true if the two struct clk pointers both point to the same hardware 168 * clock node. Put differently, returns true if @p and @q 169 * share the same &struct clk_core object. 170 * 171 * Returns false otherwise. Note that two NULL clks are treated as matching. 172 */ 173 bool clk_is_match(const struct clk *p, const struct clk *q); 174 175 #else 176 177 static inline int clk_notifier_register(struct clk *clk, 178 struct notifier_block *nb) 179 { 180 return -ENOTSUPP; 181 } 182 183 static inline int clk_notifier_unregister(struct clk *clk, 184 struct notifier_block *nb) 185 { 186 return -ENOTSUPP; 187 } 188 189 static inline long clk_get_accuracy(struct clk *clk) 190 { 191 return -ENOTSUPP; 192 } 193 194 static inline long clk_set_phase(struct clk *clk, int phase) 195 { 196 return -ENOTSUPP; 197 } 198 199 static inline long clk_get_phase(struct clk *clk) 200 { 201 return -ENOTSUPP; 202 } 203 204 static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num, 205 unsigned int den) 206 { 207 return -ENOTSUPP; 208 } 209 210 static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk, 211 unsigned int scale) 212 { 213 return 0; 214 } 215 216 static inline bool clk_is_match(const struct clk *p, const struct clk *q) 217 { 218 return p == q; 219 } 220 221 #endif 222 223 /** 224 * clk_prepare - prepare a clock source 225 * 
@clk: clock source 226 * 227 * This prepares the clock source for use. 228 * 229 * Must not be called from within atomic context. 230 */ 231 #ifdef CONFIG_HAVE_CLK_PREPARE 232 int clk_prepare(struct clk *clk); 233 int __must_check clk_bulk_prepare(int num_clks, 234 const struct clk_bulk_data *clks); 235 #else 236 static inline int clk_prepare(struct clk *clk) 237 { 238 might_sleep(); 239 return 0; 240 } 241 242 static inline int __must_check clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks) 243 { 244 might_sleep(); 245 return 0; 246 } 247 #endif 248 249 /** 250 * clk_unprepare - undo preparation of a clock source 251 * @clk: clock source 252 * 253 * This undoes a previously prepared clock. The caller must balance 254 * the number of prepare and unprepare calls. 255 * 256 * Must not be called from within atomic context. 257 */ 258 #ifdef CONFIG_HAVE_CLK_PREPARE 259 void clk_unprepare(struct clk *clk); 260 void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks); 261 #else 262 static inline void clk_unprepare(struct clk *clk) 263 { 264 might_sleep(); 265 } 266 static inline void clk_bulk_unprepare(int num_clks, struct clk_bulk_data *clks) 267 { 268 might_sleep(); 269 } 270 #endif 271 272 #ifdef CONFIG_HAVE_CLK 273 /** 274 * clk_get - lookup and obtain a reference to a clock producer. 275 * @dev: device for clock "consumer" 276 * @id: clock consumer ID 277 * 278 * Returns a struct clk corresponding to the clock producer, or 279 * valid IS_ERR() condition containing errno. The implementation 280 * uses @dev and @id to determine the clock consumer, and thereby 281 * the clock producer. (IOW, @id may be identical strings, but 282 * clk_get may return different clock producers depending on @dev.) 283 * 284 * Drivers must assume that the clock source is not enabled. 285 * 286 * clk_get should not be called from within interrupt context. 
 */
struct clk *clk_get(struct device *dev, const char *id);

/**
 * clk_bulk_get - lookup and obtain a number of references to clock producer.
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation. If any of the clk cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
 * successfully, or valid IS_ERR() condition containing errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get should not be called from within interrupt context.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks);
/**
 * clk_bulk_get_all - lookup and obtain all available references to clock
 *		      producer.
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get all clk consumers in one
 * operation. If any of the clk cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there're none and a negative value if something failed.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get_all should not be called from within interrupt context.
 */
int __must_check clk_bulk_get_all(struct device *dev,
				  struct clk_bulk_data **clks);

/**
 * clk_bulk_get_optional - lookup and obtain a number of references to clock producer
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Behaves the same as clk_bulk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns 0 and
 * NULL for a clk for which a clock producer could not be determined.
 */
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
				       struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Return 0 on success, an errno on failure.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management, the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
				   struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Behaves the same as devm_clk_bulk_get() except where there is no clock
 * producer.  In this case, instead of returning -ENOENT, the function returns
 * NULL for given clk. It is assumed all clocks in clk_bulk_data are optional.
 *
 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
 * successfully or for any clk there was no clk provider available, otherwise
 * returns valid IS_ERR() condition containing errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_bulk_get_optional should not be called from within interrupt
 * context.
 */
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get_all - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there're none and a negative value if something failed.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management, the clks will
 * automatically be freed when the device is unbound.
 */

int __must_check devm_clk_bulk_get_all(struct device *dev,
				       struct clk_bulk_data **clks);

/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_get should not be called from within interrupt context.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get(struct device *dev, const char *id);

/**
 * devm_clk_get_optional - lookup and obtain a managed reference to an optional
 *			   clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as devm_clk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns NULL.
 */
struct clk *devm_clk_get_optional(struct device *dev, const char *id);

/**
 * devm_get_clk_from_child - lookup and obtain a managed reference to a
 *			     clock producer from child node.
 * @dev: device for clock "consumer"
 * @np: pointer to clock consumer node
 * @con_id: clock consumer ID
 *
 * This function parses the clocks, and uses them to look up the
 * struct clk from the registered list of clock providers by using
 * @np and @con_id
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_get_clk_from_child(struct device *dev,
				    struct device_node *np, const char *con_id);
/**
 * clk_rate_exclusive_get - get exclusivity over the rate control of a
 *			    producer
 * @clk: clock source
 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider. It prevents any other consumer from executing, even indirectly,
 * an operation which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on clock, even by the same driver,
 * the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Must not be called from within atomic context.
 *
 * Returns success (0) or negative errno.
 */
int clk_rate_exclusive_get(struct clk *clk);

/**
 * clk_rate_exclusive_put - release exclusivity over the rate control of a
 *			    producer
 * @clk: clock source
 *
 * This function allows drivers to release the exclusivity it previously got
 * from clk_rate_exclusive_get()
 *
 * The caller must balance the number of clk_rate_exclusive_get() and
 * clk_rate_exclusive_put() calls.
 *
 * Must not be called from within atomic context.
 */
void clk_rate_exclusive_put(struct clk *clk);

/**
 * clk_enable - inform the system when the clock source should be running.
 * @clk: clock source
 *
 * If the clock can not be enabled/disabled, this should return success.
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int clk_enable(struct clk *clk);

/**
 * clk_bulk_enable - inform the system when the set of clks should be running.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int __must_check clk_bulk_enable(int num_clks,
				 const struct clk_bulk_data *clks);

/**
 * clk_disable - inform the system when the clock source is no longer required.
 * @clk: clock source
 *
 * Inform the system that a clock source is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the clock source is shared between
 * multiple drivers, clk_enable() calls must be balanced by the
 * same number of clk_disable() calls for the clock source to be
 * disabled.
 */
void clk_disable(struct clk *clk);

/**
 * clk_bulk_disable - inform the system when the set of clks is no
 *		      longer required.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Inform the system that a set of clks is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the set of clks is shared between
 * multiple drivers, clk_bulk_enable() calls must be balanced by the
 * same number of clk_bulk_disable() calls for the clock source to be
 * disabled.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);

/**
 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
 *		  This is only valid once the clock source has been enabled.
 * @clk: clock source
 */
unsigned long clk_get_rate(struct clk *clk);

/**
 * clk_put - "free" the clock source
 * @clk: clock source
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * clk_put should not be called from within interrupt context.
 */
void clk_put(struct clk *clk);

/**
 * clk_bulk_put - "free" the clock source
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put should not be called from within interrupt context.
 */
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);

/**
 * clk_bulk_put_all - "free" all the clock source
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put_all should not be called from within interrupt context.
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);

/**
 * devm_clk_put - "free" a managed clock source
 * @dev: device used to acquire the clock
 * @clk: clock source acquired with devm_clk_get()
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * devm_clk_put should not be called from within interrupt context.
 */
void devm_clk_put(struct device *dev, struct clk *clk);

/*
 * The remaining APIs are optional for machine class support.
 */


/**
 * clk_round_rate - adjust a rate to the exact rate a clock can provide
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This answers the question "if I were to pass @rate to clk_set_rate(),
 * what clock rate would I end up with?" without changing the hardware
 * in any way.  In other words:
 *
 *   rate = clk_round_rate(clk, r);
 *
 * and:
 *
 *   clk_set_rate(clk, r);
 *   rate = clk_get_rate(clk);
 *
 * are equivalent except the former does not modify the clock hardware
 * in any way.
 *
 * Returns rounded clock rate in Hz, or negative errno.
 */
long clk_round_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate - set the clock rate for a clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
 *			    clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This helper function allows drivers to atomically set the rate of a producer
 * and claim exclusivity over the rate control of the producer.
 *
 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get(). Caller must balance this call with a call to
 * clk_rate_exclusive_put()
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_parent - set the parent clock source for this clock
 * @clk: clock source
 * @parent: parent clock source
 *
 * Returns success (0) or negative errno.
 */
int clk_set_parent(struct clk *clk, struct clk *parent);

/**
 * clk_get_parent - get the parent clock source for this clock
 * @clk: clock source
 *
 * Returns struct clk corresponding to parent clock source, or
 * valid IS_ERR() condition containing errno.
 */
struct clk *clk_get_parent(struct clk *clk);

/**
 * clk_get_sys - get a clock based upon the device name
 * @dev_id: device name
 * @con_id: connection ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev_id and @con_id to determine the clock consumer, and
 * thereby the clock producer.  In contrast to clk_get() this function
 * takes the device name instead of the device itself for identification.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get_sys should not be called from within interrupt context.
 */
struct clk *clk_get_sys(const char *dev_id, const char *con_id);

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code so locking is not necessary.
 */
int clk_save_context(void);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * This occurs with all clocks enabled. Occurs deep within the resume code
 * so locking is not necessary.
737 */ 738 void clk_restore_context(void); 739 740 #else /* !CONFIG_HAVE_CLK */ 741 742 static inline struct clk *clk_get(struct device *dev, const char *id) 743 { 744 return NULL; 745 } 746 747 static inline int __must_check clk_bulk_get(struct device *dev, int num_clks, 748 struct clk_bulk_data *clks) 749 { 750 return 0; 751 } 752 753 static inline int __must_check clk_bulk_get_optional(struct device *dev, 754 int num_clks, struct clk_bulk_data *clks) 755 { 756 return 0; 757 } 758 759 static inline int __must_check clk_bulk_get_all(struct device *dev, 760 struct clk_bulk_data **clks) 761 { 762 return 0; 763 } 764 765 static inline struct clk *devm_clk_get(struct device *dev, const char *id) 766 { 767 return NULL; 768 } 769 770 static inline struct clk *devm_clk_get_optional(struct device *dev, 771 const char *id) 772 { 773 return NULL; 774 } 775 776 static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, 777 struct clk_bulk_data *clks) 778 { 779 return 0; 780 } 781 782 static inline int __must_check devm_clk_bulk_get_optional(struct device *dev, 783 int num_clks, struct clk_bulk_data *clks) 784 { 785 return 0; 786 } 787 788 static inline int __must_check devm_clk_bulk_get_all(struct device *dev, 789 struct clk_bulk_data **clks) 790 { 791 792 return 0; 793 } 794 795 static inline struct clk *devm_get_clk_from_child(struct device *dev, 796 struct device_node *np, const char *con_id) 797 { 798 return NULL; 799 } 800 801 static inline void clk_put(struct clk *clk) {} 802 803 static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {} 804 805 static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {} 806 807 static inline void devm_clk_put(struct device *dev, struct clk *clk) {} 808 809 810 static inline int clk_rate_exclusive_get(struct clk *clk) 811 { 812 return 0; 813 } 814 815 static inline void clk_rate_exclusive_put(struct clk *clk) {} 816 817 static inline int clk_enable(struct clk *clk) 818 { 
819 return 0; 820 } 821 822 static inline int __must_check clk_bulk_enable(int num_clks, struct clk_bulk_data *clks) 823 { 824 return 0; 825 } 826 827 static inline void clk_disable(struct clk *clk) {} 828 829 830 static inline void clk_bulk_disable(int num_clks, 831 struct clk_bulk_data *clks) {} 832 833 static inline unsigned long clk_get_rate(struct clk *clk) 834 { 835 return 0; 836 } 837 838 static inline int clk_set_rate(struct clk *clk, unsigned long rate) 839 { 840 return 0; 841 } 842 843 static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) 844 { 845 return 0; 846 } 847 848 static inline long clk_round_rate(struct clk *clk, unsigned long rate) 849 { 850 return 0; 851 } 852 853 static inline bool clk_has_parent(struct clk *clk, struct clk *parent) 854 { 855 return true; 856 } 857 858 static inline int clk_set_rate_range(struct clk *clk, unsigned long min, 859 unsigned long max) 860 { 861 return 0; 862 } 863 864 static inline int clk_set_min_rate(struct clk *clk, unsigned long rate) 865 { 866 return 0; 867 } 868 869 static inline int clk_set_max_rate(struct clk *clk, unsigned long rate) 870 { 871 return 0; 872 } 873 874 static inline int clk_set_parent(struct clk *clk, struct clk *parent) 875 { 876 return 0; 877 } 878 879 static inline struct clk *clk_get_parent(struct clk *clk) 880 { 881 return NULL; 882 } 883 884 static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id) 885 { 886 return NULL; 887 } 888 889 static inline int clk_save_context(void) 890 { 891 return 0; 892 } 893 894 static inline void clk_restore_context(void) {} 895 896 #endif 897 898 /* clk_prepare_enable helps cases using clk_enable in non-atomic context. 
*/ 899 static inline int clk_prepare_enable(struct clk *clk) 900 { 901 int ret; 902 903 ret = clk_prepare(clk); 904 if (ret) 905 return ret; 906 ret = clk_enable(clk); 907 if (ret) 908 clk_unprepare(clk); 909 910 return ret; 911 } 912 913 /* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */ 914 static inline void clk_disable_unprepare(struct clk *clk) 915 { 916 clk_disable(clk); 917 clk_unprepare(clk); 918 } 919 920 static inline int __must_check clk_bulk_prepare_enable(int num_clks, 921 struct clk_bulk_data *clks) 922 { 923 int ret; 924 925 ret = clk_bulk_prepare(num_clks, clks); 926 if (ret) 927 return ret; 928 ret = clk_bulk_enable(num_clks, clks); 929 if (ret) 930 clk_bulk_unprepare(num_clks, clks); 931 932 return ret; 933 } 934 935 static inline void clk_bulk_disable_unprepare(int num_clks, 936 struct clk_bulk_data *clks) 937 { 938 clk_bulk_disable(num_clks, clks); 939 clk_bulk_unprepare(num_clks, clks); 940 } 941 942 /** 943 * clk_get_optional - lookup and obtain a reference to an optional clock 944 * producer. 945 * @dev: device for clock "consumer" 946 * @id: clock consumer ID 947 * 948 * Behaves the same as clk_get() except where there is no clock producer. In 949 * this case, instead of returning -ENOENT, the function returns NULL. 
950 */ 951 static inline struct clk *clk_get_optional(struct device *dev, const char *id) 952 { 953 struct clk *clk = clk_get(dev, id); 954 955 if (clk == ERR_PTR(-ENOENT)) 956 return NULL; 957 958 return clk; 959 } 960 961 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) 962 struct clk *of_clk_get(struct device_node *np, int index); 963 struct clk *of_clk_get_by_name(struct device_node *np, const char *name); 964 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec); 965 #else 966 static inline struct clk *of_clk_get(struct device_node *np, int index) 967 { 968 return ERR_PTR(-ENOENT); 969 } 970 static inline struct clk *of_clk_get_by_name(struct device_node *np, 971 const char *name) 972 { 973 return ERR_PTR(-ENOENT); 974 } 975 static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) 976 { 977 return ERR_PTR(-ENOENT); 978 } 979 #endif 980 981 #endif 982