/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/include/linux/clk.h
 *
 * Copyright (C) 2004 ARM Limited.
 * Written by Deep Blue Solutions Limited.
 * Copyright (C) 2011-2012 Linaro Ltd <[email protected]>
 */
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct device;
struct clk;
struct device_node;
struct of_phandle_args;

/**
 * DOC: clk notifier callback types
 *
 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
 *     to indicate that the rate change will proceed.  Drivers must
 *     immediately terminate any operations that will be affected by the
 *     rate change.  Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
 *     NOTIFY_STOP or NOTIFY_BAD.
 *
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
 *     after PRE_RATE_CHANGE.  In this case, all registered notifiers on
 *     the clk will be called with ABORT_RATE_CHANGE.  Callbacks must
 *     always return NOTIFY_DONE or NOTIFY_OK.
 *
 * POST_RATE_CHANGE - called after the clk rate change has successfully
 *     completed.  Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
 */
#define PRE_RATE_CHANGE			BIT(0)
#define POST_RATE_CHANGE		BIT(1)
#define ABORT_RATE_CHANGE		BIT(2)

/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: an srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk.  Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
	struct clk			*clk;
	struct srcu_notifier_head	notifier_head;
	struct list_head		node;
};

/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future.  For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
	struct clk		*clk;
	unsigned long		old_rate;
	unsigned long		new_rate;
};

/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_() API calls as
 * a convenience to consumers which require multiple clks.  This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
	const char		*id;
	struct clk		*clk;
};

#ifdef CONFIG_COMMON_CLK

/**
 * clk_notifier_register - register a clock rate-change notifier callback
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * ProTip: debugging across notifier chains can be frustrating. Make sure that
 * your notifier callback function prints a nice big warning in case of
 * failure.
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);

/**
 * clk_notifier_unregister - unregister a clock rate-change notifier callback
 * @clk: clock whose rate we are no longer interested in
 * @nb: notifier block which will be unregistered
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);

/**
 * devm_clk_notifier_register - register a managed rate-change notifier callback
 * @dev: device for clock "consumer"
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * Returns 0 on success, -EERROR otherwise
 */
int devm_clk_notifier_register(struct device *dev, struct clk *clk,
			       struct notifier_block *nb);

/**
 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
 *		      for a clock source.
 * @clk: clock source
 *
 * This gets the clock source accuracy expressed in ppb.
 * A perfect clock returns 0.
 */
long clk_get_accuracy(struct clk *clk);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_phase(struct clk *clk, int degrees);

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * -EERROR.
 */
int clk_get_phase(struct clk *clk);

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio multiplied by the scale provided, otherwise
 * returns -EERROR.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);

/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node.  Put differently, returns true if @p and @q
 * share the same &struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q);

/**
 * clk_rate_exclusive_get - get exclusivity over the rate control of a
 *                          producer
 * @clk: clock source
 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider.  It prevents any other consumer from executing, even indirectly,
 * an operation which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on clock, even by the same driver,
 * the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Must not be called from within atomic context.
 *
 * Returns success (0) or negative errno.
201 */ 202 int clk_rate_exclusive_get(struct clk *clk); 203 204 /** 205 * clk_rate_exclusive_put - release exclusivity over the rate control of a 206 * producer 207 * @clk: clock source 208 * 209 * This function allows drivers to release the exclusivity it previously got 210 * from clk_rate_exclusive_get() 211 * 212 * The caller must balance the number of clk_rate_exclusive_get() and 213 * clk_rate_exclusive_put() calls. 214 * 215 * Must not be called from within atomic context. 216 */ 217 void clk_rate_exclusive_put(struct clk *clk); 218 219 #else 220 221 static inline int clk_notifier_register(struct clk *clk, 222 struct notifier_block *nb) 223 { 224 return -ENOTSUPP; 225 } 226 227 static inline int clk_notifier_unregister(struct clk *clk, 228 struct notifier_block *nb) 229 { 230 return -ENOTSUPP; 231 } 232 233 static inline int devm_clk_notifier_register(struct device *dev, 234 struct clk *clk, 235 struct notifier_block *nb) 236 { 237 return -ENOTSUPP; 238 } 239 240 static inline long clk_get_accuracy(struct clk *clk) 241 { 242 return -ENOTSUPP; 243 } 244 245 static inline long clk_set_phase(struct clk *clk, int phase) 246 { 247 return -ENOTSUPP; 248 } 249 250 static inline long clk_get_phase(struct clk *clk) 251 { 252 return -ENOTSUPP; 253 } 254 255 static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num, 256 unsigned int den) 257 { 258 return -ENOTSUPP; 259 } 260 261 static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk, 262 unsigned int scale) 263 { 264 return 0; 265 } 266 267 static inline bool clk_is_match(const struct clk *p, const struct clk *q) 268 { 269 return p == q; 270 } 271 272 static inline int clk_rate_exclusive_get(struct clk *clk) 273 { 274 return 0; 275 } 276 277 static inline void clk_rate_exclusive_put(struct clk *clk) {} 278 279 #endif 280 281 #ifdef CONFIG_HAVE_CLK_PREPARE 282 /** 283 * clk_prepare - prepare a clock source 284 * @clk: clock source 285 * 286 * This prepares the clock source for use. 
 *
 * Must not be called from within atomic context.
 */
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
				  const struct clk_bulk_data *clks);

/**
 * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
 * @clk: clock source
 *
 * Returns true if clk_prepare() implicitly enables the clock, effectively
 * making clk_enable()/clk_disable() no-ops, false otherwise.
 *
 * This is of interest mainly to the power management code where actually
 * disabling the clock also requires unpreparing it to have any material
 * effect.
 *
 * Regardless of the value returned here, the caller must always invoke
 * clk_enable() or clk_prepare_enable() and counterparts for usage counts
 * to be right.
 */
bool clk_is_enabled_when_prepared(struct clk *clk);
#else
static inline int clk_prepare(struct clk *clk)
{
	might_sleep();
	return 0;
}

static inline int __must_check
clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
{
	might_sleep();
	return 0;
}

static inline bool clk_is_enabled_when_prepared(struct clk *clk)
{
	return false;
}
#endif

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: clock source
 *
 * This undoes a previously prepared clock.  The caller must balance
 * the number of prepare and unprepare calls.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
void clk_unprepare(struct clk *clk);
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
#else
static inline void clk_unprepare(struct clk *clk)
{
	might_sleep();
}
static inline void clk_bulk_unprepare(int num_clks,
				      const struct clk_bulk_data *clks)
{
	might_sleep();
}
#endif

#ifdef CONFIG_HAVE_CLK
/**
 * clk_get - lookup and obtain a reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get should not be called from within interrupt context.
 */
struct clk *clk_get(struct device *dev, const char *id);

/**
 * clk_bulk_get - lookup and obtain a number of references to clock producer.
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation.  If any of the clk cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
 * successfully, or valid IS_ERR() condition containing errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get should not be called from within interrupt context.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks);
/**
 * clk_bulk_get_all - lookup and obtain all available references to clock
 *		      producer.
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get all clk consumers in one
 * operation.  If any of the clk cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get_all should not be called from within interrupt context.
 */
int __must_check clk_bulk_get_all(struct device *dev,
				  struct clk_bulk_data **clks);

/**
 * clk_bulk_get_optional - lookup and obtain a number of references to clock producer
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Behaves the same as clk_bulk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns 0 and
 * NULL for a clk for which a clock producer could not be determined.
 */
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
				       struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Return 0 on success, an errno on failure.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management, the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
				   struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Behaves the same as devm_clk_bulk_get() except where there is no clock
 * producer.  In this case, instead of returning -ENOENT, the function returns
 * NULL for given clk.  It is assumed all clocks in clk_bulk_data are optional.
 *
 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
 * successfully or for any clk there was no clk provider available, otherwise
 * returns valid IS_ERR() condition containing errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_bulk_get_optional should not be called from within interrupt
 * context.
 */
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get_all - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management, the clks will
 * automatically be freed when the device is unbound.
 */

int __must_check devm_clk_bulk_get_all(struct device *dev,
				       struct clk_bulk_data **clks);

/**
 * devm_clk_bulk_get_all_enable - Get and enable all clocks of the consumer (managed)
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns success (0) or negative errno.
 *
 * This helper function allows drivers to get all clocks of the
 * consumer and enables them in one operation with management.
 * The clks will automatically be disabled and freed when the device
 * is unbound.
 */

int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
					      struct clk_bulk_data **clks);

/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is neither prepared nor
 * enabled.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get(struct device *dev, const char *id);

/**
 * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.
 * The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * The returned clk (if valid) is prepared.  Drivers must however assume
 * that the clock is not enabled.
 *
 * The clock will automatically be unprepared and freed when the device
 * is unbound from the bus.
 */
struct clk *devm_clk_get_prepared(struct device *dev, const char *id);

/**
 * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * The returned clk (if valid) is prepared and enabled.
 *
 * The clock will automatically be disabled, unprepared and freed
 * when the device is unbound from the bus.
 */
struct clk *devm_clk_get_enabled(struct device *dev, const char *id);

/**
 * devm_clk_get_optional - lookup and obtain a managed reference to an optional
 *			   clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  If no such clk is found, it returns NULL
 * which serves as a dummy clk.  That's the only difference compared
 * to devm_clk_get().
 *
 * Drivers must assume that the clock source is neither prepared nor
 * enabled.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get_optional(struct device *dev, const char *id);

/**
 * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  If no such clk is found, it returns NULL
 * which serves as a dummy clk.  That's the only difference compared
 * to devm_clk_get_prepared().
 *
 * The returned clk (if valid) is prepared.  Drivers must however
 * assume that the clock is not enabled.
 *
 * The clock will automatically be unprepared and freed when the
 * device is unbound from the bus.
 */
struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);

/**
 * devm_clk_get_optional_enabled - devm_clk_get_optional() +
 *				   clk_prepare_enable()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  If no such clk is found, it returns NULL
 * which serves as a dummy clk.  That's the only difference compared
 * to devm_clk_get_enabled().
 *
 * The returned clk (if valid) is prepared and enabled.
 *
 * The clock will automatically be disabled, unprepared and freed
 * when the device is unbound from the bus.
 */
struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);

/**
 * devm_get_clk_from_child - lookup and obtain a managed reference to a
 *			     clock producer from child node.
 * @dev: device for clock "consumer"
 * @np: pointer to clock consumer node
 * @con_id: clock consumer ID
 *
 * This function parses the clocks, and uses them to look up the
 * struct clk from the registered list of clock providers by using
 * @np and @con_id
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_get_clk_from_child(struct device *dev,
				    struct device_node *np, const char *con_id);

/**
 * clk_enable - inform the system when the clock source should be running.
 * @clk: clock source
 *
 * If the clock can not be enabled/disabled, this should return success.
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int clk_enable(struct clk *clk);

/**
 * clk_bulk_enable - inform the system when the set of clks should be running.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int __must_check clk_bulk_enable(int num_clks,
				 const struct clk_bulk_data *clks);

/**
 * clk_disable - inform the system when the clock source is no longer required.
 * @clk: clock source
 *
 * Inform the system that a clock source is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the clock source is shared between
 * multiple drivers, clk_enable() calls must be balanced by the
 * same number of clk_disable() calls for the clock source to be
 * disabled.
 */
void clk_disable(struct clk *clk);

/**
 * clk_bulk_disable - inform the system when the set of clks is no
 *		      longer required.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Inform the system that a set of clks is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the set of clks is shared between
 * multiple drivers, clk_bulk_enable() calls must be balanced by the
 * same number of clk_bulk_disable() calls for the clock source to be
 * disabled.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);

/**
 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
 *		  This is only valid once the clock source has been enabled.
 * @clk: clock source
 */
unsigned long clk_get_rate(struct clk *clk);

/**
 * clk_put - "free" the clock source
 * @clk: clock source
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * clk_put should not be called from within interrupt context.
 */
void clk_put(struct clk *clk);

/**
 * clk_bulk_put - "free" the clock source
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put should not be called from within interrupt context.
 */
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);

/**
 * clk_bulk_put_all - "free" all the clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put_all should not be called from within interrupt context.
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);

/**
 * devm_clk_put - "free" a managed clock source
 * @dev: device used to acquire the clock
 * @clk: clock source acquired with devm_clk_get()
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * devm_clk_put should not be called from within interrupt context.
 */
void devm_clk_put(struct device *dev, struct clk *clk);

/*
 * The remaining APIs are optional for machine class support.
 */


/**
 * clk_round_rate - adjust a rate to the exact rate a clock can provide
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This answers the question "if I were to pass @rate to clk_set_rate(),
 * what clock rate would I end up with?" without changing the hardware
 * in any way.  In other words:
 *
 *   rate = clk_round_rate(clk, r);
 *
 * and:
 *
 *   clk_set_rate(clk, r);
 *   rate = clk_get_rate(clk);
 *
 * are equivalent except the former does not modify the clock hardware
 * in any way.
 *
 * Returns rounded clock rate in Hz, or negative errno.
 */
long clk_round_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate - set the clock rate for a clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * Updating the rate starts at the top-most affected clock and then
 * walks the tree down to the bottom-most clock that needs updating.
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
 *                          clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This helper function allows drivers to atomically set the rate of a producer
 * and claim exclusivity over the rate control of the producer.
 *
 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get().  Caller must balance this call with a call to
 * clk_rate_exclusive_put()
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(const struct clk *clk, const struct clk *parent);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_parent - set the parent clock source for this clock
 * @clk: clock source
 * @parent: parent clock source
 *
 * Returns success (0) or negative errno.
 */
int clk_set_parent(struct clk *clk, struct clk *parent);

/**
 * clk_get_parent - get the parent clock source for this clock
 * @clk: clock source
 *
 * Returns struct clk corresponding to parent clock source, or
 * valid IS_ERR() condition containing errno.
 */
struct clk *clk_get_parent(struct clk *clk);

/**
 * clk_get_sys - get a clock based upon the device name
 * @dev_id: device name
 * @con_id: connection ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev_id and @con_id to determine the clock consumer, and
 * thereby the clock producer.  In contrast to clk_get() this function
 * takes the device name instead of the device itself for identification.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get_sys should not be called from within interrupt context.
 */
struct clk *clk_get_sys(const char *dev_id, const char *con_id);

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost.  Occurs deep within the suspend
 * code so locking is not necessary.
 */
int clk_save_context(void);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * This occurs with all clocks enabled.  Occurs deep within the resume code
 * so locking is not necessary.
 */
void clk_restore_context(void);

#else /* !CONFIG_HAVE_CLK */

/* Dummy implementations used when no clk framework is available. */

static inline struct clk *clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_optional(struct device *dev,
					int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_all(struct device *dev,
						struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_prepared(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_enabled(struct device *dev,
					       const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
							 const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
							const char *id)
{
	return NULL;
}

static inline int __must_check
devm_clk_bulk_get(struct device *dev, int num_clks, 969 struct clk_bulk_data *clks) 970 { 971 return 0; 972 } 973 974 static inline int __must_check devm_clk_bulk_get_optional(struct device *dev, 975 int num_clks, struct clk_bulk_data *clks) 976 { 977 return 0; 978 } 979 980 static inline int __must_check devm_clk_bulk_get_all(struct device *dev, 981 struct clk_bulk_data **clks) 982 { 983 984 return 0; 985 } 986 987 static inline int __must_check devm_clk_bulk_get_all_enable(struct device *dev, 988 struct clk_bulk_data **clks) 989 { 990 return 0; 991 } 992 993 static inline struct clk *devm_get_clk_from_child(struct device *dev, 994 struct device_node *np, const char *con_id) 995 { 996 return NULL; 997 } 998 999 static inline void clk_put(struct clk *clk) {} 1000 1001 static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {} 1002 1003 static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {} 1004 1005 static inline void devm_clk_put(struct device *dev, struct clk *clk) {} 1006 1007 static inline int clk_enable(struct clk *clk) 1008 { 1009 return 0; 1010 } 1011 1012 static inline int __must_check clk_bulk_enable(int num_clks, 1013 const struct clk_bulk_data *clks) 1014 { 1015 return 0; 1016 } 1017 1018 static inline void clk_disable(struct clk *clk) {} 1019 1020 1021 static inline void clk_bulk_disable(int num_clks, 1022 const struct clk_bulk_data *clks) {} 1023 1024 static inline unsigned long clk_get_rate(struct clk *clk) 1025 { 1026 return 0; 1027 } 1028 1029 static inline int clk_set_rate(struct clk *clk, unsigned long rate) 1030 { 1031 return 0; 1032 } 1033 1034 static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) 1035 { 1036 return 0; 1037 } 1038 1039 static inline long clk_round_rate(struct clk *clk, unsigned long rate) 1040 { 1041 return 0; 1042 } 1043 1044 static inline bool clk_has_parent(struct clk *clk, struct clk *parent) 1045 { 1046 return true; 1047 } 1048 1049 static inline int 
clk_set_rate_range(struct clk *clk, unsigned long min, 1050 unsigned long max) 1051 { 1052 return 0; 1053 } 1054 1055 static inline int clk_set_min_rate(struct clk *clk, unsigned long rate) 1056 { 1057 return 0; 1058 } 1059 1060 static inline int clk_set_max_rate(struct clk *clk, unsigned long rate) 1061 { 1062 return 0; 1063 } 1064 1065 static inline int clk_set_parent(struct clk *clk, struct clk *parent) 1066 { 1067 return 0; 1068 } 1069 1070 static inline struct clk *clk_get_parent(struct clk *clk) 1071 { 1072 return NULL; 1073 } 1074 1075 static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id) 1076 { 1077 return NULL; 1078 } 1079 1080 static inline int clk_save_context(void) 1081 { 1082 return 0; 1083 } 1084 1085 static inline void clk_restore_context(void) {} 1086 1087 #endif 1088 1089 /* clk_prepare_enable helps cases using clk_enable in non-atomic context. */ 1090 static inline int clk_prepare_enable(struct clk *clk) 1091 { 1092 int ret; 1093 1094 ret = clk_prepare(clk); 1095 if (ret) 1096 return ret; 1097 ret = clk_enable(clk); 1098 if (ret) 1099 clk_unprepare(clk); 1100 1101 return ret; 1102 } 1103 1104 /* clk_disable_unprepare helps cases using clk_disable in non-atomic context. 
*/ 1105 static inline void clk_disable_unprepare(struct clk *clk) 1106 { 1107 clk_disable(clk); 1108 clk_unprepare(clk); 1109 } 1110 1111 static inline int __must_check 1112 clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks) 1113 { 1114 int ret; 1115 1116 ret = clk_bulk_prepare(num_clks, clks); 1117 if (ret) 1118 return ret; 1119 ret = clk_bulk_enable(num_clks, clks); 1120 if (ret) 1121 clk_bulk_unprepare(num_clks, clks); 1122 1123 return ret; 1124 } 1125 1126 static inline void clk_bulk_disable_unprepare(int num_clks, 1127 const struct clk_bulk_data *clks) 1128 { 1129 clk_bulk_disable(num_clks, clks); 1130 clk_bulk_unprepare(num_clks, clks); 1131 } 1132 1133 /** 1134 * clk_drop_range - Reset any range set on that clock 1135 * @clk: clock source 1136 * 1137 * Returns success (0) or negative errno. 1138 */ 1139 static inline int clk_drop_range(struct clk *clk) 1140 { 1141 return clk_set_rate_range(clk, 0, ULONG_MAX); 1142 } 1143 1144 /** 1145 * clk_get_optional - lookup and obtain a reference to an optional clock 1146 * producer. 1147 * @dev: device for clock "consumer" 1148 * @id: clock consumer ID 1149 * 1150 * Behaves the same as clk_get() except where there is no clock producer. In 1151 * this case, instead of returning -ENOENT, the function returns NULL. 
1152 */ 1153 static inline struct clk *clk_get_optional(struct device *dev, const char *id) 1154 { 1155 struct clk *clk = clk_get(dev, id); 1156 1157 if (clk == ERR_PTR(-ENOENT)) 1158 return NULL; 1159 1160 return clk; 1161 } 1162 1163 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) 1164 struct clk *of_clk_get(struct device_node *np, int index); 1165 struct clk *of_clk_get_by_name(struct device_node *np, const char *name); 1166 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec); 1167 #else 1168 static inline struct clk *of_clk_get(struct device_node *np, int index) 1169 { 1170 return ERR_PTR(-ENOENT); 1171 } 1172 static inline struct clk *of_clk_get_by_name(struct device_node *np, 1173 const char *name) 1174 { 1175 return ERR_PTR(-ENOENT); 1176 } 1177 static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) 1178 { 1179 return ERR_PTR(-ENOENT); 1180 } 1181 #endif 1182 1183 #endif 1184