1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* 3 * linux/include/linux/clk.h 4 * 5 * Copyright (C) 2004 ARM Limited. 6 * Written by Deep Blue Solutions Limited. 7 * Copyright (C) 2011-2012 Linaro Ltd <[email protected]> 8 */ 9 #ifndef __LINUX_CLK_H 10 #define __LINUX_CLK_H 11 12 #include <linux/err.h> 13 #include <linux/kernel.h> 14 #include <linux/notifier.h> 15 16 struct device; 17 struct clk; 18 struct device_node; 19 struct of_phandle_args; 20 21 /** 22 * DOC: clk notifier callback types 23 * 24 * PRE_RATE_CHANGE - called immediately before the clk rate is changed, 25 * to indicate that the rate change will proceed. Drivers must 26 * immediately terminate any operations that will be affected by the 27 * rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK, 28 * NOTIFY_STOP or NOTIFY_BAD. 29 * 30 * ABORT_RATE_CHANGE: called if the rate change failed for some reason 31 * after PRE_RATE_CHANGE. In this case, all registered notifiers on 32 * the clk will be called with ABORT_RATE_CHANGE. Callbacks must 33 * always return NOTIFY_DONE or NOTIFY_OK. 34 * 35 * POST_RATE_CHANGE - called after the clk rate change has successfully 36 * completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK. 37 * 38 */ 39 #define PRE_RATE_CHANGE BIT(0) 40 #define POST_RATE_CHANGE BIT(1) 41 #define ABORT_RATE_CHANGE BIT(2) 42 43 /** 44 * struct clk_notifier - associate a clk with a notifier 45 * @clk: struct clk * to associate the notifier with 46 * @notifier_head: a blocking_notifier_head for this clk 47 * @node: linked list pointers 48 * 49 * A list of struct clk_notifier is maintained by the notifier code. 50 * An entry is created whenever code registers the first notifier on a 51 * particular @clk. Future notifiers on that @clk are added to the 52 * @notifier_head. 
 */
struct clk_notifier {
	struct clk			*clk;
	struct srcu_notifier_head	notifier_head;
	struct list_head		node;
};

/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future. For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
	struct clk		*clk;
	unsigned long		old_rate;
	unsigned long		new_rate;
};

/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_() API calls as
 * a convenience to consumers which require multiple clks. This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
	const char *id;
	struct clk *clk;
};

/*
 * Real implementations below are provided when the common clock framework
 * is enabled; otherwise the #else branch supplies no-op/-ENOTSUPP stubs.
 */
#ifdef CONFIG_COMMON_CLK

/**
 * clk_notifier_register - register a clock rate-change notifier callback
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * ProTip: debugging across notifier chains can be frustrating. Make sure that
 * your notifier callback function prints a nice big warning in case of
 * failure.
102 */ 103 int clk_notifier_register(struct clk *clk, struct notifier_block *nb); 104 105 /** 106 * clk_notifier_unregister - unregister a clock rate-change notifier callback 107 * @clk: clock whose rate we are no longer interested in 108 * @nb: notifier block which will be unregistered 109 */ 110 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb); 111 112 /** 113 * devm_clk_notifier_register - register a managed rate-change notifier callback 114 * @dev: device for clock "consumer" 115 * @clk: clock whose rate we are interested in 116 * @nb: notifier block with callback function pointer 117 * 118 * Returns 0 on success, -EERROR otherwise 119 */ 120 int devm_clk_notifier_register(struct device *dev, struct clk *clk, 121 struct notifier_block *nb); 122 123 /** 124 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion) 125 * for a clock source. 126 * @clk: clock source 127 * 128 * This gets the clock source accuracy expressed in ppb. 129 * A perfect clock returns 0. 130 */ 131 long clk_get_accuracy(struct clk *clk); 132 133 /** 134 * clk_set_phase - adjust the phase shift of a clock signal 135 * @clk: clock signal source 136 * @degrees: number of degrees the signal is shifted 137 * 138 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on 139 * success, -EERROR otherwise. 140 */ 141 int clk_set_phase(struct clk *clk, int degrees); 142 143 /** 144 * clk_get_phase - return the phase shift of a clock signal 145 * @clk: clock signal source 146 * 147 * Returns the phase shift of a clock node in degrees, otherwise returns 148 * -EERROR. 149 */ 150 int clk_get_phase(struct clk *clk); 151 152 /** 153 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal 154 * @clk: clock signal source 155 * @num: numerator of the duty cycle ratio to be applied 156 * @den: denominator of the duty cycle ratio to be applied 157 * 158 * Adjust the duty cycle of a clock signal by the specified ratio. 
Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio multiplied by the scale provided, otherwise
 * returns -EERROR.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);

/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if @p and @q
 * share the same &struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q);

/**
 * clk_rate_exclusive_get - get exclusivity over the rate control of a
 *                          producer
 * @clk: clock source
 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider. It prevents any other consumer from executing, even indirectly,
 * an operation which could alter the rate of the provider or cause glitches
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * driver, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Must not be called from within atomic context.
 *
 * Returns success (0) or negative errno.
201 */ 202 int clk_rate_exclusive_get(struct clk *clk); 203 204 /** 205 * devm_clk_rate_exclusive_get - devm variant of clk_rate_exclusive_get 206 * @dev: device the exclusivity is bound to 207 * @clk: clock source 208 * 209 * Calls clk_rate_exclusive_get() on @clk and registers a devm cleanup handler 210 * on @dev to call clk_rate_exclusive_put(). 211 * 212 * Must not be called from within atomic context. 213 */ 214 int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk); 215 216 /** 217 * clk_rate_exclusive_put - release exclusivity over the rate control of a 218 * producer 219 * @clk: clock source 220 * 221 * This function allows drivers to release the exclusivity it previously got 222 * from clk_rate_exclusive_get() 223 * 224 * The caller must balance the number of clk_rate_exclusive_get() and 225 * clk_rate_exclusive_put() calls. 226 * 227 * Must not be called from within atomic context. 228 */ 229 void clk_rate_exclusive_put(struct clk *clk); 230 231 #else 232 233 static inline int clk_notifier_register(struct clk *clk, 234 struct notifier_block *nb) 235 { 236 return -ENOTSUPP; 237 } 238 239 static inline int clk_notifier_unregister(struct clk *clk, 240 struct notifier_block *nb) 241 { 242 return -ENOTSUPP; 243 } 244 245 static inline int devm_clk_notifier_register(struct device *dev, 246 struct clk *clk, 247 struct notifier_block *nb) 248 { 249 return -ENOTSUPP; 250 } 251 252 static inline long clk_get_accuracy(struct clk *clk) 253 { 254 return -ENOTSUPP; 255 } 256 257 static inline long clk_set_phase(struct clk *clk, int phase) 258 { 259 return -ENOTSUPP; 260 } 261 262 static inline long clk_get_phase(struct clk *clk) 263 { 264 return -ENOTSUPP; 265 } 266 267 static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num, 268 unsigned int den) 269 { 270 return -ENOTSUPP; 271 } 272 273 static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk, 274 unsigned int scale) 275 { 276 return 0; 277 } 278 279 static inline bool 
clk_is_match(const struct clk *p, const struct clk *q) 280 { 281 return p == q; 282 } 283 284 static inline int clk_rate_exclusive_get(struct clk *clk) 285 { 286 return 0; 287 } 288 289 static inline void clk_rate_exclusive_put(struct clk *clk) {} 290 291 #endif 292 293 #ifdef CONFIG_HAVE_CLK_PREPARE 294 /** 295 * clk_prepare - prepare a clock source 296 * @clk: clock source 297 * 298 * This prepares the clock source for use. 299 * 300 * Must not be called from within atomic context. 301 */ 302 int clk_prepare(struct clk *clk); 303 int __must_check clk_bulk_prepare(int num_clks, 304 const struct clk_bulk_data *clks); 305 306 /** 307 * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it. 308 * @clk: clock source 309 * 310 * Returns true if clk_prepare() implicitly enables the clock, effectively 311 * making clk_enable()/clk_disable() no-ops, false otherwise. 312 * 313 * This is of interest mainly to the power management code where actually 314 * disabling the clock also requires unpreparing it to have any material 315 * effect. 316 * 317 * Regardless of the value returned here, the caller must always invoke 318 * clk_enable() or clk_prepare_enable() and counterparts for usage counts 319 * to be right. 320 */ 321 bool clk_is_enabled_when_prepared(struct clk *clk); 322 #else 323 static inline int clk_prepare(struct clk *clk) 324 { 325 might_sleep(); 326 return 0; 327 } 328 329 static inline int __must_check 330 clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks) 331 { 332 might_sleep(); 333 return 0; 334 } 335 336 static inline bool clk_is_enabled_when_prepared(struct clk *clk) 337 { 338 return false; 339 } 340 #endif 341 342 /** 343 * clk_unprepare - undo preparation of a clock source 344 * @clk: clock source 345 * 346 * This undoes a previously prepared clock. The caller must balance 347 * the number of prepare and unprepare calls. 348 * 349 * Must not be called from within atomic context. 
350 */ 351 #ifdef CONFIG_HAVE_CLK_PREPARE 352 void clk_unprepare(struct clk *clk); 353 void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks); 354 #else 355 static inline void clk_unprepare(struct clk *clk) 356 { 357 might_sleep(); 358 } 359 static inline void clk_bulk_unprepare(int num_clks, 360 const struct clk_bulk_data *clks) 361 { 362 might_sleep(); 363 } 364 #endif 365 366 #ifdef CONFIG_HAVE_CLK 367 /** 368 * clk_get - lookup and obtain a reference to a clock producer. 369 * @dev: device for clock "consumer" 370 * @id: clock consumer ID 371 * 372 * Returns a struct clk corresponding to the clock producer, or 373 * valid IS_ERR() condition containing errno. The implementation 374 * uses @dev and @id to determine the clock consumer, and thereby 375 * the clock producer. (IOW, @id may be identical strings, but 376 * clk_get may return different clock producers depending on @dev.) 377 * 378 * Drivers must assume that the clock source is not enabled. 379 * 380 * clk_get should not be called from within interrupt context. 381 */ 382 struct clk *clk_get(struct device *dev, const char *id); 383 384 /** 385 * clk_bulk_get - lookup and obtain a number of references to clock producer. 386 * @dev: device for clock "consumer" 387 * @num_clks: the number of clk_bulk_data 388 * @clks: the clk_bulk_data table of consumer 389 * 390 * This helper function allows drivers to get several clk consumers in one 391 * operation. If any of the clk cannot be acquired then any clks 392 * that were obtained will be freed before returning to the caller. 393 * 394 * Returns 0 if all clocks specified in clk_bulk_data table are obtained 395 * successfully, or valid IS_ERR() condition containing errno. 396 * The implementation uses @dev and @clk_bulk_data.id to determine the 397 * clock consumer, and thereby the clock producer. 398 * The clock returned is stored in each @clk_bulk_data.clk field. 399 * 400 * Drivers must assume that the clock source is not enabled. 
401 * 402 * clk_bulk_get should not be called from within interrupt context. 403 */ 404 int __must_check clk_bulk_get(struct device *dev, int num_clks, 405 struct clk_bulk_data *clks); 406 /** 407 * clk_bulk_get_all - lookup and obtain all available references to clock 408 * producer. 409 * @dev: device for clock "consumer" 410 * @clks: pointer to the clk_bulk_data table of consumer 411 * 412 * This helper function allows drivers to get all clk consumers in one 413 * operation. If any of the clk cannot be acquired then any clks 414 * that were obtained will be freed before returning to the caller. 415 * 416 * Returns a positive value for the number of clocks obtained while the 417 * clock references are stored in the clk_bulk_data table in @clks field. 418 * Returns 0 if there're none and a negative value if something failed. 419 * 420 * Drivers must assume that the clock source is not enabled. 421 * 422 * clk_bulk_get should not be called from within interrupt context. 423 */ 424 int __must_check clk_bulk_get_all(struct device *dev, 425 struct clk_bulk_data **clks); 426 427 /** 428 * clk_bulk_get_optional - lookup and obtain a number of references to clock producer 429 * @dev: device for clock "consumer" 430 * @num_clks: the number of clk_bulk_data 431 * @clks: the clk_bulk_data table of consumer 432 * 433 * Behaves the same as clk_bulk_get() except where there is no clock producer. 434 * In this case, instead of returning -ENOENT, the function returns 0 and 435 * NULL for a clk for which a clock producer could not be determined. 436 */ 437 int __must_check clk_bulk_get_optional(struct device *dev, int num_clks, 438 struct clk_bulk_data *clks); 439 /** 440 * devm_clk_bulk_get - managed get multiple clk consumers 441 * @dev: device for clock "consumer" 442 * @num_clks: the number of clk_bulk_data 443 * @clks: the clk_bulk_data table of consumer 444 * 445 * Return 0 on success, an errno on failure. 
446 * 447 * This helper function allows drivers to get several clk 448 * consumers in one operation with management, the clks will 449 * automatically be freed when the device is unbound. 450 */ 451 int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, 452 struct clk_bulk_data *clks); 453 /** 454 * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks 455 * @dev: device for clock "consumer" 456 * @num_clks: the number of clk_bulk_data 457 * @clks: pointer to the clk_bulk_data table of consumer 458 * 459 * Behaves the same as devm_clk_bulk_get() except where there is no clock 460 * producer. In this case, instead of returning -ENOENT, the function returns 461 * NULL for given clk. It is assumed all clocks in clk_bulk_data are optional. 462 * 463 * Returns 0 if all clocks specified in clk_bulk_data table are obtained 464 * successfully or for any clk there was no clk provider available, otherwise 465 * returns valid IS_ERR() condition containing errno. 466 * The implementation uses @dev and @clk_bulk_data.id to determine the 467 * clock consumer, and thereby the clock producer. 468 * The clock returned is stored in each @clk_bulk_data.clk field. 469 * 470 * Drivers must assume that the clock source is not enabled. 471 * 472 * clk_bulk_get should not be called from within interrupt context. 473 */ 474 int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks, 475 struct clk_bulk_data *clks); 476 /** 477 * devm_clk_bulk_get_all - managed get multiple clk consumers 478 * @dev: device for clock "consumer" 479 * @clks: pointer to the clk_bulk_data table of consumer 480 * 481 * Returns a positive value for the number of clocks obtained while the 482 * clock references are stored in the clk_bulk_data table in @clks field. 483 * Returns 0 if there're none and a negative value if something failed. 
484 * 485 * This helper function allows drivers to get several clk 486 * consumers in one operation with management, the clks will 487 * automatically be freed when the device is unbound. 488 */ 489 490 int __must_check devm_clk_bulk_get_all(struct device *dev, 491 struct clk_bulk_data **clks); 492 493 /** 494 * devm_clk_bulk_get_all_enable - Get and enable all clocks of the consumer (managed) 495 * @dev: device for clock "consumer" 496 * @clks: pointer to the clk_bulk_data table of consumer 497 * 498 * Returns success (0) or negative errno. 499 * 500 * This helper function allows drivers to get all clocks of the 501 * consumer and enables them in one operation with management. 502 * The clks will automatically be disabled and freed when the device 503 * is unbound. 504 */ 505 506 int __must_check devm_clk_bulk_get_all_enable(struct device *dev, 507 struct clk_bulk_data **clks); 508 509 /** 510 * devm_clk_get - lookup and obtain a managed reference to a clock producer. 511 * @dev: device for clock "consumer" 512 * @id: clock consumer ID 513 * 514 * Context: May sleep. 515 * 516 * Return: a struct clk corresponding to the clock producer, or 517 * valid IS_ERR() condition containing errno. The implementation 518 * uses @dev and @id to determine the clock consumer, and thereby 519 * the clock producer. (IOW, @id may be identical strings, but 520 * clk_get may return different clock producers depending on @dev.) 521 * 522 * Drivers must assume that the clock source is neither prepared nor 523 * enabled. 524 * 525 * The clock will automatically be freed when the device is unbound 526 * from the bus. 527 */ 528 struct clk *devm_clk_get(struct device *dev, const char *id); 529 530 /** 531 * devm_clk_get_prepared - devm_clk_get() + clk_prepare() 532 * @dev: device for clock "consumer" 533 * @id: clock consumer ID 534 * 535 * Context: May sleep. 536 * 537 * Return: a struct clk corresponding to the clock producer, or 538 * valid IS_ERR() condition containing errno. 
The implementation 539 * uses @dev and @id to determine the clock consumer, and thereby 540 * the clock producer. (IOW, @id may be identical strings, but 541 * clk_get may return different clock producers depending on @dev.) 542 * 543 * The returned clk (if valid) is prepared. Drivers must however assume 544 * that the clock is not enabled. 545 * 546 * The clock will automatically be unprepared and freed when the device 547 * is unbound from the bus. 548 */ 549 struct clk *devm_clk_get_prepared(struct device *dev, const char *id); 550 551 /** 552 * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable() 553 * @dev: device for clock "consumer" 554 * @id: clock consumer ID 555 * 556 * Context: May sleep. 557 * 558 * Return: a struct clk corresponding to the clock producer, or 559 * valid IS_ERR() condition containing errno. The implementation 560 * uses @dev and @id to determine the clock consumer, and thereby 561 * the clock producer. (IOW, @id may be identical strings, but 562 * clk_get may return different clock producers depending on @dev.) 563 * 564 * The returned clk (if valid) is prepared and enabled. 565 * 566 * The clock will automatically be disabled, unprepared and freed 567 * when the device is unbound from the bus. 568 */ 569 struct clk *devm_clk_get_enabled(struct device *dev, const char *id); 570 571 /** 572 * devm_clk_get_optional - lookup and obtain a managed reference to an optional 573 * clock producer. 574 * @dev: device for clock "consumer" 575 * @id: clock consumer ID 576 * 577 * Context: May sleep. 578 * 579 * Return: a struct clk corresponding to the clock producer, or 580 * valid IS_ERR() condition containing errno. The implementation 581 * uses @dev and @id to determine the clock consumer, and thereby 582 * the clock producer. If no such clk is found, it returns NULL 583 * which serves as a dummy clk. That's the only difference compared 584 * to devm_clk_get(). 
585 * 586 * Drivers must assume that the clock source is neither prepared nor 587 * enabled. 588 * 589 * The clock will automatically be freed when the device is unbound 590 * from the bus. 591 */ 592 struct clk *devm_clk_get_optional(struct device *dev, const char *id); 593 594 /** 595 * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare() 596 * @dev: device for clock "consumer" 597 * @id: clock consumer ID 598 * 599 * Context: May sleep. 600 * 601 * Return: a struct clk corresponding to the clock producer, or 602 * valid IS_ERR() condition containing errno. The implementation 603 * uses @dev and @id to determine the clock consumer, and thereby 604 * the clock producer. If no such clk is found, it returns NULL 605 * which serves as a dummy clk. That's the only difference compared 606 * to devm_clk_get_prepared(). 607 * 608 * The returned clk (if valid) is prepared. Drivers must however 609 * assume that the clock is not enabled. 610 * 611 * The clock will automatically be unprepared and freed when the 612 * device is unbound from the bus. 613 */ 614 struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id); 615 616 /** 617 * devm_clk_get_optional_enabled - devm_clk_get_optional() + 618 * clk_prepare_enable() 619 * @dev: device for clock "consumer" 620 * @id: clock consumer ID 621 * 622 * Context: May sleep. 623 * 624 * Return: a struct clk corresponding to the clock producer, or 625 * valid IS_ERR() condition containing errno. The implementation 626 * uses @dev and @id to determine the clock consumer, and thereby 627 * the clock producer. If no such clk is found, it returns NULL 628 * which serves as a dummy clk. That's the only difference compared 629 * to devm_clk_get_enabled(). 630 * 631 * The returned clk (if valid) is prepared and enabled. 632 * 633 * The clock will automatically be disabled, unprepared and freed 634 * when the device is unbound from the bus. 
635 */ 636 struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id); 637 638 /** 639 * devm_get_clk_from_child - lookup and obtain a managed reference to a 640 * clock producer from child node. 641 * @dev: device for clock "consumer" 642 * @np: pointer to clock consumer node 643 * @con_id: clock consumer ID 644 * 645 * This function parses the clocks, and uses them to look up the 646 * struct clk from the registered list of clock providers by using 647 * @np and @con_id 648 * 649 * The clock will automatically be freed when the device is unbound 650 * from the bus. 651 */ 652 struct clk *devm_get_clk_from_child(struct device *dev, 653 struct device_node *np, const char *con_id); 654 655 /** 656 * clk_enable - inform the system when the clock source should be running. 657 * @clk: clock source 658 * 659 * If the clock can not be enabled/disabled, this should return success. 660 * 661 * May be called from atomic contexts. 662 * 663 * Returns success (0) or negative errno. 664 */ 665 int clk_enable(struct clk *clk); 666 667 /** 668 * clk_bulk_enable - inform the system when the set of clks should be running. 669 * @num_clks: the number of clk_bulk_data 670 * @clks: the clk_bulk_data table of consumer 671 * 672 * May be called from atomic contexts. 673 * 674 * Returns success (0) or negative errno. 675 */ 676 int __must_check clk_bulk_enable(int num_clks, 677 const struct clk_bulk_data *clks); 678 679 /** 680 * clk_disable - inform the system when the clock source is no longer required. 681 * @clk: clock source 682 * 683 * Inform the system that a clock source is no longer required by 684 * a driver and may be shut down. 685 * 686 * May be called from atomic contexts. 687 * 688 * Implementation detail: if the clock source is shared between 689 * multiple drivers, clk_enable() calls must be balanced by the 690 * same number of clk_disable() calls for the clock source to be 691 * disabled. 
692 */ 693 void clk_disable(struct clk *clk); 694 695 /** 696 * clk_bulk_disable - inform the system when the set of clks is no 697 * longer required. 698 * @num_clks: the number of clk_bulk_data 699 * @clks: the clk_bulk_data table of consumer 700 * 701 * Inform the system that a set of clks is no longer required by 702 * a driver and may be shut down. 703 * 704 * May be called from atomic contexts. 705 * 706 * Implementation detail: if the set of clks is shared between 707 * multiple drivers, clk_bulk_enable() calls must be balanced by the 708 * same number of clk_bulk_disable() calls for the clock source to be 709 * disabled. 710 */ 711 void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks); 712 713 /** 714 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source. 715 * This is only valid once the clock source has been enabled. 716 * @clk: clock source 717 */ 718 unsigned long clk_get_rate(struct clk *clk); 719 720 /** 721 * clk_put - "free" the clock source 722 * @clk: clock source 723 * 724 * Note: drivers must ensure that all clk_enable calls made on this 725 * clock source are balanced by clk_disable calls prior to calling 726 * this function. 727 * 728 * clk_put should not be called from within interrupt context. 729 */ 730 void clk_put(struct clk *clk); 731 732 /** 733 * clk_bulk_put - "free" the clock source 734 * @num_clks: the number of clk_bulk_data 735 * @clks: the clk_bulk_data table of consumer 736 * 737 * Note: drivers must ensure that all clk_bulk_enable calls made on this 738 * clock source are balanced by clk_bulk_disable calls prior to calling 739 * this function. 740 * 741 * clk_bulk_put should not be called from within interrupt context. 
742 */ 743 void clk_bulk_put(int num_clks, struct clk_bulk_data *clks); 744 745 /** 746 * clk_bulk_put_all - "free" all the clock source 747 * @num_clks: the number of clk_bulk_data 748 * @clks: the clk_bulk_data table of consumer 749 * 750 * Note: drivers must ensure that all clk_bulk_enable calls made on this 751 * clock source are balanced by clk_bulk_disable calls prior to calling 752 * this function. 753 * 754 * clk_bulk_put_all should not be called from within interrupt context. 755 */ 756 void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks); 757 758 /** 759 * devm_clk_put - "free" a managed clock source 760 * @dev: device used to acquire the clock 761 * @clk: clock source acquired with devm_clk_get() 762 * 763 * Note: drivers must ensure that all clk_enable calls made on this 764 * clock source are balanced by clk_disable calls prior to calling 765 * this function. 766 * 767 * clk_put should not be called from within interrupt context. 768 */ 769 void devm_clk_put(struct device *dev, struct clk *clk); 770 771 /* 772 * The remaining APIs are optional for machine class support. 773 */ 774 775 776 /** 777 * clk_round_rate - adjust a rate to the exact rate a clock can provide 778 * @clk: clock source 779 * @rate: desired clock rate in Hz 780 * 781 * This answers the question "if I were to pass @rate to clk_set_rate(), 782 * what clock rate would I end up with?" without changing the hardware 783 * in any way. In other words: 784 * 785 * rate = clk_round_rate(clk, r); 786 * 787 * and: 788 * 789 * clk_set_rate(clk, r); 790 * rate = clk_get_rate(clk); 791 * 792 * are equivalent except the former does not modify the clock hardware 793 * in any way. 794 * 795 * Returns rounded clock rate in Hz, or negative errno. 
 */
long clk_round_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate - set the clock rate for a clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * Updating the rate starts at the top-most affected clock and then
 * walks the tree down to the bottom-most clock that needs updating.
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
 *                          clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This helper function allows drivers to atomically set the rate of a producer
 * and claim exclusivity over the rate control of the producer.
 *
 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get(). Caller must balance this call with a call to
 * clk_rate_exclusive_put()
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(const struct clk *clk, const struct clk *parent);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_parent - set the parent clock source for this clock
 * @clk: clock source
 * @parent: parent clock source
 *
 * Returns success (0) or negative errno.
 */
int clk_set_parent(struct clk *clk, struct clk *parent);

/**
 * clk_get_parent - get the parent clock source for this clock
 * @clk: clock source
 *
 * Returns struct clk corresponding to parent clock source, or
 * valid IS_ERR() condition containing errno.
 */
struct clk *clk_get_parent(struct clk *clk);

/**
 * clk_get_sys - get a clock based upon the device name
 * @dev_id: device name
 * @con_id: connection ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev_id and @con_id to determine the clock consumer, and
 * thereby the clock producer.  In contrast to clk_get() this function
 * takes the device name instead of the device itself for identification.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get_sys should not be called from within interrupt context.
 */
struct clk *clk_get_sys(const char *dev_id, const char *con_id);

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code so locking is not necessary.
 */
int clk_save_context(void);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * This occurs with all clocks enabled. Occurs deep within the resume code
 * so locking is not necessary.
 */
void clk_restore_context(void);

#else /* !CONFIG_HAVE_CLK */

/*
 * No-op stubs used when the kernel is built without clock support: the
 * "get" variants hand back NULL, and the remaining operations silently
 * succeed (return 0 / true, or do nothing), so consumers can call the
 * clk API unconditionally without wrapping every call site in #ifdefs.
 */

static inline struct clk *clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_optional(struct device *dev,
					int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_all(struct device *dev,
						struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_prepared(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_enabled(struct device *dev,
					       const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
							 const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
							const char *id)
{
	return NULL;
}

static inline int __must_check
devm_clk_bulk_get(struct device *dev, int num_clks,
		  struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
					int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
						     struct clk_bulk_data **clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
						struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_get_clk_from_child(struct device *dev,
				struct device_node *np, const char *con_id)
{
	return NULL;
}

static inline void clk_put(struct clk *clk) {}

static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}

static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}

static inline void devm_clk_put(struct device *dev, struct clk *clk) {}

static inline int clk_enable(struct clk *clk)
{
	return 0;
}

static inline int __must_check clk_bulk_enable(int num_clks,
					       const struct clk_bulk_data *clks)
{
	return 0;
}

static inline void clk_disable(struct clk *clk) {}

static inline void clk_bulk_disable(int num_clks,
				    const struct clk_bulk_data *clks) {}

static inline unsigned long clk_get_rate(struct clk *clk)
{
	return 0;
}

static inline int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	return true;
}

static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
				     unsigned long max)
{
	return 0;
}

static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return 0;
}

static inline struct clk *clk_get_parent(struct clk *clk)
{
	return NULL;
}

static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	return NULL;
}

static inline int clk_save_context(void)
{
	return 0;
}

static inline void clk_restore_context(void) {}

#endif

/*
 * clk_prepare_enable helps cases using clk_enable in non-atomic context:
 * it prepares the clock and then enables it, undoing the prepare again
 * if the enable step fails.  Returns 0 on success, or the error from
 * clk_prepare()/clk_enable().
 */
static inline int clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);
	if (ret)
		return ret;
	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);

	return ret;
}

/*
 * clk_disable_unprepare helps cases using clk_disable in non-atomic
 * context: it disables and then unprepares the clock.
 */
static inline void clk_disable_unprepare(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}

/*
 * Bulk counterpart of clk_prepare_enable(): prepares then enables
 * @num_clks clocks, unpreparing them all again if the enable step fails.
 * Returns 0 on success, or the first error encountered.
 */
static inline int __must_check
clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks)
{
	int ret;

	ret = clk_bulk_prepare(num_clks, clks);
	if (ret)
		return ret;
	ret = clk_bulk_enable(num_clks, clks);
	if (ret)
		clk_bulk_unprepare(num_clks, clks);

	return ret;
}

/* Bulk counterpart of clk_disable_unprepare(). */
static inline void clk_bulk_disable_unprepare(int num_clks,
					      const struct clk_bulk_data *clks)
{
	clk_bulk_disable(num_clks, clks);
	clk_bulk_unprepare(num_clks, clks);
}

/**
 * clk_drop_range - Reset any range set on that clock
 * @clk: clock source
 *
 * Returns success (0) or negative errno.
 */
static inline int clk_drop_range(struct clk *clk)
{
	return clk_set_rate_range(clk, 0, ULONG_MAX);
}

/**
 * clk_get_optional - lookup and obtain a reference to an optional clock
 *		      producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as clk_get() except where there is no clock producer. In
 * this case, instead of returning -ENOENT, the function returns NULL.
 */
static inline struct clk *clk_get_optional(struct device *dev, const char *id)
{
	struct clk *clk = clk_get(dev, id);

	if (clk == ERR_PTR(-ENOENT))
		return NULL;

	return clk;
}

#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
#else
/* Stubs for builds without device tree and/or the common clock framework. */
static inline struct clk *of_clk_get(struct device_node *np, int index)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_by_name(struct device_node *np,
					     const char *name)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

#endif /* __LINUX_CLK_H */