xref: /linux-6.15/drivers/devfreq/devfreq.c (revision adfe3b76)
1 /*
2  * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
3  *	    for Non-CPU Devices.
4  *
5  * Copyright (C) 2011 Samsung Electronics
6  *	MyungJoo Ham <[email protected]>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 
13 #include <linux/kernel.h>
14 #include <linux/kmod.h>
15 #include <linux/sched.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/init.h>
19 #include <linux/export.h>
20 #include <linux/slab.h>
21 #include <linux/stat.h>
22 #include <linux/pm_opp.h>
23 #include <linux/devfreq.h>
24 #include <linux/workqueue.h>
25 #include <linux/platform_device.h>
26 #include <linux/list.h>
27 #include <linux/printk.h>
28 #include <linux/hrtimer.h>
29 #include <linux/of.h>
30 #include "governor.h"
31 
32 static struct class *devfreq_class;
33 
34 /*
35  * devfreq core provides delayed work based load monitoring helper
36  * functions. Governors can use these or can implement their own
37  * monitoring mechanism.
38  */
39 static struct workqueue_struct *devfreq_wq;
40 
41 /* The list of all device-devfreq governors */
42 static LIST_HEAD(devfreq_governor_list);
43 /* The list of all device-devfreq */
44 static LIST_HEAD(devfreq_list);
45 static DEFINE_MUTEX(devfreq_list_lock);
46 
47 /**
48  * find_device_devfreq() - find devfreq struct using device pointer
49  * @dev:	device pointer used to lookup device devfreq.
50  *
51  * Search the list of device devfreqs and return the matched device's
52  * devfreq info. devfreq_list_lock should be held by the caller.
53  */
54 static struct devfreq *find_device_devfreq(struct device *dev)
55 {
56 	struct devfreq *tmp_devfreq;
57 
58 	if (IS_ERR_OR_NULL(dev)) {
59 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
60 		return ERR_PTR(-EINVAL);
61 	}
62 	WARN(!mutex_is_locked(&devfreq_list_lock),
63 	     "devfreq_list_lock must be locked.");
64 
65 	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
66 		if (tmp_devfreq->dev.parent == dev)
67 			return tmp_devfreq;
68 	}
69 
70 	return ERR_PTR(-ENODEV);
71 }
72 
73 static unsigned long find_available_min_freq(struct devfreq *devfreq)
74 {
75 	struct dev_pm_opp *opp;
76 	unsigned long min_freq = 0;
77 
78 	opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
79 	if (IS_ERR(opp))
80 		min_freq = 0;
81 	else
82 		dev_pm_opp_put(opp);
83 
84 	return min_freq;
85 }
86 
87 static unsigned long find_available_max_freq(struct devfreq *devfreq)
88 {
89 	struct dev_pm_opp *opp;
90 	unsigned long max_freq = ULONG_MAX;
91 
92 	opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
93 	if (IS_ERR(opp))
94 		max_freq = 0;
95 	else
96 		dev_pm_opp_put(opp);
97 
98 	return max_freq;
99 }
100 
101 /**
102  * devfreq_get_freq_level() - Lookup freq_table for the frequency
103  * @devfreq:	the devfreq instance
104  * @freq:	the target frequency
105  */
106 static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
107 {
108 	int lev;
109 
110 	for (lev = 0; lev < devfreq->profile->max_state; lev++)
111 		if (freq == devfreq->profile->freq_table[lev])
112 			return lev;
113 
114 	return -EINVAL;
115 }
116 
117 static int set_freq_table(struct devfreq *devfreq)
118 {
119 	struct devfreq_dev_profile *profile = devfreq->profile;
120 	struct dev_pm_opp *opp;
121 	unsigned long freq;
122 	int i, count;
123 
124 	/* Initialize the freq_table from OPP table */
125 	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
126 	if (count <= 0)
127 		return -EINVAL;
128 
129 	profile->max_state = count;
130 	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
131 					profile->max_state,
132 					sizeof(*profile->freq_table),
133 					GFP_KERNEL);
134 	if (!profile->freq_table) {
135 		profile->max_state = 0;
136 		return -ENOMEM;
137 	}
138 
139 	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
140 		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
141 		if (IS_ERR(opp)) {
142 			devm_kfree(devfreq->dev.parent, profile->freq_table);
143 			profile->max_state = 0;
144 			return PTR_ERR(opp);
145 		}
146 		dev_pm_opp_put(opp);
147 		profile->freq_table[i] = freq;
148 	}
149 
150 	return 0;
151 }
152 
153 /**
154  * devfreq_update_status() - Update statistics of devfreq behavior
155  * @devfreq:	the devfreq instance
156  * @freq:	the update target frequency
157  */
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	unsigned long cur_time;

	cur_time = jiffies;

	/* Immediately exit if previous_freq is not initialized yet. */
	if (!devfreq->previous_freq)
		goto out;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		ret = prev_lev;
		goto out;
	}

	/* Credit the elapsed time to the level we are leaving. */
	devfreq->time_in_state[prev_lev] +=
			 cur_time - devfreq->last_stat_updated;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	/* Count the prev_lev -> lev hop in the max_state x max_state matrix. */
	if (lev != prev_lev) {
		devfreq->trans_table[(prev_lev *
				devfreq->profile->max_state) + lev]++;
		devfreq->total_trans++;
	}

out:
	/* Stamp the update time even on error so intervals stay consistent. */
	devfreq->last_stat_updated = cur_time;
	return ret;
}
EXPORT_SYMBOL(devfreq_update_status);
195 
196 /**
197  * find_devfreq_governor() - find devfreq governor from name
198  * @name:	name of the governor
199  *
200  * Search the list of devfreq governors and return the matched
201  * governor's pointer. devfreq_list_lock should be held by the caller.
202  */
203 static struct devfreq_governor *find_devfreq_governor(const char *name)
204 {
205 	struct devfreq_governor *tmp_governor;
206 
207 	if (IS_ERR_OR_NULL(name)) {
208 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
209 		return ERR_PTR(-EINVAL);
210 	}
211 	WARN(!mutex_is_locked(&devfreq_list_lock),
212 	     "devfreq_list_lock must be locked.");
213 
214 	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
215 		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
216 			return tmp_governor;
217 	}
218 
219 	return ERR_PTR(-ENODEV);
220 }
221 
222 /**
223  * try_then_request_governor() - Try to find the governor and request the
224  *                               module if is not found.
225  * @name:	name of the governor
226  *
227  * Search the list of devfreq governors and request the module and try again
228  * if is not found. This can happen when both drivers (the governor driver
229  * and the driver that call devfreq_add_device) are built as modules.
230  * devfreq_list_lock should be held by the caller. Returns the matched
231  * governor's pointer or an error pointer.
232  */
static struct devfreq_governor *try_then_request_governor(const char *name)
{
	struct devfreq_governor *governor;
	int err = 0;

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	governor = find_devfreq_governor(name);
	if (IS_ERR(governor)) {
		/*
		 * Drop the list lock: request_module() sleeps, and the
		 * loaded governor module takes devfreq_list_lock itself
		 * when it registers via devfreq_add_governor().
		 */
		mutex_unlock(&devfreq_list_lock);

		/* "simple_ondemand" lives in governor_simpleondemand.ko. */
		if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
			     DEVFREQ_NAME_LEN))
			err = request_module("governor_%s", "simpleondemand");
		else
			err = request_module("governor_%s", name);
		/* Restore previous state before return */
		mutex_lock(&devfreq_list_lock);
		if (err)
			return ERR_PTR(err);

		/* Retry: the freshly loaded module may have registered it. */
		governor = find_devfreq_governor(name);
	}

	return governor;
}
264 
265 static int devfreq_notify_transition(struct devfreq *devfreq,
266 		struct devfreq_freqs *freqs, unsigned int state)
267 {
268 	if (!devfreq)
269 		return -EINVAL;
270 
271 	switch (state) {
272 	case DEVFREQ_PRECHANGE:
273 		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
274 				DEVFREQ_PRECHANGE, freqs);
275 		break;
276 
277 	case DEVFREQ_POSTCHANGE:
278 		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
279 				DEVFREQ_POSTCHANGE, freqs);
280 		break;
281 	default:
282 		return -EINVAL;
283 	}
284 
285 	return 0;
286 }
287 
static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
			      u32 flags)
{
	struct devfreq_freqs freqs;
	unsigned long cur_freq;
	int err = 0;

	/* Prefer the driver-reported frequency over our cached value. */
	if (devfreq->profile->get_cur_freq)
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	else
		cur_freq = devfreq->previous_freq;

	freqs.old = cur_freq;
	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	/* The driver may rewrite new_freq to what it actually programmed. */
	err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
	if (err) {
		/* POSTCHANGE with new == old tells listeners nothing changed. */
		freqs.new = cur_freq;
		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
		return err;
	}

	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq_update_status(devfreq, new_freq))
		dev_err(&devfreq->dev,
			"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = new_freq;

	/* Remember where to return to after a system-wide resume. */
	if (devfreq->suspend_freq)
		devfreq->resume_freq = cur_freq;

	return err;
}
325 
326 /* Load monitoring helper functions for governors use */
327 
328 /**
329  * update_devfreq() - Reevaluate the device and configure frequency.
330  * @devfreq:	the devfreq instance.
331  *
332  * Note: Lock devfreq->lock before calling update_devfreq
333  *	 This function is exported for governors.
334  */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq, min_freq, max_freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq, QoS and available freq.
	 *
	 * List from the highest priority
	 * max_freq
	 * min_freq
	 */
	max_freq = min(devfreq->scaling_max_freq, devfreq->max_freq);
	min_freq = max(devfreq->scaling_min_freq, devfreq->min_freq);

	/* Clamp to min first, then max: max_freq wins when the range is empty. */
	if (freq < min_freq) {
		freq = min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (freq > max_freq) {
		freq = max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	return devfreq_set_target(devfreq, freq, flags);

}
EXPORT_SYMBOL(update_devfreq);
377 
378 /**
379  * devfreq_monitor() - Periodically poll devfreq objects.
380  * @work:	the work struct used to run devfreq_monitor periodically.
381  *
382  */
383 static void devfreq_monitor(struct work_struct *work)
384 {
385 	int err;
386 	struct devfreq *devfreq = container_of(work,
387 					struct devfreq, work.work);
388 
389 	mutex_lock(&devfreq->lock);
390 	err = update_devfreq(devfreq);
391 	if (err)
392 		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
393 
394 	queue_delayed_work(devfreq_wq, &devfreq->work,
395 				msecs_to_jiffies(devfreq->profile->polling_ms));
396 	mutex_unlock(&devfreq->lock);
397 }
398 
399 /**
400  * devfreq_monitor_start() - Start load monitoring of devfreq instance
401  * @devfreq:	the devfreq instance.
402  *
 403  * Helper function for starting devfreq device load monitoring. By
404  * default delayed work based monitoring is supported. Function
405  * to be called from governor in response to DEVFREQ_GOV_START
406  * event when device is added to devfreq framework.
407  */
408 void devfreq_monitor_start(struct devfreq *devfreq)
409 {
410 	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
411 	if (devfreq->profile->polling_ms)
412 		queue_delayed_work(devfreq_wq, &devfreq->work,
413 			msecs_to_jiffies(devfreq->profile->polling_ms));
414 }
415 EXPORT_SYMBOL(devfreq_monitor_start);
416 
417 /**
418  * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
419  * @devfreq:	the devfreq instance.
420  *
 421  * Helper function to stop devfreq device load monitoring. Function
422  * to be called from governor in response to DEVFREQ_GOV_STOP
423  * event when device is removed from devfreq framework.
424  */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	/* Synchronous cancel; safe even if the work was never queued. */
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);
430 
431 /**
432  * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
433  * @devfreq:	the devfreq instance.
434  *
 435  * Helper function to suspend devfreq device load monitoring. Function
436  * to be called from governor in response to DEVFREQ_GOV_SUSPEND
437  * event or when polling interval is set to zero.
438  *
439  * Note: Though this function is same as devfreq_monitor_stop(),
440  * intentionally kept separate to provide hooks for collecting
441  * transition statistics.
442  */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		/* Already suspended; nothing to do. */
		mutex_unlock(&devfreq->lock);
		return;
	}

	/* Close the current statistics interval before polling stops. */
	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);
	/* Cancel outside the lock: devfreq_monitor() takes devfreq->lock. */
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);
457 
458 /**
459  * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
460  * @devfreq:    the devfreq instance.
461  *
 462  * Helper function to resume devfreq device load monitoring. Function
463  * to be called from governor in response to DEVFREQ_GOV_RESUME
464  * event or when polling interval is set to non-zero.
465  */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	/* Nothing to do unless monitoring was actually suspended. */
	if (!devfreq->stop_polling)
		goto out;

	/* Restart periodic polling unless disabled or already queued. */
	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

	/* Open a fresh statistics interval starting "now". */
	devfreq->last_stat_updated = jiffies;
	devfreq->stop_polling = false;

	/* Re-read the hardware frequency; it may have changed meanwhile. */
	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);
490 
491 /**
492  * devfreq_interval_update() - Update device devfreq monitoring interval
493  * @devfreq:    the devfreq instance.
494  * @delay:      new polling interval to be set.
495  *
496  * Helper function to set new load monitoring polling interval. Function
497  * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
498  */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	/* Monitoring is suspended; the new value takes effect on resume. */
	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		/* Cancel outside the lock: devfreq_monitor() takes it. */
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		/* State may have changed while the lock was dropped. */
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);
537 
538 /**
539  * devfreq_notifier_call() - Notify that the device frequency requirements
 540  *			     have been changed out of devfreq framework.
541  * @nb:		the notifier_block (supposed to be devfreq->nb)
542  * @type:	not used
543  * @devp:	not used
544  *
545  * Called by a notifier that uses devfreq->nb.
546  */
547 static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
548 				 void *devp)
549 {
550 	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
551 	int ret;
552 
553 	mutex_lock(&devfreq->lock);
554 
555 	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
556 	if (!devfreq->scaling_min_freq) {
557 		mutex_unlock(&devfreq->lock);
558 		return -EINVAL;
559 	}
560 
561 	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
562 	if (!devfreq->scaling_max_freq) {
563 		mutex_unlock(&devfreq->lock);
564 		return -EINVAL;
565 	}
566 
567 	ret = update_devfreq(devfreq);
568 	mutex_unlock(&devfreq->lock);
569 
570 	return ret;
571 }
572 
573 /**
574  * devfreq_dev_release() - Callback for struct device to release the device.
575  * @dev:	the devfreq device
576  *
577  * Remove devfreq from the list and release its resources.
578  */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	/* Unlink from the global list unless it was never (or already) added. */
	mutex_lock(&devfreq_list_lock);
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	/* Let the driver release its own resources. */
	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}
598 
599 /**
600  * devfreq_add_device() - Add devfreq feature to the device
601  * @dev:	the device to add devfreq feature.
602  * @profile:	device-specific profile to run devfreq.
603  * @governor_name:	name of the policy to choose frequency.
604  * @data:	private data for the governor. The devfreq framework does not
605  *		touch this value.
606  */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	/* Monotonic suffix for the "devfreq%d" device name. */
	static atomic_t devfreq_no = ATOMIC_INIT(-1);
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* Reject a second devfreq instance for the same parent device. */
	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: Unable to create devfreq for the device.\n",
			__func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->last_status.current_frequency = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	/* Build a freq_table from the OPP table if the driver gave none. */
	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
		mutex_unlock(&devfreq->lock);
		err = set_freq_table(devfreq);
		if (err < 0)
			goto err_dev;
		mutex_lock(&devfreq->lock);
	}

	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}
	devfreq->min_freq = devfreq->scaling_min_freq;

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}
	devfreq->max_freq = devfreq->scaling_max_freq;

	devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
	atomic_set(&devfreq->suspend_count, 0);

	dev_set_name(&devfreq->dev, "devfreq%d",
				atomic_inc_return(&devfreq_no));
	err = device_register(&devfreq->dev);
	if (err) {
		mutex_unlock(&devfreq->lock);
		/* After device_register(), put_device() runs the release. */
		put_device(&devfreq->dev);
		goto err_out;
	}

	/* Statistics buffers live in devres of the devfreq device itself. */
	devfreq->trans_table = devm_kzalloc(&devfreq->dev,
			array3_size(sizeof(unsigned int),
				    devfreq->profile->max_state,
				    devfreq->profile->max_state),
			GFP_KERNEL);
	if (!devfreq->trans_table) {
		mutex_unlock(&devfreq->lock);
		err = -ENOMEM;
		goto err_devfreq;
	}

	devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
			devfreq->profile->max_state,
			sizeof(unsigned long),
			GFP_KERNEL);
	if (!devfreq->time_in_state) {
		mutex_unlock(&devfreq->lock);
		err = -ENOMEM;
		goto err_devfreq;
	}

	devfreq->last_stat_updated = jiffies;

	srcu_init_notifier_head(&devfreq->transition_notifier_list);

	mutex_unlock(&devfreq->lock);

	mutex_lock(&devfreq_list_lock);

	/* May drop and re-take devfreq_list_lock to load the module. */
	governor = try_then_request_governor(devfreq->governor_name);
	if (IS_ERR(governor)) {
		dev_err(dev, "%s: Unable to find governor for the device\n",
			__func__);
		err = PTR_ERR(governor);
		goto err_init;
	}

	devfreq->governor = governor;
	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
						NULL);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}

	list_add(&devfreq->node, &devfreq_list);

	mutex_unlock(&devfreq_list_lock);

	return devfreq;

err_init:
	mutex_unlock(&devfreq_list_lock);
err_devfreq:
	/* Unregister; devfreq_dev_release() frees the structure. */
	devfreq_remove_device(devfreq);
	/* NULL so the kfree() below is a no-op on this path. */
	devfreq = NULL;
err_dev:
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);
749 
750 /**
751  * devfreq_remove_device() - Remove devfreq feature from a device.
752  * @devfreq:	the devfreq instance to be removed
753  *
754  * The opposite of devfreq_add_device().
755  */
756 int devfreq_remove_device(struct devfreq *devfreq)
757 {
758 	if (!devfreq)
759 		return -EINVAL;
760 
761 	if (devfreq->governor)
762 		devfreq->governor->event_handler(devfreq,
763 						 DEVFREQ_GOV_STOP, NULL);
764 	device_unregister(&devfreq->dev);
765 
766 	return 0;
767 }
768 EXPORT_SYMBOL(devfreq_remove_device);
769 
static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **this = res;

	/* A devres node without a devfreq pointer would be a framework bug. */
	if (WARN_ON(!this || !*this))
		return 0;

	return *this == data;
}
779 
static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	struct devfreq *devfreq = *(struct devfreq **)res;

	/* devres teardown: remove the managed devfreq instance. */
	devfreq_remove_device(devfreq);
}
784 
785 /**
786  * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
787  * @dev:	the device to add devfreq feature.
788  * @profile:	device-specific profile to run devfreq.
789  * @governor_name:	name of the policy to choose frequency.
790  * @data:	private data for the governor. The devfreq framework does not
791  *		touch this value.
792  *
793  * This function manages automatically the memory of devfreq device using device
794  * resource management and simplify the free operation for memory of devfreq
795  * device.
796  */
797 struct devfreq *devm_devfreq_add_device(struct device *dev,
798 					struct devfreq_dev_profile *profile,
799 					const char *governor_name,
800 					void *data)
801 {
802 	struct devfreq **ptr, *devfreq;
803 
804 	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
805 	if (!ptr)
806 		return ERR_PTR(-ENOMEM);
807 
808 	devfreq = devfreq_add_device(dev, profile, governor_name, data);
809 	if (IS_ERR(devfreq)) {
810 		devres_free(ptr);
811 		return devfreq;
812 	}
813 
814 	*ptr = devfreq;
815 	devres_add(dev, ptr);
816 
817 	return devfreq;
818 }
819 EXPORT_SYMBOL(devm_devfreq_add_device);
820 
821 #ifdef CONFIG_OF
822 /*
823  * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
824  * @dev - instance to the given device
825  * @index - index into list of devfreq
826  *
827  * return the instance of devfreq device
828  */
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	struct device_node *node;
	struct devfreq *devfreq;

	if (!dev)
		return ERR_PTR(-EINVAL);

	if (!dev->of_node)
		return ERR_PTR(-EINVAL);

	/* Resolve the index-th phandle of the "devfreq" DT property. */
	node = of_parse_phandle(dev->of_node, "devfreq", index);
	if (!node)
		return ERR_PTR(-ENODEV);

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		if (devfreq->dev.parent
			&& devfreq->dev.parent->of_node == node) {
			mutex_unlock(&devfreq_list_lock);
			of_node_put(node);
			return devfreq;
		}
	}
	mutex_unlock(&devfreq_list_lock);
	of_node_put(node);

	/* The target exists in DT but has not registered a devfreq yet. */
	return ERR_PTR(-EPROBE_DEFER);
}
#else
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	/* Without CONFIG_OF there is no phandle to look up. */
	return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
865 
866 /**
867  * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
868  * @dev:	the device to add devfreq feature.
869  * @devfreq:	the devfreq instance to be removed
870  */
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
{
	/* Find and release the matching devres node; WARN if it is gone. */
	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_remove_device);
877 
878 /**
879  * devfreq_suspend_device() - Suspend devfreq of a device.
880  * @devfreq: the devfreq instance to be suspended
881  *
882  * This function is intended to be called by the pm callbacks
883  * (e.g., runtime_suspend, suspend) of the device driver that
884  * holds the devfreq.
885  */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	int ret;

	if (!devfreq)
		return -EINVAL;

	/* Suspends nest; only the first caller actually suspends. */
	if (atomic_inc_return(&devfreq->suspend_count) > 1)
		return 0;

	if (devfreq->governor) {
		ret = devfreq->governor->event_handler(devfreq,
					DEVFREQ_GOV_SUSPEND, NULL);
		if (ret)
			/*
			 * NOTE(review): suspend_count stays incremented on
			 * this error path — confirm callers do not retry.
			 */
			return ret;
	}

	/* Pin the device to its designated suspend OPP, if it has one. */
	if (devfreq->suspend_freq) {
		ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(devfreq_suspend_device);
912 
913 /**
914  * devfreq_resume_device() - Resume devfreq of a device.
915  * @devfreq: the devfreq instance to be resumed
916  *
917  * This function is intended to be called by the pm callbacks
918  * (e.g., runtime_resume, resume) of the device driver that
919  * holds the devfreq.
920  */
int devfreq_resume_device(struct devfreq *devfreq)
{
	int ret;

	if (!devfreq)
		return -EINVAL;

	/* Balance devfreq_suspend_device(); only the last resume acts. */
	if (atomic_dec_return(&devfreq->suspend_count) >= 1)
		return 0;

	/* Restore the frequency that was active before suspending. */
	if (devfreq->resume_freq) {
		ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
		if (ret)
			return ret;
	}

	if (devfreq->governor) {
		ret = devfreq->governor->event_handler(devfreq,
					DEVFREQ_GOV_RESUME, NULL);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(devfreq_resume_device);
947 
948 /**
949  * devfreq_suspend() - Suspend devfreq governors and devices
950  *
951  * Called during system wide Suspend/Hibernate cycles for suspending governors
952  * and devices preserving the state for resume. On some platforms the devfreq
953  * device must have precise state (frequency) after resume in order to provide
954  * fully operating setup.
955  */
956 void devfreq_suspend(void)
957 {
958 	struct devfreq *devfreq;
959 	int ret;
960 
961 	mutex_lock(&devfreq_list_lock);
962 	list_for_each_entry(devfreq, &devfreq_list, node) {
963 		ret = devfreq_suspend_device(devfreq);
964 		if (ret)
965 			dev_err(&devfreq->dev,
966 				"failed to suspend devfreq device\n");
967 	}
968 	mutex_unlock(&devfreq_list_lock);
969 }
970 
971 /**
972  * devfreq_resume() - Resume devfreq governors and devices
973  *
974  * Called during system wide Suspend/Hibernate cycle for resuming governors and
975  * devices that are suspended with devfreq_suspend().
976  */
977 void devfreq_resume(void)
978 {
979 	struct devfreq *devfreq;
980 	int ret;
981 
982 	mutex_lock(&devfreq_list_lock);
983 	list_for_each_entry(devfreq, &devfreq_list, node) {
984 		ret = devfreq_resume_device(devfreq);
985 		if (ret)
986 			dev_warn(&devfreq->dev,
987 				 "failed to resume devfreq device\n");
988 	}
989 	mutex_unlock(&devfreq_list_lock);
990 }
991 
992 /**
993  * devfreq_add_governor() - Add devfreq governor
994  * @governor:	the devfreq governor to be added
995  */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	/*
	 * Attach the new governor to every device that asked for it by
	 * name (e.g. devices added before the governor module loaded).
	 */
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				/* Stop the stale governor before switching. */
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
				/* Fall through */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);
1056 
1057 /**
1058  * devfreq_remove_governor() - Remove devfreq feature from a device.
1059  * @governor:	the devfreq governor to be removed
1060  */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	/* Detach the governor from every device currently using it. */
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
				/* not reached */
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);
1111 
1112 static ssize_t governor_show(struct device *dev,
1113 			     struct device_attribute *attr, char *buf)
1114 {
1115 	if (!to_devfreq(dev)->governor)
1116 		return -EINVAL;
1117 
1118 	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
1119 }
1120 
1121 static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
1122 			      const char *buf, size_t count)
1123 {
1124 	struct devfreq *df = to_devfreq(dev);
1125 	int ret;
1126 	char str_governor[DEVFREQ_NAME_LEN + 1];
1127 	const struct devfreq_governor *governor, *prev_governor;
1128 
1129 	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
1130 	if (ret != 1)
1131 		return -EINVAL;
1132 
1133 	mutex_lock(&devfreq_list_lock);
1134 	governor = try_then_request_governor(str_governor);
1135 	if (IS_ERR(governor)) {
1136 		ret = PTR_ERR(governor);
1137 		goto out;
1138 	}
1139 	if (df->governor == governor) {
1140 		ret = 0;
1141 		goto out;
1142 	} else if ((df->governor && df->governor->immutable) ||
1143 					governor->immutable) {
1144 		ret = -EINVAL;
1145 		goto out;
1146 	}
1147 
1148 	if (df->governor) {
1149 		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
1150 		if (ret) {
1151 			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
1152 				 __func__, df->governor->name, ret);
1153 			goto out;
1154 		}
1155 	}
1156 	prev_governor = df->governor;
1157 	df->governor = governor;
1158 	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
1159 	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1160 	if (ret) {
1161 		dev_warn(dev, "%s: Governor %s not started(%d)\n",
1162 			 __func__, df->governor->name, ret);
1163 		df->governor = prev_governor;
1164 		strncpy(df->governor_name, prev_governor->name,
1165 			DEVFREQ_NAME_LEN);
1166 		ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1167 		if (ret) {
1168 			dev_err(dev,
1169 				"%s: reverting to Governor %s failed (%d)\n",
1170 				__func__, df->governor_name, ret);
1171 			df->governor = NULL;
1172 		}
1173 	}
1174 out:
1175 	mutex_unlock(&devfreq_list_lock);
1176 
1177 	if (!ret)
1178 		ret = count;
1179 	return ret;
1180 }
1181 static DEVICE_ATTR_RW(governor);
1182 
1183 static ssize_t available_governors_show(struct device *d,
1184 					struct device_attribute *attr,
1185 					char *buf)
1186 {
1187 	struct devfreq *df = to_devfreq(d);
1188 	ssize_t count = 0;
1189 
1190 	mutex_lock(&devfreq_list_lock);
1191 
1192 	/*
1193 	 * The devfreq with immutable governor (e.g., passive) shows
1194 	 * only own governor.
1195 	 */
1196 	if (df->governor->immutable) {
1197 		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
1198 				  "%s ", df->governor_name);
1199 	/*
1200 	 * The devfreq device shows the registered governor except for
1201 	 * immutable governors such as passive governor .
1202 	 */
1203 	} else {
1204 		struct devfreq_governor *governor;
1205 
1206 		list_for_each_entry(governor, &devfreq_governor_list, node) {
1207 			if (governor->immutable)
1208 				continue;
1209 			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1210 					   "%s ", governor->name);
1211 		}
1212 	}
1213 
1214 	mutex_unlock(&devfreq_list_lock);
1215 
1216 	/* Truncate the trailing space */
1217 	if (count)
1218 		count--;
1219 
1220 	count += sprintf(&buf[count], "\n");
1221 
1222 	return count;
1223 }
1224 static DEVICE_ATTR_RO(available_governors);
1225 
1226 static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
1227 			     char *buf)
1228 {
1229 	unsigned long freq;
1230 	struct devfreq *devfreq = to_devfreq(dev);
1231 
1232 	if (devfreq->profile->get_cur_freq &&
1233 		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
1234 		return sprintf(buf, "%lu\n", freq);
1235 
1236 	return sprintf(buf, "%lu\n", devfreq->previous_freq);
1237 }
1238 static DEVICE_ATTR_RO(cur_freq);
1239 
1240 static ssize_t target_freq_show(struct device *dev,
1241 				struct device_attribute *attr, char *buf)
1242 {
1243 	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
1244 }
1245 static DEVICE_ATTR_RO(target_freq);
1246 
1247 static ssize_t polling_interval_show(struct device *dev,
1248 				     struct device_attribute *attr, char *buf)
1249 {
1250 	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
1251 }
1252 
1253 static ssize_t polling_interval_store(struct device *dev,
1254 				      struct device_attribute *attr,
1255 				      const char *buf, size_t count)
1256 {
1257 	struct devfreq *df = to_devfreq(dev);
1258 	unsigned int value;
1259 	int ret;
1260 
1261 	if (!df->governor)
1262 		return -EINVAL;
1263 
1264 	ret = sscanf(buf, "%u", &value);
1265 	if (ret != 1)
1266 		return -EINVAL;
1267 
1268 	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
1269 	ret = count;
1270 
1271 	return ret;
1272 }
1273 static DEVICE_ATTR_RW(polling_interval);
1274 
/* sysfs "min_freq" write: set the user floor; writing 0 restores the table minimum. */
static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	/* df->lock orders this against update_devfreq() and max_freq_store(). */
	mutex_lock(&df->lock);

	if (value) {
		/* A non-zero floor must not exceed the current ceiling. */
		if (value > df->max_freq) {
			ret = -EINVAL;
			goto unlock;
		}
	} else {
		unsigned long *freq_table = df->profile->freq_table;

		/* Get minimum frequency according to sorting order */
		/* freq_table may be sorted ascending or descending. */
		if (freq_table[0] < freq_table[df->profile->max_state - 1])
			value = freq_table[0];
		else
			value = freq_table[df->profile->max_state - 1];
	}

	df->min_freq = value;
	/* Re-evaluate the target frequency under the new constraint. */
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}
1310 
1311 static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
1312 			     char *buf)
1313 {
1314 	struct devfreq *df = to_devfreq(dev);
1315 
1316 	return sprintf(buf, "%lu\n", max(df->scaling_min_freq, df->min_freq));
1317 }
1318 
/* sysfs "max_freq" write: set the user ceiling; writing 0 restores the table maximum. */
static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	/* df->lock orders this against update_devfreq() and min_freq_store(). */
	mutex_lock(&df->lock);

	if (value) {
		/* A non-zero ceiling must not drop below the current floor. */
		if (value < df->min_freq) {
			ret = -EINVAL;
			goto unlock;
		}
	} else {
		unsigned long *freq_table = df->profile->freq_table;

		/* Get maximum frequency according to sorting order */
		/* freq_table may be sorted ascending or descending. */
		if (freq_table[0] < freq_table[df->profile->max_state - 1])
			value = freq_table[df->profile->max_state - 1];
		else
			value = freq_table[0];
	}

	df->max_freq = value;
	/* Re-evaluate the target frequency under the new constraint. */
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}
static DEVICE_ATTR_RW(min_freq);
1355 
1356 static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
1357 			     char *buf)
1358 {
1359 	struct devfreq *df = to_devfreq(dev);
1360 
1361 	return sprintf(buf, "%lu\n", min(df->scaling_max_freq, df->max_freq));
1362 }
1363 static DEVICE_ATTR_RW(max_freq);
1364 
1365 static ssize_t available_frequencies_show(struct device *d,
1366 					  struct device_attribute *attr,
1367 					  char *buf)
1368 {
1369 	struct devfreq *df = to_devfreq(d);
1370 	ssize_t count = 0;
1371 	int i;
1372 
1373 	mutex_lock(&df->lock);
1374 
1375 	for (i = 0; i < df->profile->max_state; i++)
1376 		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1377 				"%lu ", df->profile->freq_table[i]);
1378 
1379 	mutex_unlock(&df->lock);
1380 	/* Truncate the trailing space */
1381 	if (count)
1382 		count--;
1383 
1384 	count += sprintf(&buf[count], "\n");
1385 
1386 	return count;
1387 }
1388 static DEVICE_ATTR_RO(available_frequencies);
1389 
1390 static ssize_t trans_stat_show(struct device *dev,
1391 			       struct device_attribute *attr, char *buf)
1392 {
1393 	struct devfreq *devfreq = to_devfreq(dev);
1394 	ssize_t len;
1395 	int i, j;
1396 	unsigned int max_state = devfreq->profile->max_state;
1397 
1398 	if (!devfreq->stop_polling &&
1399 			devfreq_update_status(devfreq, devfreq->previous_freq))
1400 		return 0;
1401 	if (max_state == 0)
1402 		return sprintf(buf, "Not Supported.\n");
1403 
1404 	len = sprintf(buf, "     From  :   To\n");
1405 	len += sprintf(buf + len, "           :");
1406 	for (i = 0; i < max_state; i++)
1407 		len += sprintf(buf + len, "%10lu",
1408 				devfreq->profile->freq_table[i]);
1409 
1410 	len += sprintf(buf + len, "   time(ms)\n");
1411 
1412 	for (i = 0; i < max_state; i++) {
1413 		if (devfreq->profile->freq_table[i]
1414 					== devfreq->previous_freq) {
1415 			len += sprintf(buf + len, "*");
1416 		} else {
1417 			len += sprintf(buf + len, " ");
1418 		}
1419 		len += sprintf(buf + len, "%10lu:",
1420 				devfreq->profile->freq_table[i]);
1421 		for (j = 0; j < max_state; j++)
1422 			len += sprintf(buf + len, "%10u",
1423 				devfreq->trans_table[(i * max_state) + j]);
1424 		len += sprintf(buf + len, "%10u\n",
1425 			jiffies_to_msecs(devfreq->time_in_state[i]));
1426 	}
1427 
1428 	len += sprintf(buf + len, "Total transition : %u\n",
1429 					devfreq->total_trans);
1430 	return len;
1431 }
1432 static DEVICE_ATTR_RO(trans_stat);
1433 
/* Default sysfs attributes created for every devfreq device. */
static struct attribute *devfreq_attrs[] = {
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
/* Generates devfreq_groups for devfreq_class->dev_groups below. */
ATTRIBUTE_GROUPS(devfreq);
1447 
1448 static int __init devfreq_init(void)
1449 {
1450 	devfreq_class = class_create(THIS_MODULE, "devfreq");
1451 	if (IS_ERR(devfreq_class)) {
1452 		pr_err("%s: couldn't create class\n", __FILE__);
1453 		return PTR_ERR(devfreq_class);
1454 	}
1455 
1456 	devfreq_wq = create_freezable_workqueue("devfreq_wq");
1457 	if (!devfreq_wq) {
1458 		class_destroy(devfreq_class);
1459 		pr_err("%s: couldn't create workqueue\n", __FILE__);
1460 		return -ENOMEM;
1461 	}
1462 	devfreq_class->dev_groups = devfreq_groups;
1463 
1464 	return 0;
1465 }
1466 subsys_initcall(devfreq_init);
1467 
1468 /*
1469  * The following are helper functions for devfreq user device drivers with
1470  * OPP framework.
1471  */
1472 
1473 /**
1474  * devfreq_recommended_opp() - Helper function to get proper OPP for the
1475  *			     freq value given to target callback.
1476  * @dev:	The devfreq user device. (parent of devfreq)
1477  * @freq:	The frequency given to target function
1478  * @flags:	Flags handed from devfreq framework.
1479  *
1480  * The callers are required to call dev_pm_opp_put() for the returned OPP after
1481  * use.
1482  */
1483 struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
1484 					   unsigned long *freq,
1485 					   u32 flags)
1486 {
1487 	struct dev_pm_opp *opp;
1488 
1489 	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
1490 		/* The freq is an upper bound. opp should be lower */
1491 		opp = dev_pm_opp_find_freq_floor(dev, freq);
1492 
1493 		/* If not available, use the closest opp */
1494 		if (opp == ERR_PTR(-ERANGE))
1495 			opp = dev_pm_opp_find_freq_ceil(dev, freq);
1496 	} else {
1497 		/* The freq is an lower bound. opp should be higher */
1498 		opp = dev_pm_opp_find_freq_ceil(dev, freq);
1499 
1500 		/* If not available, use the closest opp */
1501 		if (opp == ERR_PTR(-ERANGE))
1502 			opp = dev_pm_opp_find_freq_floor(dev, freq);
1503 	}
1504 
1505 	return opp;
1506 }
1507 EXPORT_SYMBOL(devfreq_recommended_opp);
1508 
1509 /**
1510  * devfreq_register_opp_notifier() - Helper function to get devfreq notified
1511  *				     for any changes in the OPP availability
1512  *				     changes
1513  * @dev:	The devfreq user device. (parent of devfreq)
1514  * @devfreq:	The devfreq object.
1515  */
1516 int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
1517 {
1518 	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
1519 }
1520 EXPORT_SYMBOL(devfreq_register_opp_notifier);
1521 
1522 /**
1523  * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
1524  *				       notified for any changes in the OPP
1525  *				       availability changes anymore.
1526  * @dev:	The devfreq user device. (parent of devfreq)
1527  * @devfreq:	The devfreq object.
1528  *
1529  * At exit() callback of devfreq_dev_profile, this must be included if
1530  * devfreq_recommended_opp is used.
1531  */
1532 int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
1533 {
1534 	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
1535 }
1536 EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
1537 
/* devres destructor: undo devm_devfreq_register_opp_notifier(). */
static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	struct devfreq **devfreq = res;

	devfreq_unregister_opp_notifier(dev, *devfreq);
}
1542 
1543 /**
1544  * devm_devfreq_register_opp_notifier() - Resource-managed
1545  *					  devfreq_register_opp_notifier()
1546  * @dev:	The devfreq user device. (parent of devfreq)
1547  * @devfreq:	The devfreq object.
1548  */
1549 int devm_devfreq_register_opp_notifier(struct device *dev,
1550 				       struct devfreq *devfreq)
1551 {
1552 	struct devfreq **ptr;
1553 	int ret;
1554 
1555 	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
1556 	if (!ptr)
1557 		return -ENOMEM;
1558 
1559 	ret = devfreq_register_opp_notifier(dev, devfreq);
1560 	if (ret) {
1561 		devres_free(ptr);
1562 		return ret;
1563 	}
1564 
1565 	*ptr = devfreq;
1566 	devres_add(dev, ptr);
1567 
1568 	return 0;
1569 }
1570 EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
1571 
1572 /**
1573  * devm_devfreq_unregister_opp_notifier() - Resource-managed
1574  *					    devfreq_unregister_opp_notifier()
1575  * @dev:	The devfreq user device. (parent of devfreq)
1576  * @devfreq:	The devfreq object.
1577  */
1578 void devm_devfreq_unregister_opp_notifier(struct device *dev,
1579 					 struct devfreq *devfreq)
1580 {
1581 	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
1582 			       devm_devfreq_dev_match, devfreq));
1583 }
1584 EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
1585 
1586 /**
1587  * devfreq_register_notifier() - Register a driver with devfreq
1588  * @devfreq:	The devfreq object.
1589  * @nb:		The notifier block to register.
1590  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1591  */
1592 int devfreq_register_notifier(struct devfreq *devfreq,
1593 			      struct notifier_block *nb,
1594 			      unsigned int list)
1595 {
1596 	int ret = 0;
1597 
1598 	if (!devfreq)
1599 		return -EINVAL;
1600 
1601 	switch (list) {
1602 	case DEVFREQ_TRANSITION_NOTIFIER:
1603 		ret = srcu_notifier_chain_register(
1604 				&devfreq->transition_notifier_list, nb);
1605 		break;
1606 	default:
1607 		ret = -EINVAL;
1608 	}
1609 
1610 	return ret;
1611 }
1612 EXPORT_SYMBOL(devfreq_register_notifier);
1613 
/**
1615  * devfreq_unregister_notifier() - Unregister a driver with devfreq
1616  * @devfreq:	The devfreq object.
1617  * @nb:		The notifier block to be unregistered.
1618  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1619  */
1620 int devfreq_unregister_notifier(struct devfreq *devfreq,
1621 				struct notifier_block *nb,
1622 				unsigned int list)
1623 {
1624 	int ret = 0;
1625 
1626 	if (!devfreq)
1627 		return -EINVAL;
1628 
1629 	switch (list) {
1630 	case DEVFREQ_TRANSITION_NOTIFIER:
1631 		ret = srcu_notifier_chain_unregister(
1632 				&devfreq->transition_notifier_list, nb);
1633 		break;
1634 	default:
1635 		ret = -EINVAL;
1636 	}
1637 
1638 	return ret;
1639 }
1640 EXPORT_SYMBOL(devfreq_unregister_notifier);
1641 
/* Devres payload recording one devfreq notifier registration. */
struct devfreq_notifier_devres {
	struct devfreq *devfreq;	/* devfreq the notifier is registered on */
	struct notifier_block *nb;	/* the registered notifier block */
	unsigned int list;		/* e.g. DEVFREQ_TRANSITION_NOTIFIER */
};
1647 
1648 static void devm_devfreq_notifier_release(struct device *dev, void *res)
1649 {
1650 	struct devfreq_notifier_devres *this = res;
1651 
1652 	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
1653 }
1654 
1655 /**
 * devm_devfreq_register_notifier() - Resource-managed
 *				      devfreq_register_notifier()
1658  * @dev:	The devfreq user device. (parent of devfreq)
1659  * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be registered.
1661  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1662  */
1663 int devm_devfreq_register_notifier(struct device *dev,
1664 				struct devfreq *devfreq,
1665 				struct notifier_block *nb,
1666 				unsigned int list)
1667 {
1668 	struct devfreq_notifier_devres *ptr;
1669 	int ret;
1670 
1671 	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
1672 				GFP_KERNEL);
1673 	if (!ptr)
1674 		return -ENOMEM;
1675 
1676 	ret = devfreq_register_notifier(devfreq, nb, list);
1677 	if (ret) {
1678 		devres_free(ptr);
1679 		return ret;
1680 	}
1681 
1682 	ptr->devfreq = devfreq;
1683 	ptr->nb = nb;
1684 	ptr->list = list;
1685 	devres_add(dev, ptr);
1686 
1687 	return 0;
1688 }
1689 EXPORT_SYMBOL(devm_devfreq_register_notifier);
1690 
1691 /**
 * devm_devfreq_unregister_notifier() - Resource-managed
 *					devfreq_unregister_notifier()
1694  * @dev:	The devfreq user device. (parent of devfreq)
1695  * @devfreq:	The devfreq object.
1696  * @nb:		The notifier block to be unregistered.
1697  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1698  */
1699 void devm_devfreq_unregister_notifier(struct device *dev,
1700 				      struct devfreq *devfreq,
1701 				      struct notifier_block *nb,
1702 				      unsigned int list)
1703 {
1704 	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
1705 			       devm_devfreq_dev_match, devfreq));
1706 }
1707 EXPORT_SYMBOL(devm_devfreq_unregister_notifier);
1708