@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/device.h>
 #include <linux/init.h>
+#include <linux/pm_runtime.h>
 #include <linux/sched.h>
 #include <linux/clkdev.h>
 
@@ -46,6 +47,7 @@ struct clk_core {
 	const struct clk_ops	*ops;
 	struct clk_hw		*hw;
 	struct module		*owner;
+	struct device		*dev;
 	struct clk_core		*parent;
 	const char		**parent_names;
 	struct clk_core		**parents;
@@ -87,6 +89,26 @@ struct clk {
 	struct hlist_node clks_node;
 };
 
+/*** runtime pm ***/
+static int clk_pm_runtime_get(struct clk_core *core)
+{
+	int ret = 0;
+
+	if (!core->dev)
+		return 0;
+
+	ret = pm_runtime_get_sync(core->dev);
+	return ret < 0 ? ret : 0;
+}
+
+static void clk_pm_runtime_put(struct clk_core *core)
+{
+	if (!core->dev)
+		return;
+
+	pm_runtime_put_sync(core->dev);
+}
+
 /*** locking ***/
 static void clk_prepare_lock(void)
 {
@@ -150,26 +172,57 @@ static void clk_enable_unlock(unsigned long flags)
 
 static bool clk_core_is_prepared(struct clk_core *core)
 {
+	bool ret = false;
+
 	/*
 	 * .is_prepared is optional for clocks that can prepare
 	 * fall back to software usage counter if it is missing
 	 */
 	if (!core->ops->is_prepared)
 		return core->prepare_count;
 
-	return core->ops->is_prepared(core->hw);
+	if (!clk_pm_runtime_get(core)) {
+		ret = core->ops->is_prepared(core->hw);
+		clk_pm_runtime_put(core);
+	}
+
+	return ret;
 }
 
 static bool clk_core_is_enabled(struct clk_core *core)
 {
+	bool ret = false;
+
 	/*
 	 * .is_enabled is only mandatory for clocks that gate
 	 * fall back to software usage counter if .is_enabled is missing
 	 */
 	if (!core->ops->is_enabled)
 		return core->enable_count;
 
-	return core->ops->is_enabled(core->hw);
+	/*
+	 * Check if clock controller's device is runtime active before
+	 * calling .is_enabled callback. If not, assume that clock is
+	 * disabled, because we might be called from atomic context, from
+	 * which pm_runtime_get() is not allowed.
+	 * This function is called mainly from clk_disable_unused_subtree,
+	 * which ensures proper runtime pm activation of controller before
+	 * taking enable spinlock, but the below check is needed if one tries
+	 * to call it from other places.
+	 */
+	if (core->dev) {
+		pm_runtime_get_noresume(core->dev);
+		if (!pm_runtime_active(core->dev)) {
+			ret = false;
+			goto done;
+		}
+	}
+
+	ret = core->ops->is_enabled(core->hw);
+done:
+	clk_pm_runtime_put(core);
+
+	return ret;
 }
 
 /*** helper functions ***/
@@ -489,6 +542,8 @@ static void clk_core_unprepare(struct clk_core *core)
 	if (core->ops->unprepare)
 		core->ops->unprepare(core->hw);
 
+	clk_pm_runtime_put(core);
+
 	trace_clk_unprepare_complete(core);
 	clk_core_unprepare(core->parent);
 }
@@ -530,26 +585,33 @@ static int clk_core_prepare(struct clk_core *core)
 		return 0;
 
 	if (core->prepare_count == 0) {
-		ret = clk_core_prepare(core->parent);
+		ret = clk_pm_runtime_get(core);
 		if (ret)
 			return ret;
 
+		ret = clk_core_prepare(core->parent);
+		if (ret)
+			goto runtime_put;
+
 		trace_clk_prepare(core);
 
 		if (core->ops->prepare)
 			ret = core->ops->prepare(core->hw);
 
 		trace_clk_prepare_complete(core);
 
-		if (ret) {
-			clk_core_unprepare(core->parent);
-			return ret;
-		}
+		if (ret)
+			goto unprepare;
 	}
 
 	core->prepare_count++;
 
 	return 0;
+unprepare:
+	clk_core_unprepare(core->parent);
+runtime_put:
+	clk_pm_runtime_put(core);
+	return ret;
 }
 
 static int clk_core_prepare_lock(struct clk_core *core)
@@ -745,6 +807,9 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
 	if (core->flags & CLK_IGNORE_UNUSED)
 		return;
 
+	if (clk_pm_runtime_get(core))
+		return;
+
 	if (clk_core_is_prepared(core)) {
 		trace_clk_unprepare(core);
 		if (core->ops->unprepare_unused)
@@ -753,6 +818,8 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
 			core->ops->unprepare(core->hw);
 		trace_clk_unprepare_complete(core);
 	}
+
+	clk_pm_runtime_put(core);
 }
 
 static void clk_disable_unused_subtree(struct clk_core *core)
@@ -768,6 +835,9 @@ static void clk_disable_unused_subtree(struct clk_core *core)
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
 		clk_core_prepare_enable(core->parent);
 
+	if (clk_pm_runtime_get(core))
+		goto unprepare_out;
+
 	flags = clk_enable_lock();
 
 	if (core->enable_count)
@@ -792,6 +862,8 @@ static void clk_disable_unused_subtree(struct clk_core *core)
 
 unlock_out:
 	clk_enable_unlock(flags);
+	clk_pm_runtime_put(core);
+unprepare_out:
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
 		clk_core_disable_unprepare(core->parent);
 }
@@ -1038,9 +1110,13 @@ EXPORT_SYMBOL_GPL(clk_get_accuracy);
 static unsigned long clk_recalc(struct clk_core *core,
 				unsigned long parent_rate)
 {
-	if (core->ops->recalc_rate)
-		return core->ops->recalc_rate(core->hw, parent_rate);
-	return parent_rate;
+	unsigned long rate = parent_rate;
+
+	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
+		rate = core->ops->recalc_rate(core->hw, parent_rate);
+		clk_pm_runtime_put(core);
+	}
+	return rate;
 }
 
 /**
@@ -1565,6 +1641,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
 {
 	struct clk_core *top, *fail_clk;
 	unsigned long rate = req_rate;
+	int ret = 0;
 
 	if (!core)
 		return 0;
@@ -1581,21 +1658,28 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
 	if (!top)
 		return -EINVAL;
 
+	ret = clk_pm_runtime_get(core);
+	if (ret)
+		return ret;
+
 	/* notify that we are about to change rates */
 	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
 	if (fail_clk) {
 		pr_debug("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
 		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto err;
 	}
 
 	/* change the rates */
 	clk_change_rate(top);
 
 	core->req_rate = req_rate;
+err:
+	clk_pm_runtime_put(core);
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -1826,12 +1910,16 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
 		p_rate = parent->rate;
 	}
 
+	ret = clk_pm_runtime_get(core);
+	if (ret)
+		goto out;
+
 	/* propagate PRE_RATE_CHANGE notifications */
 	ret = __clk_speculate_rates(core, p_rate);
 
 	/* abort if a driver objects */
 	if (ret & NOTIFY_STOP_MASK)
-		goto out;
+		goto runtime_put;
 
 	/* do the re-parent */
 	ret = __clk_set_parent(core, parent, p_index);
@@ -1844,6 +1932,8 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
 		__clk_recalc_accuracies(core);
 	}
 
+runtime_put:
+	clk_pm_runtime_put(core);
 out:
 	clk_prepare_unlock();
 
@@ -2350,7 +2440,7 @@ static inline void clk_debug_unregister(struct clk_core *core)
  */
 static int __clk_core_init(struct clk_core *core)
 {
-	int i, ret = 0;
+	int i, ret;
 	struct clk_core *orphan;
 	struct hlist_node *tmp2;
 	unsigned long rate;
@@ -2360,6 +2450,10 @@ static int __clk_core_init(struct clk_core *core)
 
 	clk_prepare_lock();
 
+	ret = clk_pm_runtime_get(core);
+	if (ret)
+		goto unlock;
+
 	/* check to see if a clock with this name is already registered */
 	if (clk_core_lookup(core->name)) {
 		pr_debug("%s: clk %s already initialized\n",
@@ -2512,6 +2606,8 @@ static int __clk_core_init(struct clk_core *core)
 
 	kref_init(&core->ref);
 out:
+	clk_pm_runtime_put(core);
+unlock:
 	clk_prepare_unlock();
 
 	if (!ret)
@@ -2583,6 +2679,8 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 		goto fail_name;
 	}
 	core->ops = hw->init->ops;
+	if (dev && pm_runtime_enabled(dev))
+		core->dev = dev;
 	if (dev && dev->driver)
 		core->owner = dev->driver->owner;
0 commit comments