@@ -77,14 +77,12 @@ struct clk_core {
 	unsigned int		protect_count;
 	unsigned long		min_rate;
 	unsigned long		max_rate;
-	unsigned long		default_request_rate;
 	unsigned long		accuracy;
 	int			phase;
 	struct clk_duty		duty;
 	struct hlist_head	children;
 	struct hlist_node	child_node;
 	struct hlist_head	clks;
-	struct list_head	pending_requests;
 	unsigned int		notifier_count;
 #ifdef CONFIG_DEBUG_FS
 	struct dentry		*dentry;
@@ -107,12 +105,6 @@ struct clk {
 	struct hlist_node clks_node;
 };
 
-struct clk_request {
-	struct list_head list;
-	struct clk *clk;
-	unsigned long rate;
-};
-
 /***           runtime pm          ***/
 static int clk_pm_runtime_get(struct clk_core *core)
 {
@@ -1462,14 +1454,10 @@ unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
 {
 	int ret;
 	struct clk_rate_request req;
-	struct clk_request *clk_req;
 
 	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
 	req.rate = rate;
 
-	list_for_each_entry(clk_req, &hw->core->pending_requests, list)
-		req.min_rate = max(clk_req->rate, req.min_rate);
-
 	ret = clk_core_round_rate_nolock(hw->core, &req);
 	if (ret)
 		return 0;
@@ -1490,7 +1478,6 @@ EXPORT_SYMBOL_GPL(clk_hw_round_rate);
 long clk_round_rate(struct clk *clk, unsigned long rate)
 {
 	struct clk_rate_request req;
-	struct clk_request *clk_req;
 	int ret;
 
 	if (!clk)
@@ -1504,9 +1491,6 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
 	req.rate = rate;
 
-	list_for_each_entry(clk_req, &clk->core->pending_requests, list)
-		req.min_rate = max(clk_req->rate, req.min_rate);
-
 	ret = clk_core_round_rate_nolock(clk->core, &req);
 
 	if (clk->exclusive_count)
@@ -1974,7 +1958,6 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
 	unsigned long new_rate;
 	unsigned long min_rate;
 	unsigned long max_rate;
-	struct clk_request *req;
 	int p_index = 0;
 	long ret;
 
@@ -1989,9 +1972,6 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
 
 	clk_core_get_boundaries(core, &min_rate, &max_rate);
 
-	list_for_each_entry(req, &core->pending_requests, list)
-		min_rate = max(req->rate, min_rate);
-
 	/* find the closest rate and parent clk/rate */
 	if (clk_core_can_round(core)) {
 		struct clk_rate_request req;
@@ -2188,7 +2168,6 @@ static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
 {
 	int ret, cnt;
 	struct clk_rate_request req;
-	struct clk_request *clk_req;
 
 	lockdep_assert_held(&prepare_lock);
 
@@ -2203,9 +2182,6 @@ static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
 	clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
 	req.rate = req_rate;
 
-	list_for_each_entry(clk_req, &core->pending_requests, list)
-		req.min_rate = max(clk_req->rate, req.min_rate);
-
 	ret = clk_core_round_rate_nolock(core, &req);
 
 	/* restore the protection */
@@ -2299,9 +2275,6 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
 
 	ret = clk_core_set_rate_nolock(clk->core, rate);
 
-	if (!list_empty(&clk->core->pending_requests))
-		clk->core->default_request_rate = rate;
-
 	if (clk->exclusive_count)
 		clk_core_rate_protect(clk->core);
 
@@ -2470,99 +2443,6 @@ int clk_set_max_rate(struct clk *clk, unsigned long rate)
 }
 EXPORT_SYMBOL_GPL(clk_set_max_rate);
 
-/**
- * clk_request_start - Request a rate to be enforced temporarily
- * @clk: the clk to act on
- * @rate: the new rate asked for
- *
- * This function will create a request to temporarily raise the rate of
- * the clock to a given minimum.
- *
- * This is meant as a best effort mechanism and while the rate of the
- * clock will be guaranteed to be equal or higher than the requested
- * rate, there's no guarantee of what the actual rate will be due to other
- * factors (other requests previously set, clock boundaries, etc.).
- *
- * Once the request is marked as done through clk_request_done(), the
- * rate will be reverted back to what it was before the request.
- *
- * The reported boundaries of the clock will also be adjusted so that
- * clk_round_rate() takes those requests into account. A call to
- * clk_set_rate() during a request will affect the rate the clock will
- * return to after the requests on that clock are done.
- *
- * Returns a valid struct clk_request pointer on success, an ERR_PTR otherwise.
- */
-struct clk_request *clk_request_start(struct clk *clk, unsigned long rate)
-{
-	struct clk_request *req;
-	int ret;
-
-	if (!clk)
-		return ERR_PTR(-EINVAL);
-
-	req = kzalloc(sizeof(*req), GFP_KERNEL);
-	if (!req)
-		return ERR_PTR(-ENOMEM);
-
-	clk_prepare_lock();
-
-	req->clk = clk;
-	req->rate = rate;
-
-	if (list_empty(&clk->core->pending_requests))
-		clk->core->default_request_rate = clk_core_get_rate_recalc(clk->core);
-
-	ret = clk_core_set_rate_nolock(clk->core, rate);
-	if (ret) {
-		clk_prepare_unlock();
-		kfree(req);
-		return ERR_PTR(ret);
-	}
-
-	list_add_tail(&req->list, &clk->core->pending_requests);
-	clk_prepare_unlock();
-
-	return req;
-}
-EXPORT_SYMBOL_GPL(clk_request_start);
-
-/**
- * clk_request_done - Mark a clk_request as done
- * @req: the request to mark done
- *
- * This function will remove the rate request from the clock and adjust
- * the clock rate back either to what it was before the request
- * started or, if there are other pending requests on that clock, to a
- * rate that satisfies them.
- */
-void clk_request_done(struct clk_request *req)
-{
-	struct clk_core *core = req->clk->core;
-
-	clk_prepare_lock();
-
-	list_del(&req->list);
-
-	if (list_empty(&core->pending_requests)) {
-		clk_core_set_rate_nolock(core, core->default_request_rate);
-		core->default_request_rate = 0;
-	} else {
-		struct clk_request *cur_req;
-		unsigned long new_rate = 0;
-
-		list_for_each_entry(cur_req, &core->pending_requests, list)
-			new_rate = max(new_rate, cur_req->rate);
-
-		clk_core_set_rate_nolock(core, new_rate);
-	}
-
-	clk_prepare_unlock();
-
-	kfree(req);
-}
-EXPORT_SYMBOL_GPL(clk_request_done);
-
 /**
  * clk_get_parent - return the parent of a clk
  * @clk: the clk whose parent gets returned
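For reference, the kerneldoc removed in the hunk above describes how consumers were expected to use this API before its removal. The following is a minimal sketch of such a caller; the foo_* names, the bus clock handle and the 400 MHz figure are hypothetical illustrations, not taken from an in-tree user.

static int foo_do_burst(struct device *dev, struct clk *bus)
{
	struct clk_request *req;
	int ret;

	/* Ask for at least 400 MHz while the burst transfer runs. */
	req = clk_request_start(bus, 400000000);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ret = foo_run_burst_transfer(dev);	/* hypothetical workload */

	/* Drop the request; the clock reverts to its earlier rate. */
	clk_request_done(req);

	return ret;
}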
@@ -4022,7 +3902,6 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 		goto fail_parents;
 
 	INIT_HLIST_HEAD(&core->clks);
-	INIT_LIST_HEAD(&core->pending_requests);
 
 	/*
 	 * Don't call clk_hw_create_clk() here because that would pin the