 #include "platform/mbed_assert.h"
 #include "platform/mbed_error.h"

+#if !MBED_CONF_TARGET_CUSTOM_TICKERS
+#include "us_ticker_api.h"
+#include "lp_ticker_api.h"
+#endif
+
+// It's almost always worth avoiding division, but only worth avoiding
+// multiplication on some cores.
+#if defined(__CORTEX_M0) || defined(__CORTEX_M0PLUS) || defined(__CORTEX_M23)
+#define SLOW_MULTIPLY 1
+#else
+#define SLOW_MULTIPLY 0
+#endif
+
+// Do we compute ratio from frequency, or can we always get it from defines?
+#if MBED_CONF_TARGET_CUSTOM_TICKERS || (DEVICE_USTICKER && !defined US_TICKER_PERIOD_NUM) || (DEVICE_LPTICKER && !defined LP_TICKER_PERIOD_NUM)
+#define COMPUTE_RATIO_FROM_FREQUENCY 1
+#else
+#define COMPUTE_RATIO_FROM_FREQUENCY 0
+#endif
+
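A quick aside on the SLOW_MULTIPLY switch above (an illustration, not part of the patch; the 250 kHz ticker is an assumed example, and the snippet relies on the SLOW_MULTIPLY macro just defined): a 250 kHz ticker has a 4 us period, so its period_num would be 4 with period_num_shifts of 2, letting the tick-to-microsecond scaling later in the patch use a shift instead of a multiply on the listed cores.

#include <stdint.h>

// Sketch: how the scaling step later in the patch treats an assumed 250 kHz ticker
// (period_num == 4, period_num_shifts == 2); both branches give 4000 for 1000 ticks.
static uint64_t example_scale_ticks(uint32_t elapsed_ticks)
{
    if (SLOW_MULTIPLY) {
        return (uint64_t)elapsed_ticks << 2;   // Cortex-M0/M0+/M23: shift by period_num_shifts
    } else {
        return (uint64_t)elapsed_ticks * 4;    // other cores: multiply by period_num
    }
}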
 static void schedule_interrupt(const ticker_data_t *const ticker);
 static void update_present_time(const ticker_data_t *const ticker);

+/* Macros that either look up the info from mbed_ticker_queue_t, or give a constant.
+ * Some constants are defined during the definition of initialize, to keep the
+ * compile-time and run-time calculations alongside each other.
+ */
+#ifdef MBED_TICKER_CONSTANT_PERIOD_NUM
+#define TICKER_PERIOD_NUM(queue) MBED_TICKER_CONSTANT_PERIOD_NUM
+// don't bother doing this - rely on the compiler being able to convert "/ 2^k" to ">> k".
+#define TICKER_PERIOD_NUM_SHIFTS(queue) (-1)
+#else
+#define TICKER_PERIOD_NUM(queue) ((queue)->period_num)
+#define TICKER_PERIOD_NUM_SHIFTS(queue) ((queue)->period_num_shifts)
+#endif
+
+#ifdef MBED_TICKER_CONSTANT_PERIOD_DEN
+#define TICKER_PERIOD_DEN(queue) MBED_TICKER_CONSTANT_PERIOD_DEN
+#define TICKER_PERIOD_DEN_SHIFTS(queue) (-1)
+#else
+#define TICKER_PERIOD_DEN(queue) ((queue)->period_den)
+#define TICKER_PERIOD_DEN_SHIFTS(queue) ((queue)->period_den_shifts)
+#endif
+
+// But the above can generate compiler warnings from `if (-1 >= 0) { x >>= -1; }`
+#if defined (__CC_ARM)
+#pragma diag_suppress 62  // Shift count is negative
+#elif defined (__GNUC__)
+#pragma GCC diagnostic ignored "-Wshift-count-negative"
+#elif defined (__ICCARM__)
+#pragma diag_suppress=Pe062  // Shift count is negative
+#endif
+
+#ifdef MBED_TICKER_CONSTANT_MASK
+#define TICKER_BITMASK(queue) MBED_TICKER_CONSTANT_MASK
+#define TICKER_MAX_DELTA(queue) CONSTANT_MAX_DELTA
+#else
+#define TICKER_BITMASK(queue) ((queue)->bitmask)
+#define TICKER_MAX_DELTA(queue) ((queue)->max_delta)
+#endif
+
+#if defined MBED_TICKER_CONSTANT_PERIOD && defined MBED_TICKER_CONSTANT_MASK
+#define TICKER_MAX_DELTA_US(queue) CONSTANT_MAX_DELTA_US
+#else
+#define TICKER_MAX_DELTA_US(queue) ((queue)->max_delta_us)
+#endif
+
+#if COMPUTE_RATIO_FROM_FREQUENCY
+static inline uint32_t gcd(uint32_t a, uint32_t b)
+{
+    do {
+        uint32_t r = a % b;
+        a = b;
+        b = r;
+    } while (b != 0);
+    return a;
+}
+
+static int exact_log2(uint32_t n)
+{
+    for (int i = 31; i > 0; --i) {
+        if ((1U << i) == n) {
+            return i;
+        }
+    }
+    return -1;
+}
+#endif
+
 /*
  * Initialize a ticker instance.
  */
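To make the numerator/denominator representation concrete (an illustration, not part of the patch; 32768 Hz is an assumed frequency, and the snippet uses the gcd() and exact_log2() helpers added above, so it presumes a COMPUTE_RATIO_FROM_FREQUENCY build): gcd(32768, 1000000) is 64, so one tick is 15625/512 us; the denominator is an exact power of two (shift 9) while the numerator is not (shift -1).

#include <assert.h>
#include <stdint.h>

// Sketch: the ratio initialize() would derive for an assumed 32768 Hz ticker.
static void example_ratio_32khz(void)
{
    uint32_t frequency = 32768;
    uint32_t period_gcd = gcd(frequency, 1000000);            // 64
    assert(1000000 / period_gcd == 15625);                     // period_num
    assert(frequency / period_gcd == 512);                     // period_den
    assert(exact_log2(15625) == -1 && exact_log2(512) == 9);   // shifts stored alongside
}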
@@ -40,9 +126,36 @@ static void initialize(const ticker_data_t *ticker)
 {
     ticker->interface->init();

+#if MBED_TRAP_ERRORS_ENABLED || COMPUTE_RATIO_FROM_FREQUENCY || !defined MBED_TICKER_CONSTANT_MASK
     const ticker_info_t *info = ticker->interface->get_info();
+#endif
+
+#if !MBED_CONF_TARGET_CUSTOM_TICKERS && MBED_TRAP_ERRORS_ENABLED
+    /* They must be passing us one of the well-known tickers. Check info
+     * rather than the data, to cope with the lp_ticker_wrapper. It doesn't count
+     * as a "custom ticker" for the purpose of this optimization.
+     *
+     * This check has the downside of potentially pulling in code for an unused ticker.
+     * This is minimized by using direct xxx_ticker_get_info() calls rather than
+     * `get_us_ticker_data()->interface->get_info()` which would pull in the entire system,
+     * and we wrap it in `MBED_TRAP_ERRORS_ENABLED`.
+     */
+#if DEVICE_USTICKER && DEVICE_LPTICKER
+    MBED_ASSERT(info == us_ticker_get_info() || info == lp_ticker_get_info());
+#elif DEVICE_USTICKER
+    MBED_ASSERT(info == us_ticker_get_info());
+#elif DEVICE_LPTICKER
+    MBED_ASSERT(info == lp_ticker_get_info());
+#else
+    MBED_ASSERT(false);
+#endif
+#endif
+
+#if COMPUTE_RATIO_FROM_FREQUENCY
+    // Will need to use frequency computation for at least some cases, so always do it
+    // to minimise code size.
     uint32_t frequency = info->frequency;
-    if (info->frequency == 0) {
+    if (frequency == 0) {
 #if MBED_TRAP_ERRORS_ENABLED
         MBED_ERROR(
             MBED_MAKE_ERROR(
@@ -56,16 +169,27 @@ static void initialize(const ticker_data_t *ticker)
 #endif // MBED_TRAP_ERRORS_ENABLED
     }

-    uint8_t frequency_shifts = 0;
-    for (uint8_t i = 31; i > 0; --i) {
-        if ((1U << i) == frequency) {
-            frequency_shifts = i;
-            break;
-        }
-    }
-
+    const uint32_t period_gcd = gcd(frequency, 1000000);
+    ticker->queue->period_num = 1000000 / period_gcd;
+    ticker->queue->period_num_shifts = exact_log2(ticker->queue->period_num);
+    ticker->queue->period_den = frequency / period_gcd;
+    ticker->queue->period_den_shifts = exact_log2(ticker->queue->period_den);
+#elif !MBED_TICKER_CONSTANT_PERIOD
+    // Have ratio defines, but need to figure out which one applies.
+    // `runs_in_deep_sleep` is a viable proxy. (We have asserts above that
+    // check that they're only passing usticker or lpticker).
+    const bool is_usticker = !DEVICE_LPTICKER || !ticker->interface->runs_in_deep_sleep;
+#ifndef MBED_TICKER_CONSTANT_PERIOD_NUM
+    ticker->queue->period_num = is_usticker ? US_TICKER_PERIOD_NUM : LP_TICKER_PERIOD_NUM;
+#endif
+#ifndef MBED_TICKER_CONSTANT_PERIOD_DEN
+    ticker->queue->period_den = is_usticker ? US_TICKER_PERIOD_DEN : LP_TICKER_PERIOD_DEN;
+#endif
+#endif // COMPUTE_RATIO_FROM_FREQUENCY / MBED_TICKER_CONSTANT_PERIOD
+
+#ifndef MBED_TICKER_CONSTANT_MASK
     uint32_t bits = info->bits;
-    if ((info->bits > 32) || (info->bits < 4)) {
+    if ((bits > 32) || (bits < 4)) {
 #if MBED_TRAP_ERRORS_ENABLED
         MBED_ERROR(
             MBED_MAKE_ERROR(
@@ -78,19 +202,24 @@ static void initialize(const ticker_data_t *ticker)
         bits = 32;
 #endif // MBED_TRAP_ERRORS_ENABLED
     }
-    uint32_t max_delta = 0x7 << (bits - 4); // 7/16th
-    uint64_t max_delta_us =
-        ((uint64_t)max_delta * 1000000 + frequency - 1) / frequency;
+    ticker->queue->bitmask = bits == 32 ? 0xFFFFFFFF : (1U << bits) - 1;
+    ticker->queue->max_delta = 7 << (bits - 4); // 7/16th
+#else // MBED_TICKER_CONSTANT_MASK
+#define CONSTANT_MAX_DELTA (7 * ((MBED_TICKER_CONSTANT_MASK >> 4) + 1)) // 7/16th
+#endif // MBED_TICKER_CONSTANT_MASK
+
+#if !(defined MBED_TICKER_CONSTANT_PERIOD && defined MBED_TICKER_CONSTANT_MASK)
+    ticker->queue->max_delta_us =
+        ((uint64_t)TICKER_MAX_DELTA(ticker->queue) * TICKER_PERIOD_NUM(ticker->queue) + TICKER_PERIOD_DEN(ticker->queue) - 1) / TICKER_PERIOD_DEN(ticker->queue);
+#else
+#define CONSTANT_MAX_DELTA_US \
+    (((uint64_t)CONSTANT_MAX_DELTA * MBED_TICKER_CONSTANT_PERIOD_NUM + MBED_TICKER_CONSTANT_PERIOD_DEN - 1) / MBED_TICKER_CONSTANT_PERIOD_DEN)
+#endif

     ticker->queue->event_handler = NULL;
     ticker->queue->head = NULL;
     ticker->queue->tick_last_read = ticker->interface->read();
     ticker->queue->tick_remainder = 0;
-    ticker->queue->frequency = frequency;
-    ticker->queue->frequency_shifts = frequency_shifts;
-    ticker->queue->bitmask = ((uint64_t)1 << bits) - 1;
-    ticker->queue->max_delta = max_delta;
-    ticker->queue->max_delta_us = max_delta_us;
     ticker->queue->present_time = 0;
     ticker->queue->dispatching = false;
     ticker->queue->suspended = false;
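Some worked numbers for the 7/16ths limit (an illustration, not part of the patch; it assumes a 16-bit, 32768 Hz ticker): max_delta comes out as 7 << 12 = 28672 ticks, and max_delta_us rounds 28672 * 15625 / 512 up to 875000 us, i.e. 0.875 s of the roughly 2 s the 16-bit counter can span.

#include <assert.h>
#include <stdint.h>

// Sketch: the limits initialize() would store for an assumed 16-bit, 32768 Hz ticker.
static void example_limits_16bit_32khz(void)
{
    uint32_t bits = 16;
    uint32_t max_delta = 7 << (bits - 4);                          // 28672 ticks, 7/16 of 65536
    uint64_t max_delta_us = (28672ULL * 15625 + 512 - 1) / 512;    // rounds up to 875000 us
    assert(max_delta == 28672 && max_delta_us == 875000);
}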
@@ -154,27 +283,31 @@ static void update_present_time(const ticker_data_t *const ticker)
         return;
     }

-    uint64_t elapsed_ticks = (ticker_time - queue->tick_last_read) & queue->bitmask;
+    uint32_t elapsed_ticks = (ticker_time - queue->tick_last_read) & TICKER_BITMASK(queue);
     queue->tick_last_read = ticker_time;

+    // Convert elapsed_ticks to elapsed_us as (elapsed_ticks * period_num / period_den)
+    // adding in any remainder from the last division
+    uint64_t scaled_ticks;
+    if (SLOW_MULTIPLY && TICKER_PERIOD_NUM_SHIFTS(queue) >= 0) {
+        scaled_ticks = (uint64_t) elapsed_ticks << TICKER_PERIOD_NUM_SHIFTS(queue);
+    } else {
+        scaled_ticks = (uint64_t) elapsed_ticks * TICKER_PERIOD_NUM(queue);
+    }
     uint64_t elapsed_us;
-    if (1000000 == queue->frequency) {
-        // Optimized for 1MHz
-
-        elapsed_us = elapsed_ticks;
+    if (TICKER_PERIOD_DEN_SHIFTS(queue) == 0) {
+        // Optimized for cases that don't need division
+        elapsed_us = scaled_ticks;
     } else {
-        uint64_t us_x_ticks = elapsed_ticks * 1000000;
-        if (0 != queue->frequency_shifts) {
-            // Optimized for frequencies divisible by 2
-            elapsed_us = us_x_ticks >> queue->frequency_shifts;
-            queue->tick_remainder += us_x_ticks - (elapsed_us << queue->frequency_shifts);
+        scaled_ticks += queue->tick_remainder;
+        if (TICKER_PERIOD_DEN_SHIFTS(queue) >= 0) {
+            // Speed-optimised for shifts
+            elapsed_us = scaled_ticks >> TICKER_PERIOD_DEN_SHIFTS(queue);
+            queue->tick_remainder = scaled_ticks - (elapsed_us << TICKER_PERIOD_DEN_SHIFTS(queue));
         } else {
-            elapsed_us = us_x_ticks / queue->frequency;
-            queue->tick_remainder += us_x_ticks - elapsed_us * queue->frequency;
-        }
-        if (queue->tick_remainder >= queue->frequency) {
-            elapsed_us += 1;
-            queue->tick_remainder -= queue->frequency;
+            // General case division
+            elapsed_us = scaled_ticks / TICKER_PERIOD_DEN(queue);
+            queue->tick_remainder = scaled_ticks - elapsed_us * TICKER_PERIOD_DEN(queue);
         }
     }

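A worked pass through that conversion (an illustration, not part of the patch; it assumes the same 32768 Hz ticker as above, so period_num = 15625, period_den = 512, period_den_shifts = 9): 1000 ticks is 30517.578 us, so 30517 us is reported now and 296/512 us is carried in tick_remainder for the next read.

#include <assert.h>
#include <stdint.h>

// Sketch: 1000 elapsed ticks on an assumed 32768 Hz ticker (15625/512 us per tick).
static void example_elapsed_32khz(void)
{
    uint64_t scaled_ticks = 1000ULL * 15625;                       // 15625000, plus any old remainder
    uint64_t elapsed_us = scaled_ticks >> 9;                       // 30517 us reported
    uint64_t tick_remainder = scaled_ticks - (elapsed_us << 9);    // 296 carried forward
    assert(elapsed_us == 30517 && tick_remainder == 296);
}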
@@ -190,25 +323,37 @@ static timestamp_t compute_tick_round_up(const ticker_data_t *const ticker, us_t
     ticker_event_queue_t *queue = ticker->queue;
     us_timestamp_t delta_us = timestamp - queue->present_time;

-    timestamp_t delta = ticker->queue->max_delta;
-    if (delta_us <= ticker->queue->max_delta_us) {
+    timestamp_t delta = TICKER_MAX_DELTA(ticker->queue);
+    if (delta_us <= TICKER_MAX_DELTA_US(ticker->queue)) {
         // Checking max_delta_us ensures the operation will not overflow

-        if (1000000 == queue->frequency) {
-            // Optimized for 1MHz
-            delta = delta_us;
-        } else if (0 != queue->frequency_shifts) {
-            // Optimized frequencies divisible by 2
-            delta = ((delta_us << ticker->queue->frequency_shifts) + 1000000 - 1) / 1000000;
+        // Convert delta_us to delta (ticks) as (delta_us * period_den / period_num)
+        // taking care to round up if num != 1
+        uint64_t scaled_delta;
+        if (SLOW_MULTIPLY && TICKER_PERIOD_DEN_SHIFTS(queue) >= 0) {
+            // Optimized denominators divisible by 2
+            scaled_delta = delta_us << TICKER_PERIOD_DEN_SHIFTS(queue);
         } else {
             // General case
-            delta = (delta_us * queue->frequency + 1000000 - 1) / 1000000;
+            scaled_delta = delta_us * TICKER_PERIOD_DEN(queue);
+        }
+        if (TICKER_PERIOD_NUM_SHIFTS(queue) == 0) {
+            delta = scaled_delta;
+        } else {
+            scaled_delta += TICKER_PERIOD_NUM(queue) - 1;
+            if (TICKER_PERIOD_NUM_SHIFTS(queue) >= 0) {
+                // Optimized numerators divisible by 2
+                delta = scaled_delta >> TICKER_PERIOD_NUM_SHIFTS(queue);
+            } else {
+                // General case
+                delta = scaled_delta / TICKER_PERIOD_NUM(queue);
+            }
         }
-        if (delta > ticker->queue->max_delta) {
-            delta = ticker->queue->max_delta;
+        if (delta > TICKER_MAX_DELTA(queue)) {
+            delta = TICKER_MAX_DELTA(queue);
         }
     }
-    return (queue->tick_last_read + delta) & queue->bitmask;
+    return (queue->tick_last_read + delta) & TICKER_BITMASK(queue);
 }
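And the reverse direction (an illustration, not part of the patch; same assumed 32768 Hz ticker): a request 1000 us ahead is scaled to 1000 * 512 = 512000, then divided by the numerator 15625 with rounding up, giving 33 ticks, about 1007 us, so the match point is never earlier than requested.

#include <assert.h>
#include <stdint.h>

// Sketch: rounding 1000 us up to ticks for an assumed 32768 Hz ticker.
static void example_round_up_32khz(void)
{
    uint64_t scaled_delta = 1000ULL * 512;                 // delta_us * period_den
    uint32_t delta = (scaled_delta + 15625 - 1) / 15625;   // 33 ticks, ~1007 us
    assert(delta == 33);
}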

 //NOTE: Must be called from critical section!
@@ -308,7 +453,7 @@ static void schedule_interrupt(const ticker_data_t *const ticker)
         }
     } else {
         uint32_t match_tick =
-            (queue->tick_last_read + queue->max_delta) & queue->bitmask;
+            (queue->tick_last_read + TICKER_MAX_DELTA(queue)) & TICKER_BITMASK(queue);
         ticker->interface->set_interrupt(match_tick);
     }
 }