@@ -20,6 +20,7 @@
  *
  * Copyright (c) 2020 Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2021 Argonne National Laboratory. All rights reserved.
+ * Copyright (c) 2022 Sandia National Laboratories. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -42,46 +43,66 @@
 
 BEGIN_C_DECLS
 
-typedef opal_atomic_lock_t opal_thread_internal_mutex_t;
+typedef qthread_spinlock_t opal_thread_internal_mutex_t;
 
-#define OPAL_THREAD_INTERNAL_MUTEX_INITIALIZER OPAL_ATOMIC_LOCK_INIT
-#define OPAL_THREAD_INTERNAL_RECURSIVE_MUTEX_INITIALIZER OPAL_ATOMIC_LOCK_INIT
+#define OPAL_THREAD_INTERNAL_MUTEX_INITIALIZER QTHREAD_MUTEX_INITIALIZER
+#define OPAL_THREAD_INTERNAL_RECURSIVE_MUTEX_INITIALIZER QTHREAD_RECURSIVE_MUTEX_INITIALIZER
 
 static inline int opal_thread_internal_mutex_init(opal_thread_internal_mutex_t *p_mutex,
                                                   bool recursive)
 {
-    opal_atomic_lock_init(p_mutex, 0);
+    opal_threads_ensure_init_qthreads();
+#if OPAL_ENABLE_DEBUG
+    int ret = qthread_spinlock_init(p_mutex, recursive);
+    if (QTHREAD_SUCCESS != ret) {
+        opal_output(0, "opal_thread_internal_mutex_init()");
+    }
+#else
+    qthread_spinlock_init(p_mutex, recursive);
+#endif
     return OPAL_SUCCESS;
 }
 
 static inline void opal_thread_internal_mutex_lock(opal_thread_internal_mutex_t *p_mutex)
 {
     opal_threads_ensure_init_qthreads();
-
-    int ret = opal_atomic_trylock(p_mutex);
-    while (0 != ret) {
-        qthread_yield();
-        ret = opal_atomic_trylock(p_mutex);
+#if OPAL_ENABLE_DEBUG
+    int ret = qthread_spinlock_lock(p_mutex);
+    if (QTHREAD_SUCCESS != ret) {
+        opal_output(0, "opal_thread_internal_mutex_lock()");
     }
+#else
+    qthread_spinlock_lock(p_mutex);
+#endif
 }
 
 static inline int opal_thread_internal_mutex_trylock(opal_thread_internal_mutex_t *p_mutex)
 {
     opal_threads_ensure_init_qthreads();
-
-    int ret = opal_atomic_trylock(p_mutex);
-    if (0 != ret) {
-        /* Yield to avoid a deadlock. */
-        qthread_yield();
-    }
-    return ret;
+    int ret = qthread_spinlock_trylock(p_mutex);
+    if (QTHREAD_OPFAIL == ret) {
+        return 1;
+    } else if (QTHREAD_SUCCESS != ret) {
+#if OPAL_ENABLE_DEBUG
+        opal_output(0, "opal_thread_internal_mutex_trylock()");
+#endif
+        return 1;
+    }
+    return 0;
 }
 
 static inline void opal_thread_internal_mutex_unlock(opal_thread_internal_mutex_t *p_mutex)
 {
     opal_threads_ensure_init_qthreads();
-
-    opal_atomic_unlock(p_mutex);
+    int ret;
+#if OPAL_ENABLE_DEBUG
+    ret = qthread_spinlock_unlock(p_mutex);
+    if (QTHREAD_SUCCESS != ret) {
+        opal_output(0, "opal_thread_internal_mutex_unlock()");
+    }
+#else
+    qthread_spinlock_unlock(p_mutex);
+#endif
     /* For fairness of locking. */
     qthread_yield();
 }
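Note on the new trylock contract: qthread_spinlock_trylock() reports a busy lock as QTHREAD_OPFAIL, which the shim maps back to a plain nonzero return, so callers keep the existing 0-means-acquired convention. A minimal caller-side sketch of the API above (the include path and the demo_* names are illustrative assumptions, not part of the patch):

```c
#include <stdbool.h>
#include "opal/mca/threads/mutex.h" /* assumed in-tree include path */

/* Illustrative globals, not from the patch. */
static opal_thread_internal_mutex_t demo_lock = OPAL_THREAD_INTERNAL_MUTEX_INITIALIZER;
static int demo_counter = 0;

static void demo_increment(void)
{
    /* Blocking acquire: now parks inside qthread_spinlock_lock() instead
     * of the old opal_atomic_trylock()/qthread_yield() loop. */
    opal_thread_internal_mutex_lock(&demo_lock);
    ++demo_counter;
    opal_thread_internal_mutex_unlock(&demo_lock);
}

static bool demo_try_increment(void)
{
    /* 0 means the lock was acquired; 1 means it was busy (or errored). */
    if (0 == opal_thread_internal_mutex_trylock(&demo_lock)) {
        ++demo_counter;
        opal_thread_internal_mutex_unlock(&demo_lock);
        return true;
    }
    return false;
}
```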
@@ -97,19 +118,19 @@ typedef struct opal_thread_cond_waiter_t {
 } opal_thread_cond_waiter_t;
 
 typedef struct {
-    opal_atomic_lock_t m_lock;
+    opal_thread_internal_mutex_t m_lock;
     opal_thread_cond_waiter_t *m_waiter_head;
     opal_thread_cond_waiter_t *m_waiter_tail;
 } opal_thread_internal_cond_t;
 
 #define OPAL_THREAD_INTERNAL_COND_INITIALIZER \
     { \
-        .m_lock = OPAL_ATOMIC_LOCK_INIT, .m_waiter_head = NULL, .m_waiter_tail = NULL, \
+        .m_lock = QTHREAD_MUTEX_INITIALIZER, .m_waiter_head = NULL, .m_waiter_tail = NULL, \
     }
 
 static inline int opal_thread_internal_cond_init(opal_thread_internal_cond_t *p_cond)
 {
-    opal_atomic_lock_init(&p_cond->m_lock, 0);
+    qthread_spinlock_init(&p_cond->m_lock, false /* is_recursive */);
     p_cond->m_waiter_head = NULL;
     p_cond->m_waiter_tail = NULL;
     return OPAL_SUCCESS;
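Since m_lock now reuses opal_thread_internal_mutex_t, the static initializer and the qthread_spinlock_init() call have to agree on the lock's representation; the internal list lock is always non-recursive. A short sketch of the two initialization styles (illustrative names, same assumed header as above):

```c
/* Static initialization: usable for file-scope condition variables. */
static opal_thread_internal_cond_t demo_cond_static = OPAL_THREAD_INTERNAL_COND_INITIALIZER;

/* Dynamic initialization: for condition variables embedded in
 * heap-allocated structures. */
static int demo_cond_setup(opal_thread_internal_cond_t *p_cond)
{
    return opal_thread_internal_cond_init(p_cond); /* always OPAL_SUCCESS */
}
```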
@@ -121,24 +142,23 @@ static inline void opal_thread_internal_cond_wait(opal_thread_internal_cond_t *p
     opal_threads_ensure_init_qthreads();
     /* This thread is taking "lock", so only this thread can access this
      * condition variable. */
-    opal_atomic_lock(&p_cond->m_lock);
+    qthread_spinlock_lock(&p_cond->m_lock);
     opal_thread_cond_waiter_t waiter = {0, NULL};
     if (NULL == p_cond->m_waiter_head) {
         p_cond->m_waiter_tail = &waiter;
     } else {
         p_cond->m_waiter_head->m_prev = &waiter;
     }
     p_cond->m_waiter_head = &waiter;
-    opal_atomic_unlock(&p_cond->m_lock);
-
-    while (1) {
+    qthread_spinlock_unlock(&p_cond->m_lock);
+    while (1) {
         opal_thread_internal_mutex_unlock(p_mutex);
         qthread_yield();
         opal_thread_internal_mutex_lock(p_mutex);
         /* Check if someone woke me up. */
-        opal_atomic_lock(&p_cond->m_lock);
+        qthread_spinlock_lock(&p_cond->m_lock);
         int signaled = waiter.m_signaled;
-        opal_atomic_unlock(&p_cond->m_lock);
+        qthread_spinlock_unlock(&p_cond->m_lock);
         if (1 == signaled) {
             break;
         }
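The wait loop above releases p_mutex, yields, retakes it, and only then re-checks m_signaled under the internal spinlock, so a wakeup says nothing about the caller's own condition; callers still need the standard predicate loop. A caller-side sketch (the q_* declarations are illustrative, not from the patch):

```c
/* Illustrative shared state guarded by the shim's mutex. */
static opal_thread_internal_mutex_t q_lock = OPAL_THREAD_INTERNAL_MUTEX_INITIALIZER;
static opal_thread_internal_cond_t q_cond = OPAL_THREAD_INTERNAL_COND_INITIALIZER;
static int q_ready = 0;

static void demo_consumer(void)
{
    opal_thread_internal_mutex_lock(&q_lock);
    /* Re-check the predicate after every wakeup: cond_wait() drops and
     * reacquires q_lock internally. */
    while (!q_ready) {
        opal_thread_internal_cond_wait(&q_cond, &q_lock);
    }
    q_ready = 0;
    opal_thread_internal_mutex_unlock(&q_lock);
}
```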
@@ -148,7 +168,7 @@ static inline void opal_thread_internal_cond_wait(opal_thread_internal_cond_t *p
 
 static inline void opal_thread_internal_cond_broadcast(opal_thread_internal_cond_t *p_cond)
 {
-    opal_atomic_lock(&p_cond->m_lock);
+    qthread_spinlock_lock(&p_cond->m_lock);
     while (NULL != p_cond->m_waiter_tail) {
         opal_thread_cond_waiter_t *p_cur_tail = p_cond->m_waiter_tail;
         p_cond->m_waiter_tail = p_cur_tail->m_prev;
@@ -157,12 +177,12 @@ static inline void opal_thread_internal_cond_broadcast(opal_thread_internal_cond
     }
     /* No waiters. */
     p_cond->m_waiter_head = NULL;
-    opal_atomic_unlock(&p_cond->m_lock);
+    qthread_spinlock_unlock(&p_cond->m_lock);
 }
 
 static inline void opal_thread_internal_cond_signal(opal_thread_internal_cond_t *p_cond)
 {
-    opal_atomic_lock(&p_cond->m_lock);
+    qthread_spinlock_lock(&p_cond->m_lock);
     if (NULL != p_cond->m_waiter_tail) {
         opal_thread_cond_waiter_t *p_cur_tail = p_cond->m_waiter_tail;
         p_cond->m_waiter_tail = p_cur_tail->m_prev;
@@ -172,7 +192,7 @@ static inline void opal_thread_internal_cond_signal(opal_thread_internal_cond_t
             p_cond->m_waiter_head = NULL;
         }
     }
-    opal_atomic_unlock(&p_cond->m_lock);
+    qthread_spinlock_unlock(&p_cond->m_lock);
 }
 
 static inline void opal_thread_internal_cond_destroy(opal_thread_internal_cond_t *p_cond)
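For completeness, the producer half that pairs with the consumer sketch above (same illustrative q_* declarations). Waiters are pushed at m_waiter_head and signal pops from m_waiter_tail, so single wakeups are FIFO:

```c
static void demo_producer(void)
{
    opal_thread_internal_mutex_lock(&q_lock);
    q_ready = 1;
    /* Marks the oldest waiter's m_signaled under the internal spinlock. */
    opal_thread_internal_cond_signal(&q_cond);
    opal_thread_internal_mutex_unlock(&q_lock);
}
```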