 *
 * Copyright (c) 2020 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2021 Argonne National Laboratory. All rights reserved.
+ * Copyright (c) 2022 Sandia National Laboratories. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow

BEGIN_C_DECLS

-typedef opal_atomic_lock_t opal_thread_internal_mutex_t;
+typedef aligned_t qthreads_lock_t;
+typedef qthreads_lock_t opal_thread_internal_mutex_t;

#define OPAL_THREAD_INTERNAL_MUTEX_INITIALIZER OPAL_ATOMIC_LOCK_INIT
#define OPAL_THREAD_INTERNAL_RECURSIVE_MUTEX_INITIALIZER OPAL_ATOMIC_LOCK_INIT

static inline int opal_thread_internal_mutex_init(opal_thread_internal_mutex_t *p_mutex,
                                                  bool recursive)
{
-    opal_atomic_lock_init(p_mutex, 0);
+    opal_threads_ensure_init_qthreads();
+#if OPAL_ENABLE_DEBUG
+    int ret = qthread_fill(p_mutex); /* mark the lock word full (unlocked); recursion is not tracked by qthreads locks */
+    if (QTHREAD_SUCCESS != ret) {
+        opal_output(0, "opal_thread_internal_mutex_init()");
+    }
+#else
+    qthread_fill(p_mutex);
+#endif
+
    return OPAL_SUCCESS;
}

static inline void opal_thread_internal_mutex_lock(opal_thread_internal_mutex_t *p_mutex)
{
    opal_threads_ensure_init_qthreads();
-
-    int ret = opal_atomic_trylock(p_mutex);
-    while (0 != ret) {
-        qthread_yield();
-        ret = opal_atomic_trylock(p_mutex);
+#if OPAL_ENABLE_DEBUG
+    int ret = qthread_lock(p_mutex);
+    if (QTHREAD_SUCCESS != ret) {
+        opal_output(0, "opal_thread_internal_mutex_lock()");
    }
+#else
+    qthread_lock(p_mutex);
+#endif
}

static inline int opal_thread_internal_mutex_trylock(opal_thread_internal_mutex_t *p_mutex)
{
    opal_threads_ensure_init_qthreads();
-
-    int ret = opal_atomic_trylock(p_mutex);
-    if (0 != ret) {
-        /* Yield to avoid a deadlock. */
-        qthread_yield();
+    int ret = qthread_trylock(p_mutex);
+    if (QTHREAD_OPFAIL == ret) {
+        return 1;
+    } else if (QTHREAD_SUCCESS != ret) {
+#if OPAL_ENABLE_DEBUG
+        opal_output(0, "opal_thread_internal_mutex_trylock()");
+#endif
+        return 1;
+    } else {
+        return 0;
    }
-    return ret;
}

static inline void opal_thread_internal_mutex_unlock(opal_thread_internal_mutex_t *p_mutex)
{
    opal_threads_ensure_init_qthreads();
-
-    opal_atomic_unlock(p_mutex);
+#if OPAL_ENABLE_DEBUG
+    int ret = qthread_unlock(p_mutex);
+    if (QTHREAD_SUCCESS != ret) {
+        opal_output(0, "opal_thread_internal_mutex_unlock()");
+    }
+#else
+    qthread_unlock(p_mutex);
+#endif
    /* For fairness of locking. */
    qthread_yield();
}
@@ -97,7 +120,7 @@ typedef struct opal_thread_cond_waiter_t {
} opal_thread_cond_waiter_t;

typedef struct {
-    opal_atomic_lock_t m_lock;
+    qthreads_lock_t m_lock;
    opal_thread_cond_waiter_t *m_waiter_head;
    opal_thread_cond_waiter_t *m_waiter_tail;
} opal_thread_internal_cond_t;
@@ -109,7 +132,7 @@ typedef struct {

static inline int opal_thread_internal_cond_init(opal_thread_internal_cond_t *p_cond)
{
-    opal_atomic_lock_init(&p_cond->m_lock, 0);
+    qthread_fill(&p_cond->m_lock); /* leave the internal lock word full (unlocked) */
    p_cond->m_waiter_head = NULL;
    p_cond->m_waiter_tail = NULL;
    return OPAL_SUCCESS;
@@ -121,24 +144,23 @@ static inline void opal_thread_internal_cond_wait(opal_thread_internal_cond_t *p
    opal_threads_ensure_init_qthreads();
    /* This thread is taking "lock", so only this thread can access this
     * condition variable. */
-    opal_atomic_lock(&p_cond->m_lock);
+    qthread_lock(&p_cond->m_lock);
    opal_thread_cond_waiter_t waiter = {0, NULL};
    if (NULL == p_cond->m_waiter_head) {
        p_cond->m_waiter_tail = &waiter;
    } else {
        p_cond->m_waiter_head->m_prev = &waiter;
    }
    p_cond->m_waiter_head = &waiter;
-    opal_atomic_unlock(&p_cond->m_lock);
-
-    while (1) {
-        opal_thread_internal_mutex_unlock(p_mutex);
+    qthread_unlock(&p_cond->m_lock);
+    while (1) {
+        opal_thread_internal_mutex_unlock(p_mutex);
        qthread_yield();
        opal_thread_internal_mutex_lock(p_mutex);
        /* Check if someone woke me up. */
-        opal_atomic_lock(&p_cond->m_lock);
+        qthread_lock(&p_cond->m_lock);
        int signaled = waiter.m_signaled;
-        opal_atomic_unlock(&p_cond->m_lock);
+        qthread_unlock(&p_cond->m_lock);
        if (1 == signaled) {
            break;
        }
@@ -148,7 +170,7 @@ static inline void opal_thread_internal_cond_wait(opal_thread_internal_cond_t *p

static inline void opal_thread_internal_cond_broadcast(opal_thread_internal_cond_t *p_cond)
{
-    opal_atomic_lock(&p_cond->m_lock);
+    qthread_lock(&p_cond->m_lock);
    while (NULL != p_cond->m_waiter_tail) {
        opal_thread_cond_waiter_t *p_cur_tail = p_cond->m_waiter_tail;
        p_cond->m_waiter_tail = p_cur_tail->m_prev;
@@ -157,12 +179,12 @@ static inline void opal_thread_internal_cond_broadcast(opal_thread_internal_cond
    }
    /* No waiters. */
    p_cond->m_waiter_head = NULL;
-    opal_atomic_unlock(&p_cond->m_lock);
+    qthread_unlock(&p_cond->m_lock);
}

static inline void opal_thread_internal_cond_signal(opal_thread_internal_cond_t *p_cond)
{
-    opal_atomic_lock(&p_cond->m_lock);
+    qthread_lock(&p_cond->m_lock);
    if (NULL != p_cond->m_waiter_tail) {
        opal_thread_cond_waiter_t *p_cur_tail = p_cond->m_waiter_tail;
        p_cond->m_waiter_tail = p_cur_tail->m_prev;
@@ -172,7 +194,7 @@ static inline void opal_thread_internal_cond_signal(opal_thread_internal_cond_t
            p_cond->m_waiter_head = NULL;
        }
    }
-    opal_atomic_unlock(&p_cond->m_lock);
+    qthread_unlock(&p_cond->m_lock);
}

static inline void opal_thread_internal_cond_destroy(opal_thread_internal_cond_t *p_cond)
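
For context on the pattern adopted above: the lock word is an aligned_t managed through qthreads' full/empty-bit machinery, where qthread_fill() leaves the word full (unlocked), qthread_lock() acquires it, and qthread_unlock() releases it. The following minimal, self-contained sketch is not part of this patch and is independent of OPAL (it assumes only a qthreads installation); it illustrates the same pairing that opal_thread_internal_mutex_init/lock/unlock wrap.

/* Standalone illustration of the qthreads lock pattern used by the wrappers above.
 * The aligned_t lock_word plays the role of opal_thread_internal_mutex_t. */
#include <qthread/qthread.h>
#include <stdio.h>

static aligned_t lock_word;   /* lock protecting the shared counter */
static int counter = 0;       /* shared state */

static aligned_t incrementer(void *arg)
{
    (void) arg;
    for (int i = 0; i < 1000; ++i) {
        qthread_lock(&lock_word);   /* cf. opal_thread_internal_mutex_lock() */
        ++counter;
        qthread_unlock(&lock_word); /* cf. opal_thread_internal_mutex_unlock() */
    }
    return 0;
}

int main(void)
{
    qthread_initialize();
    qthread_fill(&lock_word);       /* cf. opal_thread_internal_mutex_init(): mark unlocked */

    aligned_t rets[4];
    for (int i = 0; i < 4; ++i) {
        qthread_fork(incrementer, NULL, &rets[i]);
    }
    for (int i = 0; i < 4; ++i) {
        aligned_t dummy;
        qthread_readFF(&dummy, &rets[i]); /* block until each task has finished */
    }

    printf("counter = %d (expected 4000)\n", counter);
    return 0;
}

Relative to this bare pattern, the wrappers in the patch additionally report unexpected qthreads return codes when OPAL_ENABLE_DEBUG is set and call qthread_yield() after unlocking to improve fairness among waiting qthreads.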