@@ -430,7 +430,7 @@ static QINLINE void qthread_gotlock_empty_inner(qthread_shepherd_t *shep,
         FREE_ADDRRES(X);
         qthread_gotlock_fill_inner(shep, m, maddr, 1, precond_tasks);
     }
-    if ((m->full == 1) && (m->EFQ == NULL) && (m->FEQ == NULL) && (m->FFQ == NULL) && (m->FFWQ == NULL)) {
+    if ((m->full == 1) && (m->EFQ == NULL) && (m->FEQ == NULL) && (m->FFQ == NULL) && (m->FFWQ == NULL) && (m->acq_owner_stat.state < FEB_is_recursive_lock)) {
         removeable = 1;
     } else {
         removeable = 0;
@@ -551,7 +551,7 @@ static QINLINE void qthread_gotlock_fill_inner(qthread_shepherd_t *shep,
     }
     if (recursive == 0) {
         int removeable;
-        if ((m->EFQ == NULL) && (m->FEQ == NULL) && (m->full == 1)) {
+        if ((m->EFQ == NULL) && (m->FEQ == NULL) && (m->full == 1) && (m->acq_owner_stat.state < FEB_is_recursive_lock)) {
            qthread_debug(FEB_DETAILS, "m(%p), addr(%p), recursive(%u): addrstat removeable!\n", m, maddr, recursive);
            removeable = 1;
        } else {
@@ -733,6 +733,11 @@ int INTERNAL qthread_feb_adr_init(const aligned_t *dest, const bool is_from_recu
     return QTHREAD_SUCCESS;
 }                      /*}}} */

+int INTERNAL qthread_feb_adr_remove(aligned_t *dest)
+{                      /*{{{ */
+    qthread_FEB_remove(dest);
+}                      /*}}} */
+
 int API_FUNC qthread_fill(const aligned_t *dest)
 {                      /*{{{ */
     const aligned_t *alignedaddr;
@@ -776,9 +781,6 @@ int API_FUNC qthread_fill(const aligned_t *dest)
         m = (qthread_addrstat_t *)qt_hash_get_locked(FEBs[lockbin], (void *)alignedaddr);
         if (m) {
             QTHREAD_FASTLOCK_LOCK(&m->lock);
-            if (m->acq_owner_stat.state >= FEB_is_recursive_lock) {
-                --m->acq_owner_stat.recursive_access_counter;
-            }
         }
     }                  /* END CRITICAL SECTION */
     qt_hash_unlock(FEBs[lockbin]); /* unlock hash */
@@ -787,10 +789,12 @@ int API_FUNC qthread_fill(const aligned_t *dest)
     if (m->acq_owner_stat.state >= FEB_is_recursive_lock) {
         --m->acq_owner_stat.recursive_access_counter;
         if (m->acq_owner_stat.recursive_access_counter > 0) {
-            qthread_debug(FEB_BEHAVIOR, "dest=%p (tid=%i): decrementing recursive_access_counter\n", dest, qthread_id());
+            qthread_debug(FEB_BEHAVIOR, "dest=%p (tid=%i): released recursive lock (inner)\n", dest, qthread_id());
+            QTHREAD_FASTLOCK_UNLOCK(&m->lock);
             return QTHREAD_SUCCESS;
         }
-        m->acq_owner_stat.state = FEB_is_recursive_lock; //reset
+        qthread_debug(FEB_BEHAVIOR, "dest=%p (tid=%i): released recursive lock (outer)\n", dest, qthread_id());
+        m->acq_owner_stat.state = FEB_is_recursive_lock; //reset owner, maintain lock type
     }
     /* if dest wasn't in the hash, it was already full. Since it was,
      * we need to fill it. */
@@ -1460,6 +1464,7 @@ int API_FUNC qthread_readFE(aligned_t *restrict dest,
     if (!me) {
         return qthread_feb_blocker_func(dest, (void *)src, READFE);
     }
+
     assert(me->rdata);
     qthread_debug(FEB_CALLS, "dest=%p, src=%p (tid=%i)\n", dest, src, me->thread_id);
     QTHREAD_FEB_UNIQUERECORD(feb, src, me);
@@ -1520,13 +1525,16 @@ int API_FUNC qthread_readFE(aligned_t *restrict dest,
     if (m->full == 0) {            /* empty, thus, we must block */
         /* unless read_FE was taken by qthread_lock() */
         /* on same thread */
-        if (m->acq_owner_stat.state >= FEB_is_recursive_lock) {
-            if (m->acq_owner_stat.state == qthread_id()) {
+        if (m->acq_owner_stat.state >= FEB_is_recursive_lock) {
+            if (m->acq_owner_stat.state == qthread_readstate(CURRENT_WORKER)) {
+
                 ++m->acq_owner_stat.recursive_access_counter;
+                qthread_debug(FEB_BEHAVIOR, "dest=%p (tid=%i): acquired recursive lock (inner)\n", dest, me->thread_id);
                 QTHREAD_FASTLOCK_UNLOCK(&m->lock);
                 QTHREAD_FEB_TIMER_STOP(febblock, me);
                 return QTHREAD_SUCCESS;
             }
+            qthread_debug(FEB_BEHAVIOR, "dest=%p (tid=%i): attempt to acquire inner recursive lock failed thus blocking\n", dest, me->thread_id);
         }
         QTHREAD_WAIT_TIMER_DECLARATION;
         qthread_addrres_t *X = ALLOC_ADDRRES();
@@ -1552,16 +1560,18 @@ int API_FUNC qthread_readFE(aligned_t *restrict dest,
 #endif /* QTHREAD_USE_EUREKAS */
         qthread_debug(FEB_BEHAVIOR, "tid %u succeeded on %p=%p after waiting\n", me->thread_id, dest, src);
     } else {                       /* full, thus IT IS OURS! MUAHAHAHA! */
+
+
         if (dest && (dest != src)) {
             *(aligned_t *)dest = *(aligned_t *)src;
             MACHINE_FENCE;
         }

         if (m->acq_owner_stat.state >= FEB_is_recursive_lock) {
-            m->acq_owner_stat.state == qthread_id();
+            m->acq_owner_stat.state = qthread_readstate(CURRENT_WORKER); /* Set owner */
             ++m->acq_owner_stat.recursive_access_counter;
             MACHINE_FENCE;
-            qthread_debug(FEB_BEHAVIOR, "dest=%p (tid=%i): incrementing recursive_access_counter \n", dest, qthread_id());
+            qthread_debug(FEB_BEHAVIOR, "dest=%p (tid=%i): acquired recursive lock (outer) \n", dest, me->thread_id);
         }

         qthread_debug(FEB_BEHAVIOR, "tid %u succeeded on %p=%p\n", me->thread_id, dest, src);
@@ -1651,13 +1661,17 @@ int API_FUNC qthread_readFE_nb(aligned_t *restrict dest,
 # endif /* ifdef LOCK_FREE_FEBS */
     qthread_debug(FEB_DETAILS, "data structure locked\n");
     /* by this point m is locked */
-    if (m->full == 0) {            /* empty, thus, we must fail */
-        /* unless called from qthread_trylock on a recursive lock*/
-        if (m->acq_owner_stat.state >= FEB_is_recursive_lock) {
-
+    if (m->full == 0) {            /* empty, thus, we must block */
+        /* unless read_FE was taken by qthread_lock() */
+        /* on same thread */
+        if (m->acq_owner_stat.state >= FEB_is_recursive_lock) {
+            if (m->acq_owner_stat.state == qthread_readstate(CURRENT_WORKER)) {
+
             ++m->acq_owner_stat.recursive_access_counter;
-            QTHREAD_FASTLOCK_UNLOCK(&m->lock);
+            qthread_debug(FEB_BEHAVIOR, "dest=%p (tid=%i): acquired recursive lock (inner)\n", dest, me->thread_id);
+            QTHREAD_FASTLOCK_UNLOCK(&m->lock);
             return QTHREAD_SUCCESS;
+            }
         }
         qthread_debug(FEB_BEHAVIOR, "tid %u non-blocking fail\n", me->thread_id);
         QTHREAD_FASTLOCK_UNLOCK(&m->lock);
@@ -1669,10 +1683,10 @@ int API_FUNC qthread_readFE_nb(aligned_t *restrict dest,
         }

         if (m->acq_owner_stat.state >= FEB_is_recursive_lock) {
-            m->acq_owner_stat.state == qthread_id();
+            m->acq_owner_stat.state = qthread_readstate(CURRENT_WORKER); /* Set owner */
             ++m->acq_owner_stat.recursive_access_counter;
             MACHINE_FENCE;
-            qthread_debug(FEB_BEHAVIOR, "dest=%p (tid=%i): incrementing recursive_access_counter \n", dest, qthread_id());
+            qthread_debug(FEB_BEHAVIOR, "dest=%p (tid=%i): acquired recursive lock (outer) \n", dest, me->thread_id);
         }

         qthread_debug(FEB_BEHAVIOR, "tid %u succeeded on %p=%p\n", me->thread_id, dest, src);
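For context, here is a minimal usage sketch of the recursive-lock path these hunks implement: the same worker re-acquires a lock it already owns (only recursive_access_counter is bumped) and must release it the same number of times before the word is filled for other waiters. The wrapper names qthread_lock_init()/qthread_lock_destroy() and the test scaffolding below are assumptions layered over the internal qthread_feb_adr_init()/qthread_feb_adr_remove() shown above; they are not part of this commit.

/*
 * Sketch only: assumes public wrappers qthread_lock_init(addr, is_recursive)
 * and qthread_lock_destroy(addr) exist and front the internal
 * qthread_feb_adr_init()/qthread_feb_adr_remove() added in this diff.
 */
#include <assert.h>
#include <qthread/qthread.h>

static aligned_t guarded = 0;
static aligned_t lock_word;

static aligned_t worker(void *arg)
{
    (void)arg;
    qthread_lock(&lock_word);   /* outer acquire: readFE empties the word and records this worker as owner */
    qthread_lock(&lock_word);   /* inner acquire by the same worker: only recursive_access_counter is bumped */
    guarded++;
    qthread_unlock(&lock_word); /* inner release: counter stays > 0, word remains empty and owned */
    qthread_unlock(&lock_word); /* outer release: owner reset, word filled for other contenders */
    return 0;
}

int main(void)
{
    aligned_t ret;

    assert(qthread_initialize() == QTHREAD_SUCCESS);
    qthread_lock_init(&lock_word, 1);  /* assumed wrapper: nonzero marks the FEB as a recursive lock */
    qthread_fork(worker, NULL, &ret);
    qthread_readFF(NULL, &ret);        /* wait for the worker to finish */
    qthread_lock_destroy(&lock_word);  /* assumed wrapper over the new qthread_feb_adr_remove();
                                        * the addrstat persists until this call, per the new
                                        * "state < FEB_is_recursive_lock" removeable checks */
    qthread_finalize();
    return 0;
}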