@@ -642,6 +642,78 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
 	return skb;
 }
 
+static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
+					     struct rxe_pkt_info *pkt)
+{
+	struct resp_res *res;
+	u32 pkts;
+
+	res = &qp->resp.resources[qp->resp.res_head];
+	rxe_advance_resp_resource(qp);
+	free_rd_atomic_resource(qp, res);
+
+	res->type = RXE_READ_MASK;
+	res->replay = 0;
+	res->read.va = qp->resp.va + qp->resp.offset;
+	res->read.va_org = qp->resp.va + qp->resp.offset;
+	res->read.resid = qp->resp.resid;
+	res->read.length = qp->resp.resid;
+	res->read.rkey = qp->resp.rkey;
+
+	pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
+	res->first_psn = pkt->psn;
+	res->cur_psn = pkt->psn;
+	res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
+
+	res->state = rdatm_res_state_new;
+
+	return res;
+}
+
+/**
+ * rxe_recheck_mr - revalidate MR from rkey and get a reference
+ * @qp: the qp
+ * @rkey: the rkey
+ *
+ * This allows the MR to have been invalidated or deregistered, or the
+ * MW, if one was used, to have been invalidated or deallocated.
+ * It is assumed that the access permissions, if originally good,
+ * are still OK and that the mappings are unchanged.
+ *
+ * Return: mr on success else NULL
+ */
+static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
+{
+	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+	struct rxe_mr *mr;
+	struct rxe_mw *mw;
+
+	if (rkey_is_mw(rkey)) {
+		mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
+		if (!mw || mw->rkey != rkey)
+			return NULL;
+
+		if (mw->state != RXE_MW_STATE_VALID) {
+			rxe_drop_ref(mw);
+			return NULL;
+		}
+
+		mr = mw->mr;
+		rxe_drop_ref(mw);
+	} else {
+		mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
+		if (!mr || mr->rkey != rkey)
+			return NULL;
+	}
+
+	if (mr->state != RXE_MR_STATE_VALID) {
+		rxe_drop_ref(mr);
+		return NULL;
+	}
+
+	return mr;
+}
+
 
 /* RDMA read response. If res is not NULL, then we have a current RDMA request
  * being processed or replayed.
  */
@@ -656,53 +728,26 @@ static enum resp_states read_reply(struct rxe_qp *qp,
 	int opcode;
 	int err;
 	struct resp_res *res = qp->resp.res;
+	struct rxe_mr *mr;
 
 	if (!res) {
-		/* This is the first time we process that request. Get a
-		 * resource
-		 */
-		res = &qp->resp.resources[qp->resp.res_head];
-
-		free_rd_atomic_resource(qp, res);
-		rxe_advance_resp_resource(qp);
-
-		res->type = RXE_READ_MASK;
-		res->replay = 0;
-
-		res->read.va = qp->resp.va +
-			       qp->resp.offset;
-		res->read.va_org = qp->resp.va +
-				   qp->resp.offset;
-
-		res->first_psn = req_pkt->psn;
-
-		if (reth_len(req_pkt)) {
-			res->last_psn = (req_pkt->psn +
-					 (reth_len(req_pkt) + mtu - 1) /
-					 mtu - 1) & BTH_PSN_MASK;
-		} else {
-			res->last_psn = res->first_psn;
-		}
-		res->cur_psn = req_pkt->psn;
-
-		res->read.resid = qp->resp.resid;
-		res->read.length = qp->resp.resid;
-		res->read.rkey = qp->resp.rkey;
-
-		/* note res inherits the reference to mr from qp */
-		res->read.mr = qp->resp.mr;
-		qp->resp.mr = NULL;
-
-		qp->resp.res = res;
-		res->state = rdatm_res_state_new;
+		res = rxe_prepare_read_res(qp, req_pkt);
+		qp->resp.res = res;
 	}
 
 	if (res->state == rdatm_res_state_new) {
+		mr = qp->resp.mr;
+		qp->resp.mr = NULL;
+
 		if (res->read.resid <= mtu)
 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
 		else
 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
 	} else {
+		mr = rxe_recheck_mr(qp, res->read.rkey);
+		if (!mr)
+			return RESPST_ERR_RKEY_VIOLATION;
+
 		if (res->read.resid > mtu)
 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
 		else
@@ -718,10 +763,12 @@ static enum resp_states read_reply(struct rxe_qp *qp,
 	if (!skb)
 		return RESPST_ERR_RNR;
 
-	err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
+	err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
 			  payload, RXE_FROM_MR_OBJ);
 	if (err)
 		pr_err("Failed copying memory\n");
+	if (mr)
+		rxe_drop_ref(mr);
 
 	if (bth_pad(&ack_pkt)) {
 		u8 *pad = payload_addr(&ack_pkt) + payload;
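
The new helper folds the old zero-length special case into a single max_t(u32, ..., 1) expression: a read of reth_len bytes is answered in ceil(reth_len / mtu) reply packets, never fewer than one, and the last PSN wraps in the 24-bit PSN space. Below is a minimal, standalone userspace sketch of that arithmetic, not kernel code; the helper name and test values are illustrative only.

/*
 * Standalone sketch (not kernel code) of the PSN bookkeeping done by
 * rxe_prepare_read_res() above: a read needs ceil(reth_len / mtu) reply
 * packets but never fewer than one, and PSNs wrap in a 24-bit space.
 */
#include <stdio.h>

#define BTH_PSN_MASK 0x00ffffffu	/* 24-bit PSN space, as in rxe */

static void psn_range(unsigned int psn, unsigned int reth_len,
		      unsigned int mtu)
{
	/* same formula as the max_t(u32, ..., 1) expression in the patch */
	unsigned int pkts = (reth_len + mtu - 1) / mtu;

	if (pkts < 1)
		pkts = 1;

	printf("len=%u mtu=%u -> first=%u last=%u (%u packets)\n",
	       reth_len, mtu, psn, (psn + pkts - 1) & BTH_PSN_MASK, pkts);
}

int main(void)
{
	psn_range(100, 0, 1024);	/* zero-length read: one packet, last == first */
	psn_range(100, 4096, 1024);	/* four packets: PSNs 100..103 */
	psn_range(0xfffffe, 3000, 1024);	/* three packets, last PSN wraps to 0 */
	return 0;
}

The zero-length case shows why the clamp is there: it reproduces the behavior of the old else branch (last_psn = first_psn) that the removed code handled explicitly.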
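
rxe_recheck_mr() finds the memory object again from the saved rkey instead of a held pointer, so the MR (or MW) can legitimately disappear between replies. The rkey >> 8 lookups and the full 32-bit compares against mr->rkey / mw->rkey suggest a layout with the pool index in the upper bits and an 8-bit key byte below it; the sketch below assumes that layout purely for illustration, and make_key() is a hypothetical helper, not an rxe function.

/*
 * Standalone sketch (not kernel code) of the rkey layout implied by the
 * "rkey >> 8" lookups above: the index finds the pool object, and the
 * full compare rejects an rkey whose key byte no longer matches.
 */
#include <stdio.h>

static unsigned int make_key(unsigned int index, unsigned char key)
{
	return (index << 8) | key;	/* hypothetical composition */
}

int main(void)
{
	unsigned int rkey = make_key(42, 0x5a);
	unsigned int stale = make_key(42, 0x5b);	/* same index, new key byte */

	printf("rkey=0x%08x index=%u key=0x%02x\n",
	       rkey, rkey >> 8, rkey & 0xff);
	printf("index still matches: %d, full rkey matches: %d\n",
	       (stale >> 8) == (rkey >> 8), stale == rkey);
	return 0;
}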