Skip to content

Commit 39e7d09

Browse files
committed
Merge branch 'mlx4-next'
Or Gerlitz says: ==================== mlx4: Add SRIOV support for RoCE This series adds SRIOV support for RoCE (RDMA over Ethernet) to the mlx4 driver. The patches are against net-next, as of commit 2d8d40a "pkt_sched: fq: do not hold qdisc lock while allocating memory" changes from V1: - addressed feedback from Dave on patch #3 and changed get_real_sgid_index() to be called fill_in_real_sgid_index() and be a void function. - removed some checkpatch warnings on long lines changes from V0: - always check the return code of mlx4_get_roce_gid_from_slave(). The call we fixed is introduced in patch #1 and later removed by patch #3 that allows guests to have multiple GIDS. The 1..3 separation was done for proper division of patches to logical changes. ==================== Signed-off-by: David S. Miller <[email protected]>
2 parents 36f6fdb + aa9a2d5 commit 39e7d09

File tree

15 files changed

+811
-139
lines changed

15 files changed

+811
-139
lines changed

drivers/infiniband/hw/mlx4/cm.c

Lines changed: 63 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,11 @@ struct cm_generic_msg {
6161
__be32 remote_comm_id;
6262
};
6363

/* CM SIDR REQ/REP messages carry their identifier in a request_id field
 * immediately after the MAD header, unlike the other CM messages, which
 * use local_comm_id/remote_comm_id (see struct cm_generic_msg above). */
struct cm_sidr_generic_msg {
	struct ib_mad_hdr hdr;
	__be32 request_id;
};
6469
struct cm_req_msg {
6570
unsigned char unused[0x60];
6671
union ib_gid primary_path_sgid;
@@ -69,28 +74,62 @@ struct cm_req_msg {
6974

7075
/*
 * set_local_comm_id - store @cm_id as the local communication id of @mad.
 *
 * SIDR REQ messages keep their id in the request_id field; SIDR REP
 * messages have no local comm id at all, so such a call is rejected
 * with an error print.  Every other CM message uses local_comm_id.
 */
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	switch (mad->mad_hdr.attr_id) {
	case CM_SIDR_REQ_ATTR_ID: {
		struct cm_sidr_generic_msg *sidr_msg =
			(struct cm_sidr_generic_msg *)mad;

		sidr_msg->request_id = cpu_to_be32(cm_id);
		break;
	}
	case CM_SIDR_REP_ATTR_ID:
		pr_err("trying to set local_comm_id in SIDR_REP\n");
		break;
	default: {
		struct cm_generic_msg *gen_msg = (struct cm_generic_msg *)mad;

		gen_msg->local_comm_id = cpu_to_be32(cm_id);
		break;
	}
	}
}
7589

7690
static u32 get_local_comm_id(struct ib_mad *mad)
7791
{
78-
struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
79-
80-
return be32_to_cpu(msg->local_comm_id);
92+
if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
93+
struct cm_sidr_generic_msg *msg =
94+
(struct cm_sidr_generic_msg *)mad;
95+
return be32_to_cpu(msg->request_id);
96+
} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
97+
pr_err("trying to set local_comm_id in SIDR_REP\n");
98+
return -1;
99+
} else {
100+
struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
101+
return be32_to_cpu(msg->local_comm_id);
102+
}
81103
}
82104

83105
/*
 * set_remote_comm_id - store @cm_id as the remote communication id of @mad.
 *
 * For SIDR the remote side's id lives in the SIDR REP request_id field;
 * a SIDR REQ has no remote comm id, so such a call is rejected with an
 * error print.  Every other CM message uses remote_comm_id.
 */
static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	switch (mad->mad_hdr.attr_id) {
	case CM_SIDR_REP_ATTR_ID: {
		struct cm_sidr_generic_msg *sidr_msg =
			(struct cm_sidr_generic_msg *)mad;

		sidr_msg->request_id = cpu_to_be32(cm_id);
		break;
	}
	case CM_SIDR_REQ_ATTR_ID:
		pr_err("trying to set remote_comm_id in SIDR_REQ\n");
		break;
	default: {
		struct cm_generic_msg *gen_msg = (struct cm_generic_msg *)mad;

		gen_msg->remote_comm_id = cpu_to_be32(cm_id);
		break;
	}
	}
}
88119

89120
static u32 get_remote_comm_id(struct ib_mad *mad)
90121
{
91-
struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
92-
93-
return be32_to_cpu(msg->remote_comm_id);
122+
if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
123+
struct cm_sidr_generic_msg *msg =
124+
(struct cm_sidr_generic_msg *)mad;
125+
return be32_to_cpu(msg->request_id);
126+
} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
127+
pr_err("trying to set remote_comm_id in SIDR_REQ\n");
128+
return -1;
129+
} else {
130+
struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
131+
return be32_to_cpu(msg->remote_comm_id);
132+
}
94133
}
95134

96135
static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
@@ -282,19 +321,21 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
282321
u32 sl_cm_id;
283322
int pv_cm_id = -1;
284323

285-
sl_cm_id = get_local_comm_id(mad);
286-
287324
if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
288-
mad->mad_hdr.attr_id == CM_REP_ATTR_ID) {
325+
mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
326+
mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
327+
sl_cm_id = get_local_comm_id(mad);
289328
id = id_map_alloc(ibdev, slave_id, sl_cm_id);
290329
if (IS_ERR(id)) {
291330
mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
292331
__func__, slave_id, sl_cm_id);
293332
return PTR_ERR(id);
294333
}
295-
} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) {
334+
} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
335+
mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
296336
return 0;
297337
} else {
338+
sl_cm_id = get_local_comm_id(mad);
298339
id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
299340
}
300341

@@ -315,14 +356,18 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
315356
}
316357

317358
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
318-
struct ib_mad *mad)
359+
struct ib_mad *mad)
319360
{
320361
u32 pv_cm_id;
321362
struct id_map_entry *id;
322363

323-
if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID) {
364+
if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
365+
mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
324366
union ib_gid gid;
325367

368+
if (!slave)
369+
return 0;
370+
326371
gid = gid_from_req_msg(ibdev, mad);
327372
*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
328373
if (*slave < 0) {
@@ -341,7 +386,8 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
341386
return -ENOENT;
342387
}
343388

344-
*slave = id->slave_id;
389+
if (slave)
390+
*slave = id->slave_id;
345391
set_remote_comm_id(mad, id->sl_cm_id);
346392

347393
if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)

drivers/infiniband/hw/mlx4/cq.c

Lines changed: 28 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -564,7 +564,7 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
564564
}
565565

566566
static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
567-
unsigned tail, struct mlx4_cqe *cqe)
567+
unsigned tail, struct mlx4_cqe *cqe, int is_eth)
568568
{
569569
struct mlx4_ib_proxy_sqp_hdr *hdr;
570570

@@ -574,12 +574,20 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
574574
DMA_FROM_DEVICE);
575575
hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
576576
wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
577-
wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
578-
wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
579577
wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
580578
wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
581579
wc->dlid_path_bits = 0;
582580

581+
if (is_eth) {
582+
wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
583+
memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
584+
memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
585+
wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
586+
} else {
587+
wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
588+
wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
589+
}
590+
583591
return 0;
584592
}
585593

@@ -594,6 +602,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
594602
struct mlx4_srq *msrq = NULL;
595603
int is_send;
596604
int is_error;
605+
int is_eth;
597606
u32 g_mlpath_rqpn;
598607
u16 wqe_ctr;
599608
unsigned tail = 0;
@@ -778,11 +787,15 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
778787
break;
779788
}
780789

790+
is_eth = (rdma_port_get_link_layer(wc->qp->device,
791+
(*cur_qp)->port) ==
792+
IB_LINK_LAYER_ETHERNET);
781793
if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
782794
if ((*cur_qp)->mlx4_ib_qp_type &
783795
(MLX4_IB_QPT_PROXY_SMI_OWNER |
784796
MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
785-
return use_tunnel_data(*cur_qp, cq, wc, tail, cqe);
797+
return use_tunnel_data(*cur_qp, cq, wc, tail,
798+
cqe, is_eth);
786799
}
787800

788801
wc->slid = be16_to_cpu(cqe->rlid);
@@ -793,20 +806,21 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
793806
wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
794807
wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
795808
cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
796-
if (rdma_port_get_link_layer(wc->qp->device,
797-
(*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
809+
if (is_eth) {
798810
wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
799-
else
800-
wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
801-
if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK) {
802-
wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
803-
MLX4_CQE_VID_MASK;
811+
if (be32_to_cpu(cqe->vlan_my_qpn) &
812+
MLX4_CQE_VLAN_PRESENT_MASK) {
813+
wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
814+
MLX4_CQE_VID_MASK;
815+
} else {
816+
wc->vlan_id = 0xffff;
817+
}
818+
memcpy(wc->smac, cqe->smac, ETH_ALEN);
819+
wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
804820
} else {
821+
wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
805822
wc->vlan_id = 0xffff;
806823
}
807-
wc->wc_flags |= IB_WC_WITH_VLAN;
808-
memcpy(wc->smac, cqe->smac, ETH_ALEN);
809-
wc->wc_flags |= IB_WC_WITH_SMAC;
810824
}
811825

812826
return 0;

0 commit comments

Comments
 (0)