@@ -73,14 +73,24 @@ static void *get_cqe(struct mlx5_ib_cq *cq, int n)
 	return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
 }
 
+static u8 sw_ownership_bit(int n, int nent)
+{
+	return (n & nent) ? 1 : 0;
+}
+
 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
 {
 	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
 	struct mlx5_cqe64 *cqe64;
 
 	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
-	return ((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^
-		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
+
+	if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
+	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
+		return cqe;
+	} else {
+		return NULL;
+	}
 }
 
 static void *next_cqe_sw(struct mlx5_ib_cq *cq)
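
A quick aside on the ownership test above: nent is a power of two, so n & nent isolates the bit that flips each time the consumer index wraps the ring; sw_ownership_bit() and the !!(n & (cq->ibcq.cqe + 1)) expression in get_sw_cqe() are therefore both just the wrap parity of n. A minimal standalone sketch (illustrative only, not driver code):

/*
 * Illustrative only -- not driver code. Shows that the expected owner bit
 * toggles 0/1 on every pass over a power-of-two ring, which is what the
 * hardware/software CQE ownership handshake relies on.
 */
#include <assert.h>
#include <stdio.h>

static unsigned char sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;	/* same expression as in the patch */
}

int main(void)
{
	int nent = 8;			/* ring size, power of two */
	int n;

	for (n = 0; n < 4 * nent; n++) {
		/* parity of how many times n has wrapped the ring */
		int parity = (n / nent) & 1;

		assert(sw_ownership_bit(n, nent) == parity);
		printf("cons_index %2d -> slot %d, sw owner bit %d\n",
		       n, n & (nent - 1), sw_ownership_bit(n, nent));
	}
	return 0;
}
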
@@ -351,6 +361,11 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 	qp->sq.last_poll = tail;
 }
 
+static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
+{
+	mlx5_buf_free(&dev->mdev, &buf->buf);
+}
+
 static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 			 struct mlx5_ib_qp **cur_qp,
 			 struct ib_wc *wc)
@@ -366,6 +381,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 	void *cqe;
 	int idx;
 
+repoll:
 	cqe = next_cqe_sw(cq);
 	if (!cqe)
 		return -EAGAIN;
@@ -379,7 +395,18 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 	 */
 	rmb();
 
-	/* TBD: resize CQ */
+	opcode = cqe64->op_own >> 4;
+	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
+		if (likely(cq->resize_buf)) {
+			free_cq_buf(dev, &cq->buf);
+			cq->buf = *cq->resize_buf;
+			kfree(cq->resize_buf);
+			cq->resize_buf = NULL;
+			goto repoll;
+		} else {
+			mlx5_ib_warn(dev, "unexpected resize cqe\n");
+		}
+	}
 
 	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
 	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
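
The hunk above handles a resize completion inside the poll loop: free the old buffer, adopt the staged cq->resize_buf, and jump back to repoll to read the next CQE from the new buffer. A simplified, self-contained sketch of that swap-then-retry shape, using hypothetical types and names rather than the driver's:

/*
 * Simplified sketch of the "swap buffer and repoll" pattern used above.
 * Types and names here are hypothetical, for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { ENTRY_DATA, ENTRY_RESIZE };

struct ring {
	int *entries;
	int  nent;
	int  head;
};

struct cq {
	struct ring  buf;
	struct ring *resize_buf;	/* staged by the resize path */
};

static int poll_one(struct cq *cq, int *out)
{
repoll:
	if (cq->buf.head >= cq->buf.nent)
		return -1;				/* nothing left to poll */

	if (cq->buf.entries[cq->buf.head] == ENTRY_RESIZE) {
		if (cq->resize_buf) {
			free(cq->buf.entries);		/* drop the old buffer */
			cq->buf = *cq->resize_buf;	/* adopt the new one */
			free(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;			/* retry on the new buffer */
		}
		fprintf(stderr, "unexpected resize entry\n");
	}

	*out = cq->buf.entries[cq->buf.head++];
	return 0;
}

int main(void)
{
	int old_entries[] = { ENTRY_DATA, ENTRY_RESIZE };
	int new_entries[] = { ENTRY_DATA, ENTRY_DATA };
	struct cq cq = { .buf = { malloc(sizeof(old_entries)), 2, 0 } };
	int v;

	memcpy(cq.buf.entries, old_entries, sizeof(old_entries));
	cq.resize_buf = malloc(sizeof(*cq.resize_buf));
	cq.resize_buf->entries = malloc(sizeof(new_entries));
	memcpy(cq.resize_buf->entries, new_entries, sizeof(new_entries));
	cq.resize_buf->nent = 2;
	cq.resize_buf->head = 0;

	while (poll_one(&cq, &v) == 0)
		printf("polled entry %d\n", v);
	free(cq.buf.entries);
	return 0;
}
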
@@ -398,7 +425,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 	}
 
 	wc->qp = &(*cur_qp)->ibqp;
-	opcode = cqe64->op_own >> 4;
 	switch (opcode) {
 	case MLX5_CQE_REQ:
 		wq = &(*cur_qp)->sq;
@@ -503,15 +529,11 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
 		return err;
 
 	buf->cqe_size = cqe_size;
+	buf->nent = nent;
 
 	return 0;
 }
 
-static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
-{
-	mlx5_buf_free(&dev->mdev, &buf->buf);
-}
-
 static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 			  struct ib_ucontext *context, struct mlx5_ib_cq *cq,
 			  int entries, struct mlx5_create_cq_mbox_in **cqb,
@@ -576,16 +598,16 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
 	ib_umem_release(cq->buf.umem);
 }
 
-static void init_cq_buf(struct mlx5_ib_cq *cq, int nent)
+static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
 {
 	int i;
 	void *cqe;
 	struct mlx5_cqe64 *cqe64;
 
-	for (i = 0; i < nent; i++) {
-		cqe = get_cqe(cq, i);
-		cqe64 = (cq->buf.cqe_size == 64) ? cqe : cqe + 64;
-		cqe64->op_own = 0xf1;
+	for (i = 0; i < buf->nent; i++) {
+		cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
+		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
+		cqe64->op_own = MLX5_CQE_INVALID << 4;
 	}
 }
 
@@ -610,7 +632,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	if (err)
 		goto err_db;
 
-	init_cq_buf(cq, entries);
+	init_cq_buf(cq, &cq->buf);
 
 	*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
 	*cqb = mlx5_vzalloc(*inlen);
@@ -836,7 +858,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	in->ctx.cq_period = cpu_to_be16(cq_period);
 	in->ctx.cq_max_count = cpu_to_be16(cq_count);
 	in->field_select = cpu_to_be32(fsel);
-	err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in);
+	err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
 	kfree(in);
 
 	if (err)
@@ -845,9 +867,235 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	return err;
 }
 
+static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+		       int entries, struct ib_udata *udata, int *npas,
+		       int *page_shift, int *cqe_size)
+{
+	struct mlx5_ib_resize_cq ucmd;
+	struct ib_umem *umem;
+	int err;
+	int npages;
+	struct ib_ucontext *context = cq->buf.umem->context;
+
+	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+		return -EFAULT;
+
+	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+			   IB_ACCESS_LOCAL_WRITE, 1);
+	if (IS_ERR(umem)) {
+		err = PTR_ERR(umem);
+		return err;
+	}
+
+	mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
+			   npas, NULL);
+
+	cq->resize_umem = umem;
+	*cqe_size = ucmd.cqe_size;
+
+	return 0;
+}
+
+static void un_resize_user(struct mlx5_ib_cq *cq)
+{
+	ib_umem_release(cq->resize_umem);
+}
+
+static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+			 int entries, int cqe_size)
+{
+	int err;
+
+	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
+	if (!cq->resize_buf)
+		return -ENOMEM;
+
+	err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
+	if (err)
+		goto ex;
+
+	init_cq_buf(cq, cq->resize_buf);
+
+	return 0;
+
+ex:
+	kfree(cq->resize_buf);
+	return err;
+}
+
+static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
+{
+	free_cq_buf(dev, cq->resize_buf);
+	cq->resize_buf = NULL;
+}
+
+static int copy_resize_cqes(struct mlx5_ib_cq *cq)
+{
+	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+	struct mlx5_cqe64 *scqe64;
+	struct mlx5_cqe64 *dcqe64;
+	void *start_cqe;
+	void *scqe;
+	void *dcqe;
+	int ssize;
+	int dsize;
+	int i;
+	u8 sw_own;
+
+	ssize = cq->buf.cqe_size;
+	dsize = cq->resize_buf->cqe_size;
+	if (ssize != dsize) {
+		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
+		return -EINVAL;
+	}
+
+	i = cq->mcq.cons_index;
+	scqe = get_sw_cqe(cq, i);
+	scqe64 = ssize == 64 ? scqe : scqe + 64;
+	start_cqe = scqe;
+	if (!scqe) {
+		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
+		return -EINVAL;
+	}
+
+	while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
+		dcqe = get_cqe_from_buf(cq->resize_buf,
+					(i + 1) & (cq->resize_buf->nent),
+					dsize);
+		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
+		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
+		memcpy(dcqe, scqe, dsize);
+		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;
+
+		++i;
+		scqe = get_sw_cqe(cq, i);
+		scqe64 = ssize == 64 ? scqe : scqe + 64;
+		if (!scqe) {
+			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
+			return -EINVAL;
+		}
+
+		if (scqe == start_cqe) {
+			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
+				cq->mcq.cqn);
+			return -ENOMEM;
+		}
+	}
+	++cq->mcq.cons_index;
+	return 0;
+}
+
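
copy_resize_cqes() above walks the old ring from the current consumer index, copies software-owned CQEs into the resize buffer while stamping the new ownership bit, stops at the MLX5_CQE_RESIZE_CQ entry, and fails if it wraps back to its starting point without finding one. A stripped-down sketch of that loop shape (hypothetical names, not driver code):

/*
 * Illustrative ring-walk sketch: copy entries from an old ring into a new
 * one until a RESIZE sentinel is found; report an error if the scan wraps
 * all the way around. Names and types are hypothetical.
 */
#include <stdio.h>

#define RING_SZ   8		/* power of two, like a CQ */
#define SENTINEL  0xff		/* stands in for MLX5_CQE_RESIZE_CQ */

static int copy_until_sentinel(const unsigned char *old_ring,
			       unsigned char *new_ring, int cons_index)
{
	int start = cons_index & (RING_SZ - 1);
	int i = cons_index;
	int copied = 0;

	while (old_ring[i & (RING_SZ - 1)] != SENTINEL) {
		new_ring[copied++] = old_ring[i & (RING_SZ - 1)];
		++i;
		if ((i & (RING_SZ - 1)) == start)
			return -1;	/* looped around: no sentinel found */
	}
	return copied;
}

int main(void)
{
	unsigned char old_ring[RING_SZ] = { 1, 2, 3, SENTINEL, 0, 0, 0, 0 };
	unsigned char new_ring[RING_SZ] = { 0 };
	int n = copy_until_sentinel(old_ring, new_ring, 1);

	printf("copied %d entries\n", n);	/* copies the 2 and the 3 */
	return 0;
}
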
 int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 {
-	return -ENOSYS;
+	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
+	struct mlx5_ib_cq *cq = to_mcq(ibcq);
+	struct mlx5_modify_cq_mbox_in *in;
+	int err;
+	int npas;
+	int page_shift;
+	int inlen;
+	int uninitialized_var(cqe_size);
+	unsigned long flags;
+
+	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+		pr_info("Firmware does not support resize CQ\n");
+		return -ENOSYS;
+	}
+
+	if (entries < 1)
+		return -EINVAL;
+
+	entries = roundup_pow_of_two(entries + 1);
+	if (entries > dev->mdev.caps.max_cqes + 1)
+		return -EINVAL;
+
+	if (entries == ibcq->cqe + 1)
+		return 0;
+
+	mutex_lock(&cq->resize_mutex);
+	if (udata) {
+		err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
+				  &cqe_size);
+	} else {
+		cqe_size = 64;
+		err = resize_kernel(dev, cq, entries, cqe_size);
+		if (!err) {
+			npas = cq->resize_buf->buf.npages;
+			page_shift = cq->resize_buf->buf.page_shift;
+		}
+	}
+
+	if (err)
+		goto ex;
+
+	inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
+	in = mlx5_vzalloc(inlen);
+	if (!in) {
+		err = -ENOMEM;
+		goto ex_resize;
+	}
+
+	if (udata)
+		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
+				     in->pas, 0);
+	else
+		mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);
+
+	in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE  |
+				       MLX5_MODIFY_CQ_MASK_PG_OFFSET |
+				       MLX5_MODIFY_CQ_MASK_PG_SIZE);
+	in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+	in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
+	in->ctx.page_offset = 0;
+	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
+	in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
+	in->cqn = cpu_to_be32(cq->mcq.cqn);
+
+	err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
+	if (err)
+		goto ex_alloc;
+
+	if (udata) {
+		cq->ibcq.cqe = entries - 1;
+		ib_umem_release(cq->buf.umem);
+		cq->buf.umem = cq->resize_umem;
+		cq->resize_umem = NULL;
+	} else {
+		struct mlx5_ib_cq_buf tbuf;
+		int resized = 0;
+
+		spin_lock_irqsave(&cq->lock, flags);
+		if (cq->resize_buf) {
+			err = copy_resize_cqes(cq);
+			if (!err) {
+				tbuf = cq->buf;
+				cq->buf = *cq->resize_buf;
+				kfree(cq->resize_buf);
+				cq->resize_buf = NULL;
+				resized = 1;
+			}
+		}
+		cq->ibcq.cqe = entries - 1;
+		spin_unlock_irqrestore(&cq->lock, flags);
+		if (resized)
+			free_cq_buf(dev, &tbuf);
+	}
+	mutex_unlock(&cq->resize_mutex);
+
+	mlx5_vfree(in);
+	return 0;
+
+ex_alloc:
+	mlx5_vfree(in);
+
+ex_resize:
+	if (udata)
+		un_resize_user(cq);
+	else
+		un_resize_kernel(dev, cq);
+ex:
+	mutex_unlock(&cq->resize_mutex);
+	return err;
 }
 
 int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
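
One note on the sizing arithmetic in mlx5_ib_resize_cq(): the requested count is rounded up with roundup_pow_of_two(entries + 1) and ibcq->cqe is stored as entries - 1, which is why the no-op check compares entries against ibcq->cqe + 1. A small standalone sketch of that arithmetic (roundup_pow_of_two is reimplemented here for illustration, not taken from the kernel):

/*
 * Illustrative sizing arithmetic: mirrors entries = roundup_pow_of_two(entries + 1)
 * and ibcq->cqe = entries - 1 from the resize path. Not driver code.
 */
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int requested[] = { 1, 63, 64, 100, 256 };
	unsigned int i;

	for (i = 0; i < sizeof(requested) / sizeof(requested[0]); i++) {
		unsigned int entries = roundup_pow_of_two(requested[i] + 1);

		printf("requested %3u -> allocated %3u CQEs, ibcq->cqe = %3u\n",
		       requested[i], entries, entries - 1);
	}
	return 0;
}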