21
21
#include <linux/clk.h>
22
22
#include <linux/completion.h>
23
23
#include <linux/delay.h>
24
+ #include <linux/dmaengine.h>
25
+ #include <linux/dma-mapping.h>
24
26
#include <linux/err.h>
25
27
#include <linux/gpio.h>
26
28
#include <linux/interrupt.h>
37
39
#include <linux/of_device.h>
38
40
#include <linux/of_gpio.h>
39
41
42
+ #include <linux/platform_data/dma-imx.h>
40
43
#include <linux/platform_data/spi-imx.h>
41
44
42
45
#define DRIVER_NAME "spi_imx"
51
54
#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
52
55
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
53
56
57
/* The maximum number of bytes that one SDMA buffer descriptor (BD) can transfer. */
58
+ #define MAX_SDMA_BD_BYTES (1 << 15)
59
+ #define IMX_DMA_TIMEOUT (msecs_to_jiffies(3000))
54
60
struct spi_imx_config {
55
61
unsigned int speed_hz ;
56
62
unsigned int bpw ;
@@ -95,6 +101,16 @@ struct spi_imx_data {
95
101
const void * tx_buf ;
96
102
unsigned int txfifo ; /* number of words pushed in tx FIFO */
97
103
104
+ /* DMA */
105
+ unsigned int dma_is_inited ;
106
+ unsigned int dma_finished ;
107
+ bool usedma ;
108
+ u32 rx_wml ;
109
+ u32 tx_wml ;
110
+ u32 rxt_wml ;
111
+ struct completion dma_rx_completion ;
112
+ struct completion dma_tx_completion ;
113
+
98
114
const struct spi_imx_devtype_data * devtype_data ;
99
115
int chipselect [0 ];
100
116
};
@@ -181,9 +197,21 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
181
197
return 7 ;
182
198
}
183
199
200
+ static bool spi_imx_can_dma (struct spi_master * master , struct spi_device * spi ,
201
+ struct spi_transfer * transfer )
202
+ {
203
+ struct spi_imx_data * spi_imx = spi_master_get_devdata (master );
204
+
205
+ if (spi_imx -> dma_is_inited && (transfer -> len > spi_imx -> rx_wml )
206
+ && (transfer -> len > spi_imx -> tx_wml ))
207
+ return true;
208
+ return false;
209
+ }
210
+
184
211
#define MX51_ECSPI_CTRL 0x08
185
212
#define MX51_ECSPI_CTRL_ENABLE (1 << 0)
186
213
#define MX51_ECSPI_CTRL_XCH (1 << 2)
214
+ #define MX51_ECSPI_CTRL_SMC (1 << 3)
187
215
#define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
188
216
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
189
217
#define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
@@ -201,6 +229,18 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
201
229
#define MX51_ECSPI_INT_TEEN (1 << 0)
202
230
#define MX51_ECSPI_INT_RREN (1 << 3)
203
231
232
+ #define MX51_ECSPI_DMA 0x14
233
+ #define MX51_ECSPI_DMA_TX_WML_OFFSET 0
234
+ #define MX51_ECSPI_DMA_TX_WML_MASK 0x3F
235
+ #define MX51_ECSPI_DMA_RX_WML_OFFSET 16
236
+ #define MX51_ECSPI_DMA_RX_WML_MASK (0x3F << 16)
237
+ #define MX51_ECSPI_DMA_RXT_WML_OFFSET 24
238
+ #define MX51_ECSPI_DMA_RXT_WML_MASK (0x3F << 24)
239
+
240
+ #define MX51_ECSPI_DMA_TEDEN_OFFSET 7
241
+ #define MX51_ECSPI_DMA_RXDEN_OFFSET 23
242
+ #define MX51_ECSPI_DMA_RXTDEN_OFFSET 31
243
+
204
244
#define MX51_ECSPI_STAT 0x18
205
245
#define MX51_ECSPI_STAT_RR (1 << 3)
206
246
@@ -257,17 +297,22 @@ static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int
257
297
258
298
/*
 * Kick off (or wind down) a transfer on the ECSPI block.  PIO transfers
 * are started explicitly via the XCH bit; DMA transfers instead arm SMC
 * ("start on TX FIFO write") so the hardware begins as soon as the DMA
 * engine feeds the FIFO, and SMC is cleared again once the DMA transfer
 * has finished.
 */
static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
	u32 reg;

	reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
	if (spi_imx->usedma) {
		if (spi_imx->dma_finished)
			reg &= ~MX51_ECSPI_CTRL_SMC;
		else
			reg |= MX51_ECSPI_CTRL_SMC;
	} else {
		reg |= MX51_ECSPI_CTRL_XCH;
	}
	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
266
310
267
311
static int __maybe_unused mx51_ecspi_config (struct spi_imx_data * spi_imx ,
268
312
struct spi_imx_config * config )
269
313
{
270
- u32 ctrl = MX51_ECSPI_CTRL_ENABLE , cfg = 0 ;
314
+ u32 ctrl = MX51_ECSPI_CTRL_ENABLE , cfg = 0 , dma = 0 ;
315
+ u32 tx_wml_cfg , rx_wml_cfg , rxt_wml_cfg ;
271
316
u32 clk = config -> speed_hz , delay ;
272
317
273
318
/*
@@ -319,6 +364,30 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
319
364
else /* SCLK is _very_ slow */
320
365
usleep_range (delay , delay + 10 );
321
366
367
+ /*
368
+ * Configure the DMA register: setup the watermark
369
+ * and enable DMA request.
370
+ */
371
+ if (spi_imx -> dma_is_inited ) {
372
+ dma = readl (spi_imx -> base + MX51_ECSPI_DMA );
373
+
374
+ spi_imx -> tx_wml = spi_imx_get_fifosize (spi_imx ) / 2 ;
375
+ spi_imx -> rx_wml = spi_imx_get_fifosize (spi_imx ) / 2 ;
376
+ spi_imx -> rxt_wml = spi_imx_get_fifosize (spi_imx ) / 2 ;
377
+ rx_wml_cfg = spi_imx -> rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET ;
378
+ tx_wml_cfg = spi_imx -> tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET ;
379
+ rxt_wml_cfg = spi_imx -> rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET ;
380
+ dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
381
+ & ~MX51_ECSPI_DMA_RX_WML_MASK
382
+ & ~MX51_ECSPI_DMA_RXT_WML_MASK )
383
+ | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
384
+ |(1 << MX51_ECSPI_DMA_TEDEN_OFFSET )
385
+ |(1 << MX51_ECSPI_DMA_RXDEN_OFFSET )
386
+ |(1 << MX51_ECSPI_DMA_RXTDEN_OFFSET );
387
+
388
+ writel (dma , spi_imx -> base + MX51_ECSPI_DMA );
389
+ }
390
+
322
391
return 0 ;
323
392
}
324
393
@@ -730,7 +799,186 @@ static int spi_imx_setupxfer(struct spi_device *spi,
730
799
return 0 ;
731
800
}
732
801
733
- static int spi_imx_transfer (struct spi_device * spi ,
802
+ static void spi_imx_sdma_exit (struct spi_imx_data * spi_imx )
803
+ {
804
+ struct spi_master * master = spi_imx -> bitbang .master ;
805
+
806
+ if (master -> dma_rx ) {
807
+ dma_release_channel (master -> dma_rx );
808
+ master -> dma_rx = NULL ;
809
+ }
810
+
811
+ if (master -> dma_tx ) {
812
+ dma_release_channel (master -> dma_tx );
813
+ master -> dma_tx = NULL ;
814
+ }
815
+
816
+ spi_imx -> dma_is_inited = 0 ;
817
+ }
818
+
819
+ static int spi_imx_sdma_init (struct device * dev , struct spi_imx_data * spi_imx ,
820
+ struct spi_master * master ,
821
+ const struct resource * res )
822
+ {
823
+ struct dma_slave_config slave_config = {};
824
+ int ret ;
825
+
826
+ /* Prepare for TX DMA: */
827
+ master -> dma_tx = dma_request_slave_channel (dev , "tx" );
828
+ if (!master -> dma_tx ) {
829
+ dev_err (dev , "cannot get the TX DMA channel!\n" );
830
+ ret = - EINVAL ;
831
+ goto err ;
832
+ }
833
+
834
+ slave_config .direction = DMA_MEM_TO_DEV ;
835
+ slave_config .dst_addr = res -> start + MXC_CSPITXDATA ;
836
+ slave_config .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE ;
837
+ slave_config .dst_maxburst = spi_imx_get_fifosize (spi_imx ) / 2 ;
838
+ ret = dmaengine_slave_config (master -> dma_tx , & slave_config );
839
+ if (ret ) {
840
+ dev_err (dev , "error in TX dma configuration.\n" );
841
+ goto err ;
842
+ }
843
+
844
+ /* Prepare for RX : */
845
+ master -> dma_rx = dma_request_slave_channel (dev , "rx" );
846
+ if (!master -> dma_rx ) {
847
+ dev_dbg (dev , "cannot get the DMA channel.\n" );
848
+ ret = - EINVAL ;
849
+ goto err ;
850
+ }
851
+
852
+ slave_config .direction = DMA_DEV_TO_MEM ;
853
+ slave_config .src_addr = res -> start + MXC_CSPIRXDATA ;
854
+ slave_config .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE ;
855
+ slave_config .src_maxburst = spi_imx_get_fifosize (spi_imx ) / 2 ;
856
+ ret = dmaengine_slave_config (master -> dma_rx , & slave_config );
857
+ if (ret ) {
858
+ dev_err (dev , "error in RX dma configuration.\n" );
859
+ goto err ;
860
+ }
861
+
862
+ init_completion (& spi_imx -> dma_rx_completion );
863
+ init_completion (& spi_imx -> dma_tx_completion );
864
+ master -> can_dma = spi_imx_can_dma ;
865
+ master -> max_dma_len = MAX_SDMA_BD_BYTES ;
866
+ spi_imx -> bitbang .master -> flags = SPI_MASTER_MUST_RX |
867
+ SPI_MASTER_MUST_TX ;
868
+ spi_imx -> dma_is_inited = 1 ;
869
+
870
+ return 0 ;
871
+ err :
872
+ spi_imx_sdma_exit (spi_imx );
873
+ return ret ;
874
+ }
875
+
876
+ static void spi_imx_dma_rx_callback (void * cookie )
877
+ {
878
+ struct spi_imx_data * spi_imx = (struct spi_imx_data * )cookie ;
879
+
880
+ complete (& spi_imx -> dma_rx_completion );
881
+ }
882
+
883
+ static void spi_imx_dma_tx_callback (void * cookie )
884
+ {
885
+ struct spi_imx_data * spi_imx = (struct spi_imx_data * )cookie ;
886
+
887
+ complete (& spi_imx -> dma_tx_completion );
888
+ }
889
+
890
/*
 * Perform one transfer with SDMA driving both FIFO directions.
 *
 * Returns transfer->len on success, -ETIMEDOUT if either direction did
 * not complete within IMX_DMA_TIMEOUT, or -EAGAIN when a descriptor
 * could not be prepared (the caller then falls back to PIO).
 */
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
	int ret;
	u32 dma;
	int left;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

	/*
	 * NOTE(review): tx and rx point at members embedded in *transfer,
	 * so these NULL checks are always true.  The sg tables themselves
	 * should always be mapped because SPI_MASTER_MUST_RX/MUST_TX are
	 * set at init — confirm before relying on these branches.
	 */
	if (tx) {
		desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
					tx->sgl, tx->nents, DMA_TO_DEVICE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_tx)
			goto no_dma;

		desc_tx->callback = spi_imx_dma_tx_callback;
		desc_tx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_tx);
	}

	if (rx) {
		desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
					rx->sgl, rx->nents, DMA_FROM_DEVICE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_rx)
			goto no_dma;

		desc_rx->callback = spi_imx_dma_rx_callback;
		desc_rx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_rx);
	}

	/* Re-arm the completions before the channels are kicked off. */
	reinit_completion(&spi_imx->dma_rx_completion);
	reinit_completion(&spi_imx->dma_tx_completion);

	/* Trigger the cspi module. */
	spi_imx->dma_finished = 0;

	dma = readl(spi_imx->base + MX51_ECSPI_DMA);
	dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
	/*
	 * Change RX_DMA_LENGTH trigger dma fetch tail data: lower the RX
	 * threshold watermark to the remainder so the final partial burst
	 * still raises a DMA request.
	 */
	left = transfer->len % spi_imx->rxt_wml;
	if (left)
		writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
				spi_imx->base + MX51_ECSPI_DMA);
	/* With usedma set and dma_finished clear this arms SMC. */
	spi_imx->devtype_data->trigger(spi_imx);

	dma_async_issue_pending(master->dma_tx);
	dma_async_issue_pending(master->dma_rx);
	/* Wait SDMA to finish the data transfer.*/
	ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
					  IMX_DMA_TIMEOUT);
	if (!ret) {
		pr_warn("%s %s: I/O Error in DMA TX\n",
			dev_driver_string(&master->dev),
			dev_name(&master->dev));
		dmaengine_terminate_all(master->dma_tx);
	} else {
		/* TX done; now wait for the RX side to drain. */
		ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
				IMX_DMA_TIMEOUT);
		if (!ret) {
			pr_warn("%s %s: I/O Error in DMA RX\n",
				dev_driver_string(&master->dev),
				dev_name(&master->dev));
			/* RX stuck: reset the controller before giving up. */
			spi_imx->devtype_data->reset(spi_imx);
			dmaengine_terminate_all(master->dma_rx);
		}
		/* Restore the full RXT watermark for the next transfer. */
		writel(dma |
		       spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
		       spi_imx->base + MX51_ECSPI_DMA);
	}

	/* Clear SMC again via the trigger hook. */
	spi_imx->dma_finished = 1;
	spi_imx->devtype_data->trigger(spi_imx);

	if (!ret)
		ret = -ETIMEDOUT;
	else if (ret > 0)
		ret = transfer->len;

	return ret;

no_dma:
	pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
		     dev_driver_string(&master->dev),
		     dev_name(&master->dev));
	return -EAGAIN;
}
980
+
981
+ static int spi_imx_pio_transfer (struct spi_device * spi ,
734
982
struct spi_transfer * transfer )
735
983
{
736
984
struct spi_imx_data * spi_imx = spi_master_get_devdata (spi -> master );
@@ -751,6 +999,24 @@ static int spi_imx_transfer(struct spi_device *spi,
751
999
return transfer -> len ;
752
1000
}
753
1001
1002
+ static int spi_imx_transfer (struct spi_device * spi ,
1003
+ struct spi_transfer * transfer )
1004
+ {
1005
+ int ret ;
1006
+ struct spi_imx_data * spi_imx = spi_master_get_devdata (spi -> master );
1007
+
1008
+ if (spi_imx -> bitbang .master -> can_dma &&
1009
+ spi_imx_can_dma (spi_imx -> bitbang .master , spi , transfer )) {
1010
+ spi_imx -> usedma = true;
1011
+ ret = spi_imx_dma_transfer (spi_imx , transfer );
1012
+ if (ret != - EAGAIN )
1013
+ return ret ;
1014
+ }
1015
+ spi_imx -> usedma = false;
1016
+
1017
+ return spi_imx_pio_transfer (spi , transfer );
1018
+ }
1019
+
754
1020
static int spi_imx_setup (struct spi_device * spi )
755
1021
{
756
1022
struct spi_imx_data * spi_imx = spi_master_get_devdata (spi -> master );
@@ -911,6 +1177,13 @@ static int spi_imx_probe(struct platform_device *pdev)
911
1177
goto out_put_per ;
912
1178
913
1179
spi_imx -> spi_clk = clk_get_rate (spi_imx -> clk_per );
1180
+ /*
1181
+ * Only validated on i.mx6 now, can remove the constrain if validated on
1182
+ * other chips.
1183
+ */
1184
+ if (spi_imx -> devtype_data == & imx51_ecspi_devtype_data
1185
+ && spi_imx_sdma_init (& pdev -> dev , spi_imx , master , res ))
1186
+ dev_err (& pdev -> dev , "dma setup error,use pio instead\n" );
914
1187
915
1188
spi_imx -> devtype_data -> reset (spi_imx );
916
1189
@@ -949,6 +1222,7 @@ static int spi_imx_remove(struct platform_device *pdev)
949
1222
writel (0 , spi_imx -> base + MXC_CSPICTRL );
950
1223
clk_unprepare (spi_imx -> clk_ipg );
951
1224
clk_unprepare (spi_imx -> clk_per );
1225
+ spi_imx_sdma_exit (spi_imx );
952
1226
spi_master_put (master );
953
1227
954
1228
return 0 ;
0 commit comments