@@ -57,6 +57,10 @@
 #define EDMA_MAX_SLOTS		MAX_NR_SG
 #define EDMA_DESCRIPTORS	16
 
+struct edma_pset {
+	struct edmacc_param		param;
+};
+
 struct edma_desc {
 	struct virt_dma_desc		vdesc;
 	struct list_head		node;
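Note: the wrapper is the whole point of the hunk above. Keeping the raw hardware PaRAM image inside a driver-private struct leaves room to attach per-pset software state later without disturbing the hardware layout. A minimal sketch of where this can go; the len and addr fields below are hypothetical examples, not part of this patch:

struct edma_pset {
	u32			len;	/* hypothetical: bytes moved by this pset */
	dma_addr_t		addr;	/* hypothetical: bus address, e.g. for residue math */
	struct edmacc_param	param;	/* the actual hardware PaRAM image */
};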
@@ -65,7 +69,7 @@ struct edma_desc {
 	int				pset_nr;
 	int				processed;
 	u32				residue;
-	struct edmacc_param		pset[0];
+	struct edma_pset		pset[0];
 };
 
 struct edma_cc;
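Because the trailing zero-length array is sized via its element type, the existing allocation pattern keeps working unchanged when the element grows from struct edmacc_param to struct edma_pset. A sketch of that pattern, as used by edma_prep_slave_sg():

	edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc)
		return NULL;
	edesc->pset_nr = sg_len;	/* one pset per scatterlist entry */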
@@ -141,7 +145,7 @@ static void edma_execute(struct edma_chan *echan)
 	/* Write descriptor PaRAM set(s) */
 	for (i = 0; i < nslots; i++) {
 		j = i + edesc->processed;
-		edma_write_slot(echan->slot[i], &edesc->pset[j]);
+		edma_write_slot(echan->slot[i], &edesc->pset[j].param);
 		dev_vdbg(echan->vchan.chan.device->dev,
 			"\n pset[%d]:\n"
 			"  chnum\t%d\n"
@@ -155,14 +159,14 @@ static void edma_execute(struct edma_chan *echan)
 			"  cidx\t%08x\n"
 			"  lkrld\t%08x\n",
 			j, echan->ch_num, echan->slot[i],
-			edesc->pset[j].opt,
-			edesc->pset[j].src,
-			edesc->pset[j].dst,
-			edesc->pset[j].a_b_cnt,
-			edesc->pset[j].ccnt,
-			edesc->pset[j].src_dst_bidx,
-			edesc->pset[j].src_dst_cidx,
-			edesc->pset[j].link_bcntrld);
+			edesc->pset[j].param.opt,
+			edesc->pset[j].param.src,
+			edesc->pset[j].param.dst,
+			edesc->pset[j].param.a_b_cnt,
+			edesc->pset[j].param.ccnt,
+			edesc->pset[j].param.src_dst_bidx,
+			edesc->pset[j].param.src_dst_cidx,
+			edesc->pset[j].param.link_bcntrld);
 		/* Link to the previous slot if not the last set */
 		if (i != (nslots - 1))
 			edma_link(echan->slot[i], echan->slot[i+1]);
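edma_write_slot() still consumes the bare hardware structure, which is why the call site above now passes &edesc->pset[j].param rather than the wrapper itself. Its prototype in the DaVinci private EDMA API is roughly:

	void edma_write_slot(unsigned slot, const struct edmacc_param *param);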
@@ -305,13 +309,14 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
  * @dma_length: Total length of the DMA transfer
  * @direction: Direction of the transfer
  */
-static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
 			dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
 			enum dma_slave_buswidth dev_width, unsigned int dma_length,
 			enum dma_transfer_direction direction)
 {
 	struct edma_chan *echan = to_edma_chan(chan);
 	struct device *dev = chan->device->dev;
+	struct edmacc_param *param = &epset->param;
 	int acnt, bcnt, ccnt, cidx;
 	int src_bidx, dst_bidx, src_cidx, dst_cidx;
 	int absync;
@@ -391,26 +396,26 @@ static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
 		return -EINVAL;
 	}
 
-	pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
 	/* Configure A or AB synchronized transfers */
 	if (absync)
-		pset->opt |= SYNCDIM;
+		param->opt |= SYNCDIM;
 
-	pset->src = src_addr;
-	pset->dst = dst_addr;
+	param->src = src_addr;
+	param->dst = dst_addr;
 
-	pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
-	pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
 
-	pset->a_b_cnt = bcnt << 16 | acnt;
-	pset->ccnt = ccnt;
+	param->a_b_cnt = bcnt << 16 | acnt;
+	param->ccnt = ccnt;
 	/*
 	 * Only time when (bcntrld) auto reload is required is for
 	 * A-sync case, and in this case, a requirement of reload value
 	 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
 	 * and then later will be populated by edma_execute.
 	 */
-	pset->link_bcntrld = 0xffffffff;
+	param->link_bcntrld = 0xffffffff;
 	return absync;
 }
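Each 32-bit PaRAM word packs two 16-bit hardware fields, hence the shift-and-or above. A worked example, assuming acnt = 4 bytes per array, bcnt = 8 arrays per frame, src_bidx = 4 and dst_bidx = 0 (memory to a peripheral FIFO):

	a_b_cnt      = bcnt << 16 | acnt           = 0x00080004
	src_dst_bidx = (dst_bidx << 16) | src_bidx = 0x00000004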
@@ -498,11 +503,11 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 		/* If this is the last in a current SG set of transactions,
 		   enable interrupts so that next set is processed */
 		if (!((i+1) % MAX_NR_SG))
-			edesc->pset[i].opt |= TCINTEN;
+			edesc->pset[i].param.opt |= TCINTEN;
 
 		/* If this is the last set, enable completion interrupt flag */
 		if (i == sg_len - 1)
-			edesc->pset[i].opt |= TCINTEN;
+			edesc->pset[i].param.opt |= TCINTEN;
 	}
 
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
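Both branches above set the same TCINTEN bit: the first fires an intermediate completion interrupt every MAX_NR_SG psets so edma_execute() can reload the channel's limited slots with the next batch, and the second covers the final set. As an illustration, if MAX_NR_SG were 10 (an assumed value) and sg_len were 25, TCINTEN would be set on psets 9, 19 and 24.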
@@ -661,22 +666,22 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 			"  cidx\t%08x\n"
 			"  lkrld\t%08x\n",
 			i, echan->ch_num, echan->slot[i],
-			edesc->pset[i].opt,
-			edesc->pset[i].src,
-			edesc->pset[i].dst,
-			edesc->pset[i].a_b_cnt,
-			edesc->pset[i].ccnt,
-			edesc->pset[i].src_dst_bidx,
-			edesc->pset[i].src_dst_cidx,
-			edesc->pset[i].link_bcntrld);
+			edesc->pset[i].param.opt,
+			edesc->pset[i].param.src,
+			edesc->pset[i].param.dst,
+			edesc->pset[i].param.a_b_cnt,
+			edesc->pset[i].param.ccnt,
+			edesc->pset[i].param.src_dst_bidx,
+			edesc->pset[i].param.src_dst_cidx,
+			edesc->pset[i].param.link_bcntrld);
 
 		edesc->absync = ret;
 
 		/*
 		 * Enable interrupts for every period because callback
 		 * has to be called for every period.
 		 */
-		edesc->pset[i].opt |= TCINTEN;
+		edesc->pset[i].param.opt |= TCINTEN;
 	}
 
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
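From the client side, this per-period TCINTEN is what makes the completion callback run once per period. A usage sketch through the generic dmaengine API; my_period_cb and my_ctx are hypothetical names, not part of this patch:

	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (desc) {
		desc->callback = my_period_cb;	/* hypothetical: runs once per period */
		desc->callback_param = my_ctx;	/* hypothetical: client context */
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}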