@@ -190,14 +190,14 @@ enum AxiDmaDirectionRegister {
 #define XILINX_AXI_DMA_MM2S_REG_OFFSET 0x00
 #define XILINX_AXI_DMA_S2MM_REG_OFFSET 0x30
 
+struct dma_xilinx_axi_dma_data;
+
 /* global configuration per DMA device */
 struct dma_xilinx_axi_dma_config {
 	mm_reg_t reg;
 	/* this should always be 2 - one for TX, one for RX */
 	uint32_t channels;
-	void (*irq_configure)();
-	uint32_t *irq0_channels;
-	size_t irq0_channels_size;
+	void (*irq_configure)(struct dma_xilinx_axi_dma_data *data);
 };
 
 typedef void (*dma_xilinx_axi_dma_isr_t)(const struct device *dev);
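A forward declaration is all the new callback signature needs: C requires the complete type only where members are accessed, so a pointer to the not-yet-defined data struct is legal in the function-pointer parameter list. A minimal standalone sketch of the idiom (the example_* names are hypothetical, not part of the driver):

#include <stdint.h>

/* Incomplete type: enough for declaring pointers to it. */
struct example_data;

struct example_config {
	/* Legal: only a pointer to the incomplete type appears here. */
	void (*irq_configure)(struct example_data *data);
};

/* The full definition can follow later, as it does in this driver. */
struct example_data {
	uint32_t irq;
};
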
@@ -216,6 +216,8 @@ struct dma_xilinx_axi_dma_channel {
 
 	mm_reg_t channel_regs;
 
+	uint32_t irq;
+
 	enum dma_channel_direction direction;
 
 	/* call this when the transfer is complete */
@@ -234,82 +236,59 @@ struct dma_xilinx_axi_dma_data {
 	struct dma_xilinx_axi_dma_channel *channels;
 };
 
-#ifdef CONFIG_DMA_XILINX_AXI_DMA_LOCK_ALL_IRQS
-static inline int dma_xilinx_axi_dma_lock_irq(const struct dma_xilinx_axi_dma_config *cfg,
-					      const uint32_t channel_num)
-{
-	(void)cfg;
-	(void)channel_num;
-	return irq_lock();
-}
-
-static inline void dma_xilinx_axi_dma_unlock_irq(const struct dma_xilinx_axi_dma_config *cfg,
-						 const uint32_t channel_num, int key)
-{
-	(void)cfg;
-	(void)channel_num;
-	return irq_unlock(key);
-}
-#elif defined(CONFIG_DMA_XILINX_AXI_DMA_LOCK_DMA_IRQS)
-static inline int dma_xilinx_axi_dma_lock_irq(const struct dma_xilinx_axi_dma_config *cfg,
-					      const uint32_t channel_num)
+static inline int dma_xilinx_axi_dma_lock_irq(const struct device *dev, const uint32_t channel_num)
 {
+	const struct dma_xilinx_axi_dma_data *data = dev->data;
 	int ret;
-	(void)channel_num;
-
-	/* TX is 0, RX is 1 */
-	ret = irq_is_enabled(cfg->irq0_channels[0]) ? 1 : 0;
-	ret |= (irq_is_enabled(cfg->irq0_channels[1]) ? 1 : 0) << 1;
 
-	LOG_DBG("DMA IRQ state: %x TX IRQN: %" PRIu32 " RX IRQN: %" PRIu32, ret,
-		cfg->irq0_channels[0], cfg->irq0_channels[1]);
+	if (IS_ENABLED(CONFIG_DMA_XILINX_AXI_DMA_LOCK_ALL_IRQS)) {
+		ret = irq_lock();
+	} else if (IS_ENABLED(CONFIG_DMA_XILINX_AXI_DMA_LOCK_DMA_IRQS)) {
+		/* TX is 0, RX is 1 */
+		ret = irq_is_enabled(data->channels[0].irq) ? 1 : 0;
+		ret |= (irq_is_enabled(data->channels[1].irq) ? 1 : 0) << 1;
 
-	irq_disable(cfg->irq0_channels[0]);
-	irq_disable(cfg->irq0_channels[1]);
+		LOG_DBG("DMA IRQ state: %x TX IRQN: %" PRIu32 " RX IRQN: %" PRIu32, ret,
+			data->channels[0].irq, data->channels[1].irq);
 
-	return ret;
-}
+		irq_disable(data->channels[0].irq);
+		irq_disable(data->channels[1].irq);
+	} else {
+		/* CONFIG_DMA_XILINX_AXI_DMA_LOCK_CHANNEL_IRQ */
+		ret = irq_is_enabled(data->channels[channel_num].irq);
 
-static inline void dma_xilinx_axi_dma_unlock_irq(const struct dma_xilinx_axi_dma_config *cfg,
-						 const uint32_t channel_num, int key)
-{
-	(void)channel_num;
+		LOG_DBG("DMA IRQ state: %x ", ret);
 
-	if (key & 0x1) {
-		/* TX was enabled */
-		irq_enable(cfg->irq0_channels[0]);
+		irq_disable(data->channels[channel_num].irq);
 	}
-	if (key & 0x2) {
-		/* RX was enabled */
-		irq_enable(cfg->irq0_channels[1]);
-	}
-}
-#elif defined(CONFIG_DMA_XILINX_AXI_DMA_LOCK_CHANNEL_IRQ)
-static inline int dma_xilinx_axi_dma_lock_irq(const struct dma_xilinx_axi_dma_config *cfg,
-					      const uint32_t channel_num)
-{
-	int ret;
-
-	ret = irq_is_enabled(cfg->irq0_channels[channel_num]);
-
-	LOG_DBG("DMA IRQ state: %x ", ret);
-
-	irq_disable(cfg->irq0_channels[channel_num]);
 
 	return ret;
 }
 
-static inline void dma_xilinx_axi_dma_unlock_irq(const struct dma_xilinx_axi_dma_config *cfg,
+static inline void dma_xilinx_axi_dma_unlock_irq(const struct device *dev,
 						 const uint32_t channel_num, int key)
 {
-	if (key) {
-		/* was enabled */
-		irq_enable(cfg->irq0_channels[channel_num]);
+	const struct dma_xilinx_axi_dma_data *data = dev->data;
+
+	if (IS_ENABLED(CONFIG_DMA_XILINX_AXI_DMA_LOCK_ALL_IRQS)) {
+		irq_unlock(key);
+	} else if (IS_ENABLED(CONFIG_DMA_XILINX_AXI_DMA_LOCK_DMA_IRQS)) {
+		if (key & 0x1) {
+			/* TX was enabled */
+			irq_enable(data->channels[0].irq);
+		}
+		if (key & 0x2) {
+			/* RX was enabled */
+			irq_enable(data->channels[1].irq);
+		}
+	} else {
+		/* CONFIG_DMA_XILINX_AXI_DMA_LOCK_CHANNEL_IRQ */
+		if (key) {
+			/* was enabled */
+			irq_enable(data->channels[channel_num].irq);
+		}
 	}
 }
-#else
-#error "No IRQ strategy selected in Kconfig!"
-#endif
 
 static void dma_xilinx_axi_dma_write_reg(const struct dma_xilinx_axi_dma_channel *channel_data,
					 enum AxiDmaDirectionRegister reg, uint32_t val)
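Folding the three preprocessor variants into one body relies on IS_ENABLED(), which evaluates to 0 or 1 at compile time: the untaken branches are still type-checked but discarded by the optimizer, and the trailing else assumes CONFIG_DMA_XILINX_AXI_DMA_LOCK_CHANNEL_IRQ is the remaining Kconfig choice (the old #error guard is dropped). Callers pair the two helpers around descriptor-ring updates, as the hunks below show; a minimal sketch of the pairing (example_update_ring is a hypothetical caller, not driver code):

/* Hypothetical caller: serialize one channel's descriptor-ring update
 * against the TX/RX ISRs using the new device-based helpers. */
static int example_update_ring(const struct device *dev, uint32_t channel)
{
	const int irq_key = dma_xilinx_axi_dma_lock_irq(dev, channel);

	/* ... touch channel state that the ISRs also read ... */

	dma_xilinx_axi_dma_unlock_irq(dev, channel, irq_key);
	return 0;
}
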
@@ -443,7 +422,11 @@ static void dma_xilinx_axi_dma_tx_isr(const struct device *dev)
 	struct dma_xilinx_axi_dma_data *data = dev->data;
 	struct dma_xilinx_axi_dma_channel *channel_data =
 		&data->channels[XILINX_AXI_DMA_TX_CHANNEL_NUM];
-	uint32_t dmasr = dma_xilinx_axi_dma_read_reg(channel_data, XILINX_AXI_DMA_REG_DMASR);
+	const int irq_enabled = irq_is_enabled(channel_data->irq);
+	uint32_t dmasr;
+
+	irq_disable(channel_data->irq);
+	dmasr = dma_xilinx_axi_dma_read_reg(channel_data, XILINX_AXI_DMA_REG_DMASR);
 
 	if (dmasr & XILINX_AXI_DMA_REGS_DMASR_ERR_IRQ) {
 		LOG_ERR("DMA reports TX error, DMASR = 0x%" PRIx32, dmasr);
@@ -464,14 +447,21 @@ static void dma_xilinx_axi_dma_tx_isr(const struct device *dev)
 
 		LOG_DBG("Completed %u TX packets in this ISR!\n", processed_packets);
 	}
+	if (irq_enabled) {
+		irq_enable(channel_data->irq);
+	}
 }
 
 static void dma_xilinx_axi_dma_rx_isr(const struct device *dev)
 {
 	struct dma_xilinx_axi_dma_data *data = dev->data;
 	struct dma_xilinx_axi_dma_channel *channel_data =
 		&data->channels[XILINX_AXI_DMA_RX_CHANNEL_NUM];
-	uint32_t dmasr = dma_xilinx_axi_dma_read_reg(channel_data, XILINX_AXI_DMA_REG_DMASR);
+	const int irq_enabled = irq_is_enabled(channel_data->irq);
+	uint32_t dmasr;
+
+	irq_disable(channel_data->irq);
+	dmasr = dma_xilinx_axi_dma_read_reg(channel_data, XILINX_AXI_DMA_REG_DMASR);
 
 	if (dmasr & XILINX_AXI_DMA_REGS_DMASR_ERR_IRQ) {
 		LOG_ERR("DMA reports RX error, DMASR = 0x%" PRIx32, dmasr);
@@ -492,6 +482,9 @@ static void dma_xilinx_axi_dma_rx_isr(const struct device *dev)
 
 		LOG_DBG("Cleaned up %u RX packets in this ISR!", processed_packets);
 	}
+	if (irq_enabled) {
+		irq_enable(channel_data->irq);
+	}
 }
 
 #ifdef CONFIG_DMA_64BIT
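Both ISRs now share the same mask-while-handling shape: record whether the channel's IRQ line was enabled, mask it, read DMASR and drain completed descriptors, then re-enable only if the line was enabled on entry, so a lock taken elsewhere is not silently undone. A generic sketch of that shape (example_isr, example_irq, and example_drain are placeholders, not driver code):

static unsigned int example_irq;                /* placeholder IRQ line */
static void example_drain(const struct device *dev);

static void example_isr(const struct device *dev)
{
	const int was_enabled = irq_is_enabled(example_irq);

	irq_disable(example_irq);       /* keep the line quiet while draining */
	example_drain(dev);
	if (was_enabled) {
		irq_enable(example_irq); /* restore only the state found on entry */
	}
}
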
@@ -508,12 +501,12 @@ static int dma_xilinx_axi_dma_start(const struct device *dev, uint32_t channel)
 	volatile struct dma_xilinx_axi_dma_sg_descriptor *current_descriptor;
 
 	/* running ISR in parallel could cause issues with the metadata */
-	const int irq_key = dma_xilinx_axi_dma_lock_irq(cfg, channel);
+	const int irq_key = dma_xilinx_axi_dma_lock_irq(dev, channel);
 
 	if (channel >= cfg->channels) {
 		LOG_ERR("Invalid channel %" PRIu32 " - must be < %" PRIu32 "!", channel,
 			cfg->channels);
-		dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);
+		dma_xilinx_axi_dma_unlock_irq(dev, channel, irq_key);
 		return -EINVAL;
 	}
 
@@ -570,7 +563,7 @@ static int dma_xilinx_axi_dma_start(const struct device *dev, uint32_t channel)
 			    (uint32_t)(uintptr_t)current_descriptor);
 #endif
 
-	dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);
+	dma_xilinx_axi_dma_unlock_irq(dev, channel, irq_key);
 
 	/* commit stores before returning to caller */
 	barrier_dmem_fence_full();
@@ -634,16 +627,16 @@ static int dma_xilinx_axi_dma_get_status(const struct device *dev, uint32_t chan
  * If is_first or is_last are NOT set, the buffer is considered part of a SG transfer consisting of
  * multiple blocks. Otherwise, the block is one transfer.
  */
-static inline int dma_xilinx_axi_dma_transfer_block(const struct dma_xilinx_axi_dma_config *cfg,
-						    uint32_t channel,
-						    struct dma_xilinx_axi_dma_channel *channel_data,
+static inline int dma_xilinx_axi_dma_transfer_block(const struct device *dev, uint32_t channel,
 						    dma_addr_t buffer_addr, size_t block_size,
 						    bool is_first, bool is_last)
 {
+	struct dma_xilinx_axi_dma_data *data = dev->data;
+	struct dma_xilinx_axi_dma_channel *channel_data = &data->channels[channel];
 	volatile struct dma_xilinx_axi_dma_sg_descriptor *current_descriptor;
 
 	/* running ISR in parallel could cause issues with the metadata */
-	const int irq_key = dma_xilinx_axi_dma_lock_irq(cfg, channel);
+	const int irq_key = dma_xilinx_axi_dma_lock_irq(dev, channel);
 	size_t next_desc_index = channel_data->populated_desc_index + 1;
 
 	if (next_desc_index >= channel_data->num_descriptors) {
@@ -657,7 +650,7 @@ static inline int dma_xilinx_axi_dma_transfer_block(const struct dma_xilinx_axi_
 		/* Do not overwrite this descriptor as it has not been completed yet. */
 		LOG_WRN("Descriptor %" PRIu32 " is not yet completed, not starting new transfer!",
 			next_desc_index);
-		dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);
+		dma_xilinx_axi_dma_unlock_irq(dev, channel, irq_key);
 		return -EBUSY;
 	}
 
@@ -669,7 +662,7 @@ static inline int dma_xilinx_axi_dma_transfer_block(const struct dma_xilinx_axi_
 	if (((uintptr_t)buffer_addr & (sys_cache_data_line_size_get() - 1)) ||
 	    (block_size & (sys_cache_data_line_size_get() - 1))) {
 		LOG_ERR("RX buffer address and block size must be cache line size aligned");
-		dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);
+		dma_xilinx_axi_dma_unlock_irq(dev, channel, irq_key);
 		return -EINVAL;
 	}
 #endif
@@ -690,7 +683,7 @@ static inline int dma_xilinx_axi_dma_transfer_block(const struct dma_xilinx_axi_
 	if (block_size > UINT32_MAX) {
 		LOG_ERR("Too large block: %zu bytes!", block_size);
 
-		dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);
+		dma_xilinx_axi_dma_unlock_irq(dev, channel, irq_key);
 
 		return -EINVAL;
 	}
@@ -712,7 +705,7 @@ static inline int dma_xilinx_axi_dma_transfer_block(const struct dma_xilinx_axi_
 
 	channel_data->populated_desc_index = next_desc_index;
 
-	dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);
+	dma_xilinx_axi_dma_unlock_irq(dev, channel, irq_key);
 
 	return 0;
 }
@@ -726,8 +719,6 @@ static inline int dma_xilinx_axi_dma_config_reload(const struct device *dev, uin
 #endif
 {
 	const struct dma_xilinx_axi_dma_config *cfg = dev->config;
-	struct dma_xilinx_axi_dma_data *data = dev->data;
-	struct dma_xilinx_axi_dma_channel *channel_data = &data->channels[channel];
 
 	if (channel >= cfg->channels) {
 		LOG_ERR("Invalid channel %" PRIu32 " - must be < %" PRIu32 "!", channel,
@@ -736,8 +727,8 @@ static inline int dma_xilinx_axi_dma_config_reload(const struct device *dev, uin
 	}
 	/* one-block-at-a-time transfer */
 	return dma_xilinx_axi_dma_transfer_block(
-		cfg, channel, channel_data, channel == XILINX_AXI_DMA_TX_CHANNEL_NUM ? src : dst,
-		size, true, true);
+		dev, channel, channel == XILINX_AXI_DMA_TX_CHANNEL_NUM ? src : dst, size, true,
+		true);
 }
 
 static int dma_xilinx_axi_dma_configure(const struct device *dev, uint32_t channel,
@@ -873,7 +864,7 @@ static int dma_xilinx_axi_dma_configure(const struct device *dev, uint32_t chann
 
 	do {
 		ret = ret ||
-		      dma_xilinx_axi_dma_transfer_block(cfg, channel, &data->channels[channel],
+		      dma_xilinx_axi_dma_transfer_block(dev, channel,
							channel == XILINX_AXI_DMA_TX_CHANNEL_NUM
								? current_block->source_address
								: current_block->dest_address,
@@ -958,42 +949,29 @@ static int dma_xilinx_axi_dma_init(const struct device *dev)
 		return -EIO;
 	}
 
-	cfg->irq_configure();
+	cfg->irq_configure(data);
 	return 0;
 }
 
-/* first IRQ is TX */
-#define TX_IRQ_CONFIGURE(inst) \
-	IRQ_CONNECT(DT_INST_IRQN_BY_IDX(inst, 0), DT_INST_IRQ_BY_IDX(inst, 0, priority), \
-		    dma_xilinx_axi_dma_tx_isr, DEVICE_DT_INST_GET(inst), 0); \
-	irq_enable(DT_INST_IRQN_BY_IDX(inst, 0));
-/* second IRQ is RX */
-#define RX_IRQ_CONFIGURE(inst) \
-	IRQ_CONNECT(DT_INST_IRQN_BY_IDX(inst, 1), DT_INST_IRQ_BY_IDX(inst, 1, priority), \
-		    dma_xilinx_axi_dma_rx_isr, DEVICE_DT_INST_GET(inst), 0); \
-	irq_enable(DT_INST_IRQN_BY_IDX(inst, 1));
-
-#define CONFIGURE_ALL_IRQS(inst) \
-	TX_IRQ_CONFIGURE(inst); \
-	RX_IRQ_CONFIGURE(inst);
-
 #define XILINX_AXI_DMA_INIT(inst) \
-	static void dma_xilinx_axi_dma##inst##_irq_configure(void) \
+	static void dma_xilinx_axi_dma##inst##_irq_configure(struct dma_xilinx_axi_dma_data *data) \
 	{ \
-		CONFIGURE_ALL_IRQS(inst); \
+		data->channels[XILINX_AXI_DMA_TX_CHANNEL_NUM].irq = DT_INST_IRQN_BY_IDX(inst, 0); \
+		IRQ_CONNECT(DT_INST_IRQN_BY_IDX(inst, 0), DT_INST_IRQ_BY_IDX(inst, 0, priority), \
+			    dma_xilinx_axi_dma_tx_isr, DEVICE_DT_INST_GET(inst), 0); \
+		irq_enable(DT_INST_IRQN_BY_IDX(inst, 0)); \
+		data->channels[XILINX_AXI_DMA_RX_CHANNEL_NUM].irq = DT_INST_IRQN_BY_IDX(inst, 1); \
+		IRQ_CONNECT(DT_INST_IRQN_BY_IDX(inst, 1), DT_INST_IRQ_BY_IDX(inst, 1, priority), \
+			    dma_xilinx_axi_dma_rx_isr, DEVICE_DT_INST_GET(inst), 0); \
+		irq_enable(DT_INST_IRQN_BY_IDX(inst, 1)); \
 	} \
-	static uint32_t dma_xilinx_axi_dma##inst##_irq0_channels[] = \
-		DT_INST_PROP_OR(inst, interrupts, {0}); \
 	static const struct dma_xilinx_axi_dma_config dma_xilinx_axi_dma##inst##_config = { \
 		.reg = DT_INST_REG_ADDR(inst), \
 		.channels = DT_INST_PROP(inst, dma_channels), \
 		.irq_configure = dma_xilinx_axi_dma##inst##_irq_configure, \
-		.irq0_channels = dma_xilinx_axi_dma##inst##_irq0_channels, \
-		.irq0_channels_size = ARRAY_SIZE(dma_xilinx_axi_dma##inst##_irq0_channels), \
 	}; \
 	static struct dma_xilinx_axi_dma_channel \
 		dma_xilinx_axi_dma##inst##_channels[DT_INST_PROP(inst, dma_channels)]; \
-	ATOMIC_DEFINE(dma_xilinx_axi_dma_atomic##inst, DT_INST_PROP(inst, dma_channels)); \
 	static struct dma_xilinx_axi_dma_data dma_xilinx_axi_dma##inst##_data = { \
 		.ctx = {.magic = DMA_MAGIC, .atomic = NULL}, \
 		.channels = dma_xilinx_axi_dma##inst##_channels, \
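The generated per-instance hook now records each channel's IRQ number in the channel state before connecting and enabling it, which is what lets the lock helpers above consult data->channels[i].irq at run time. For illustration, the hook for a hypothetical instance 0 would expand roughly to:

/* Illustrative expansion for instance 0; IRQ numbers and priorities come
 * from the instance's devicetree node. */
static void dma_xilinx_axi_dma0_irq_configure(struct dma_xilinx_axi_dma_data *data)
{
	data->channels[XILINX_AXI_DMA_TX_CHANNEL_NUM].irq = DT_INST_IRQN_BY_IDX(0, 0);
	IRQ_CONNECT(DT_INST_IRQN_BY_IDX(0, 0), DT_INST_IRQ_BY_IDX(0, 0, priority),
		    dma_xilinx_axi_dma_tx_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQN_BY_IDX(0, 0));

	data->channels[XILINX_AXI_DMA_RX_CHANNEL_NUM].irq = DT_INST_IRQN_BY_IDX(0, 1);
	IRQ_CONNECT(DT_INST_IRQN_BY_IDX(0, 1), DT_INST_IRQ_BY_IDX(0, 1, priority),
		    dma_xilinx_axi_dma_rx_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQN_BY_IDX(0, 1));
}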