Commit d20a04d

drivers: dma: dma_xilinx_axi_dma: Fix up IRQ locking
The way the driver was storing IRQ numbers for later use in the various
locking modes was not correct, causing the wrong IRQs to potentially be
disabled/enabled in some modes. Refactor how the IRQ numbers are stored to
be cleaner, and restructure how the different locking modes are implemented
so that all modes receive compile test coverage.

Also, ensure that the IRQ for the RX or TX channel is always disabled during
the execution of the corresponding ISR, to prevent the ISR from being
preempted by itself if the DMA core raises another interrupt while the ISR
is executing.

Signed-off-by: Robert Hancock <[email protected]>
1 parent 53f7c35 commit d20a04d
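
A note on the compile-coverage claim in the message: with the old #ifdef/#elif chain, the compiler only ever saw the one locking mode selected in Kconfig, so the other branches could silently rot. Zephyr's IS_ENABLED() macro instead keeps every branch in the translation unit, where it is parsed and type-checked on every build, and constant folding removes the untaken ones. A minimal sketch of the pattern, using a hypothetical CONFIG_FOO symbol and stub helpers:

#include <zephyr/sys/util.h> /* IS_ENABLED() */

/* Stub helpers standing in for the per-mode locking paths. */
static int lock_all_irqs(void) { return 1; }
static int lock_one_irq(void) { return 2; }

static int pick_lock(void)
{
	/* Both branches compile on every build, unlike #ifdef blocks;
	 * the optimizer then drops whichever branch is not taken.
	 */
	if (IS_ENABLED(CONFIG_FOO)) {
		return lock_all_irqs();
	} else {
		return lock_one_irq();
	}
}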

File tree

1 file changed: +82 -104 lines changed

drivers/dma/dma_xilinx_axi_dma.c

Lines changed: 82 additions & 104 deletions
@@ -190,14 +190,14 @@ enum AxiDmaDirectionRegister {
 #define XILINX_AXI_DMA_MM2S_REG_OFFSET 0x00
 #define XILINX_AXI_DMA_S2MM_REG_OFFSET 0x30
 
+struct dma_xilinx_axi_dma_data;
+
 /* global configuration per DMA device */
 struct dma_xilinx_axi_dma_config {
 	mm_reg_t reg;
 	/* this should always be 2 - one for TX, one for RX */
 	uint32_t channels;
-	void (*irq_configure)();
-	uint32_t *irq0_channels;
-	size_t irq0_channels_size;
+	void (*irq_configure)(struct dma_xilinx_axi_dma_data *data);
 };
 
 typedef void (*dma_xilinx_axi_dma_isr_t)(const struct device *dev);
@@ -216,6 +216,8 @@ struct dma_xilinx_axi_dma_channel {
 
 	mm_reg_t channel_regs;
 
+	uint32_t irq;
+
 	enum dma_channel_direction direction;
 
 	/* call this when the transfer is complete */
@@ -234,82 +236,59 @@ struct dma_xilinx_axi_dma_data {
 	struct dma_xilinx_axi_dma_channel *channels;
 };
 
-#ifdef CONFIG_DMA_XILINX_AXI_DMA_LOCK_ALL_IRQS
-static inline int dma_xilinx_axi_dma_lock_irq(const struct dma_xilinx_axi_dma_config *cfg,
-					      const uint32_t channel_num)
-{
-	(void)cfg;
-	(void)channel_num;
-	return irq_lock();
-}
-
-static inline void dma_xilinx_axi_dma_unlock_irq(const struct dma_xilinx_axi_dma_config *cfg,
-						 const uint32_t channel_num, int key)
-{
-	(void)cfg;
-	(void)channel_num;
-	return irq_unlock(key);
-}
-#elif defined(CONFIG_DMA_XILINX_AXI_DMA_LOCK_DMA_IRQS)
-static inline int dma_xilinx_axi_dma_lock_irq(const struct dma_xilinx_axi_dma_config *cfg,
-					      const uint32_t channel_num)
+static inline int dma_xilinx_axi_dma_lock_irq(const struct device *dev, const uint32_t channel_num)
 {
+	const struct dma_xilinx_axi_dma_data *data = dev->data;
 	int ret;
-	(void)channel_num;
-
-	/* TX is 0, RX is 1 */
-	ret = irq_is_enabled(cfg->irq0_channels[0]) ? 1 : 0;
-	ret |= (irq_is_enabled(cfg->irq0_channels[1]) ? 1 : 0) << 1;
 
-	LOG_DBG("DMA IRQ state: %x TX IRQN: %" PRIu32 " RX IRQN: %" PRIu32, ret,
-		cfg->irq0_channels[0], cfg->irq0_channels[1]);
+	if (IS_ENABLED(CONFIG_DMA_XILINX_AXI_DMA_LOCK_ALL_IRQS)) {
+		ret = irq_lock();
+	} else if (IS_ENABLED(CONFIG_DMA_XILINX_AXI_DMA_LOCK_DMA_IRQS)) {
+		/* TX is 0, RX is 1 */
+		ret = irq_is_enabled(data->channels[0].irq) ? 1 : 0;
+		ret |= (irq_is_enabled(data->channels[1].irq) ? 1 : 0) << 1;
 
-	irq_disable(cfg->irq0_channels[0]);
-	irq_disable(cfg->irq0_channels[1]);
+		LOG_DBG("DMA IRQ state: %x TX IRQN: %" PRIu32 " RX IRQN: %" PRIu32, ret,
+			data->channels[0].irq, data->channels[1].irq);
 
-	return ret;
-}
+		irq_disable(data->channels[0].irq);
+		irq_disable(data->channels[1].irq);
+	} else {
+		/* CONFIG_DMA_XILINX_AXI_DMA_LOCK_CHANNEL_IRQ */
+		ret = irq_is_enabled(data->channels[channel_num].irq);
 
-static inline void dma_xilinx_axi_dma_unlock_irq(const struct dma_xilinx_axi_dma_config *cfg,
-						 const uint32_t channel_num, int key)
-{
-	(void)channel_num;
+		LOG_DBG("DMA IRQ state: %x ", ret);
 
-	if (key & 0x1) {
-		/* TX was enabled */
-		irq_enable(cfg->irq0_channels[0]);
+		irq_disable(data->channels[channel_num].irq);
 	}
-	if (key & 0x2) {
-		/* RX was enabled */
-		irq_enable(cfg->irq0_channels[1]);
-	}
-}
-#elif defined(CONFIG_DMA_XILINX_AXI_DMA_LOCK_CHANNEL_IRQ)
-static inline int dma_xilinx_axi_dma_lock_irq(const struct dma_xilinx_axi_dma_config *cfg,
-					      const uint32_t channel_num)
-{
-	int ret;
-
-	ret = irq_is_enabled(cfg->irq0_channels[channel_num]);
-
-	LOG_DBG("DMA IRQ state: %x ", ret);
-
-	irq_disable(cfg->irq0_channels[channel_num]);
 
 	return ret;
 }
 
-static inline void dma_xilinx_axi_dma_unlock_irq(const struct dma_xilinx_axi_dma_config *cfg,
+static inline void dma_xilinx_axi_dma_unlock_irq(const struct device *dev,
 						 const uint32_t channel_num, int key)
 {
-	if (key) {
-		/* was enabled */
-		irq_enable(cfg->irq0_channels[channel_num]);
+	const struct dma_xilinx_axi_dma_data *data = dev->data;
+
+	if (IS_ENABLED(CONFIG_DMA_XILINX_AXI_DMA_LOCK_ALL_IRQS)) {
+		irq_unlock(key);
+	} else if (IS_ENABLED(CONFIG_DMA_XILINX_AXI_DMA_LOCK_DMA_IRQS)) {
+		if (key & 0x1) {
+			/* TX was enabled */
+			irq_enable(data->channels[0].irq);
+		}
+		if (key & 0x2) {
+			/* RX was enabled */
+			irq_enable(data->channels[1].irq);
+		}
+	} else {
+		/* CONFIG_DMA_XILINX_AXI_DMA_LOCK_CHANNEL_IRQ */
+		if (key) {
+			/* was enabled */
+			irq_enable(data->channels[channel_num].irq);
		}
 	}
 }
-#else
-#error "No IRQ strategy selected in Kconfig!"
-#endif
 
 static void dma_xilinx_axi_dma_write_reg(const struct dma_xilinx_axi_dma_channel *channel_data,
 					 enum AxiDmaDirectionRegister reg, uint32_t val)
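
For readers tracing the callers further down: the value returned by the lock helper is a saved-state key, not an error code. Under LOCK_ALL_IRQS it is the architecture-level irq_lock() key; under LOCK_DMA_IRQS bit 0 records whether the TX IRQ was enabled and bit 1 whether the RX IRQ was; under LOCK_CHANNEL_IRQ it is the prior enable state of that one channel's IRQ. A minimal caller sketch under those semantics (the function name is hypothetical):

/* Hypothetical caller: serialize descriptor-ring updates against the ISR.
 * The key records which IRQs were enabled at lock time; unlock re-enables
 * exactly those, so a lock taken with IRQs already masked stays masked.
 */
static void touch_ring(const struct device *dev, uint32_t channel)
{
	const int key = dma_xilinx_axi_dma_lock_irq(dev, channel);

	/* ... update per-channel descriptor metadata here ... */

	dma_xilinx_axi_dma_unlock_irq(dev, channel, key);
}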
@@ -443,7 +422,11 @@ static void dma_xilinx_axi_dma_tx_isr(const struct device *dev)
 	struct dma_xilinx_axi_dma_data *data = dev->data;
 	struct dma_xilinx_axi_dma_channel *channel_data =
 		&data->channels[XILINX_AXI_DMA_TX_CHANNEL_NUM];
-	uint32_t dmasr = dma_xilinx_axi_dma_read_reg(channel_data, XILINX_AXI_DMA_REG_DMASR);
+	const int irq_enabled = irq_is_enabled(channel_data->irq);
+	uint32_t dmasr;
+
+	irq_disable(channel_data->irq);
+	dmasr = dma_xilinx_axi_dma_read_reg(channel_data, XILINX_AXI_DMA_REG_DMASR);
 
 	if (dmasr & XILINX_AXI_DMA_REGS_DMASR_ERR_IRQ) {
 		LOG_ERR("DMA reports TX error, DMASR = 0x%" PRIx32, dmasr);
@@ -464,14 +447,21 @@ static void dma_xilinx_axi_dma_tx_isr(const struct device *dev)
 
 		LOG_DBG("Completed %u TX packets in this ISR!\n", processed_packets);
 	}
+	if (irq_enabled) {
+		irq_enable(channel_data->irq);
+	}
 }
 
 static void dma_xilinx_axi_dma_rx_isr(const struct device *dev)
 {
 	struct dma_xilinx_axi_dma_data *data = dev->data;
 	struct dma_xilinx_axi_dma_channel *channel_data =
 		&data->channels[XILINX_AXI_DMA_RX_CHANNEL_NUM];
-	uint32_t dmasr = dma_xilinx_axi_dma_read_reg(channel_data, XILINX_AXI_DMA_REG_DMASR);
+	const int irq_enabled = irq_is_enabled(channel_data->irq);
+	uint32_t dmasr;
+
+	irq_disable(channel_data->irq);
+	dmasr = dma_xilinx_axi_dma_read_reg(channel_data, XILINX_AXI_DMA_REG_DMASR);
 
 	if (dmasr & XILINX_AXI_DMA_REGS_DMASR_ERR_IRQ) {
 		LOG_ERR("DMA reports RX error, DMASR = 0x%" PRIx32, dmasr);
@@ -492,6 +482,9 @@ static void dma_xilinx_axi_dma_rx_isr(const struct device *dev)
 
 		LOG_DBG("Cleaned up %u RX packets in this ISR!", processed_packets);
 	}
+	if (irq_enabled) {
+		irq_enable(channel_data->irq);
+	}
 }
 
 #ifdef CONFIG_DMA_64BIT
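
Both ISRs now share a mask-early, conditionally-restore shape: snapshot the enable state, mask the channel's own IRQ line before reading DMASR, and re-enable only if the line was enabled on entry. Any further interrupt the DMA core raises while the handler runs therefore stays pending rather than preempting the handler. The skeleton, distilled (helper name hypothetical):

/* Skeleton of the guard pattern used by both ISRs above (hypothetical
 * helper): masking our own line first means a new interrupt raised by
 * the DMA core while we run stays pending instead of preempting us.
 */
static void guarded_isr_body(uint32_t irq)
{
	const int was_enabled = irq_is_enabled(irq);

	irq_disable(irq);

	/* ... read DMASR, ack IRQ bits, reap completed descriptors ... */

	if (was_enabled) {
		irq_enable(irq);
	}
}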
@@ -508,12 +501,12 @@ static int dma_xilinx_axi_dma_start(const struct device *dev, uint32_t channel)
 	volatile struct dma_xilinx_axi_dma_sg_descriptor *current_descriptor;
 
 	/* running ISR in parallel could cause issues with the metadata */
-	const int irq_key = dma_xilinx_axi_dma_lock_irq(cfg, channel);
+	const int irq_key = dma_xilinx_axi_dma_lock_irq(dev, channel);
 
 	if (channel >= cfg->channels) {
 		LOG_ERR("Invalid channel %" PRIu32 " - must be < %" PRIu32 "!", channel,
 			cfg->channels);
-		dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);
+		dma_xilinx_axi_dma_unlock_irq(dev, channel, irq_key);
 		return -EINVAL;
 	}
 
@@ -570,7 +563,7 @@ static int dma_xilinx_axi_dma_start(const struct device *dev, uint32_t channel)
 			(uint32_t)(uintptr_t)current_descriptor);
 #endif
 
-	dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);
+	dma_xilinx_axi_dma_unlock_irq(dev, channel, irq_key);
 
 	/* commit stores before returning to caller */
 	barrier_dmem_fence_full();
@@ -634,16 +627,16 @@ static int dma_xilinx_axi_dma_get_status(const struct device *dev, uint32_t chan
  * If is_first or is_last are NOT set, the buffer is considered part of a SG transfer consisting of
  * multiple blocks. Otherwise, the block is one transfer.
  */
-static inline int dma_xilinx_axi_dma_transfer_block(const struct dma_xilinx_axi_dma_config *cfg,
-						    uint32_t channel,
-						    struct dma_xilinx_axi_dma_channel *channel_data,
+static inline int dma_xilinx_axi_dma_transfer_block(const struct device *dev, uint32_t channel,
 						    dma_addr_t buffer_addr, size_t block_size,
 						    bool is_first, bool is_last)
 {
+	struct dma_xilinx_axi_dma_data *data = dev->data;
+	struct dma_xilinx_axi_dma_channel *channel_data = &data->channels[channel];
 	volatile struct dma_xilinx_axi_dma_sg_descriptor *current_descriptor;
 
 	/* running ISR in parallel could cause issues with the metadata */
-	const int irq_key = dma_xilinx_axi_dma_lock_irq(cfg, channel);
+	const int irq_key = dma_xilinx_axi_dma_lock_irq(dev, channel);
 	size_t next_desc_index = channel_data->populated_desc_index + 1;
 
 	if (next_desc_index >= channel_data->num_descriptors) {
@@ -657,7 +650,7 @@ static inline int dma_xilinx_axi_dma_transfer_block(const struct dma_xilinx_axi_
 		/* Do not overwrite this descriptor as it has not been completed yet. */
 		LOG_WRN("Descriptor %" PRIu32 " is not yet completed, not starting new transfer!",
 			next_desc_index);
-		dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);
+		dma_xilinx_axi_dma_unlock_irq(dev, channel, irq_key);
 		return -EBUSY;
 	}
 
@@ -669,7 +662,7 @@ static inline int dma_xilinx_axi_dma_transfer_block(const struct dma_xilinx_axi_
 	if (((uintptr_t)buffer_addr & (sys_cache_data_line_size_get() - 1)) ||
 	    (block_size & (sys_cache_data_line_size_get() - 1))) {
 		LOG_ERR("RX buffer address and block size must be cache line size aligned");
-		dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);
+		dma_xilinx_axi_dma_unlock_irq(dev, channel, irq_key);
 		return -EINVAL;
 	}
 #endif
@@ -690,7 +683,7 @@ static inline int dma_xilinx_axi_dma_transfer_block(const struct dma_xilinx_axi_
 	if (block_size > UINT32_MAX) {
 		LOG_ERR("Too large block: %zu bytes!", block_size);
 
-		dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);
+		dma_xilinx_axi_dma_unlock_irq(dev, channel, irq_key);
 
 		return -EINVAL;
 	}
@@ -712,7 +705,7 @@ static inline int dma_xilinx_axi_dma_transfer_block(const struct dma_xilinx_axi_
 
 	channel_data->populated_desc_index = next_desc_index;
 
-	dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);
+	dma_xilinx_axi_dma_unlock_irq(dev, channel, irq_key);
 
 	return 0;
 }
@@ -726,8 +719,6 @@ static inline int dma_xilinx_axi_dma_config_reload(const struct device *dev, uin
 #endif
 {
 	const struct dma_xilinx_axi_dma_config *cfg = dev->config;
-	struct dma_xilinx_axi_dma_data *data = dev->data;
-	struct dma_xilinx_axi_dma_channel *channel_data = &data->channels[channel];
 
 	if (channel >= cfg->channels) {
 		LOG_ERR("Invalid channel %" PRIu32 " - must be < %" PRIu32 "!", channel,
@@ -736,8 +727,8 @@ static inline int dma_xilinx_axi_dma_config_reload(const struct device *dev, uin
 	}
 	/* one-block-at-a-time transfer */
 	return dma_xilinx_axi_dma_transfer_block(
-		cfg, channel, channel_data, channel == XILINX_AXI_DMA_TX_CHANNEL_NUM ? src : dst,
-		size, true, true);
+		dev, channel, channel == XILINX_AXI_DMA_TX_CHANNEL_NUM ? src : dst, size, true,
+		true);
 }
 
 static int dma_xilinx_axi_dma_configure(const struct device *dev, uint32_t channel,
@@ -873,7 +864,7 @@ static int dma_xilinx_axi_dma_configure(const struct device *dev, uint32_t chann
 
 	do {
 		ret = ret ||
-		      dma_xilinx_axi_dma_transfer_block(cfg, channel, &data->channels[channel],
+		      dma_xilinx_axi_dma_transfer_block(dev, channel,
 							channel == XILINX_AXI_DMA_TX_CHANNEL_NUM
 								? current_block->source_address
 								: current_block->dest_address,
@@ -958,42 +949,29 @@ static int dma_xilinx_axi_dma_init(const struct device *dev)
 		return -EIO;
 	}
 
-	cfg->irq_configure();
+	cfg->irq_configure(data);
 	return 0;
 }
 
-/* first IRQ is TX */
-#define TX_IRQ_CONFIGURE(inst) \
-	IRQ_CONNECT(DT_INST_IRQN_BY_IDX(inst, 0), DT_INST_IRQ_BY_IDX(inst, 0, priority), \
-		    dma_xilinx_axi_dma_tx_isr, DEVICE_DT_INST_GET(inst), 0); \
-	irq_enable(DT_INST_IRQN_BY_IDX(inst, 0));
-/* second IRQ is RX */
-#define RX_IRQ_CONFIGURE(inst) \
-	IRQ_CONNECT(DT_INST_IRQN_BY_IDX(inst, 1), DT_INST_IRQ_BY_IDX(inst, 1, priority), \
-		    dma_xilinx_axi_dma_rx_isr, DEVICE_DT_INST_GET(inst), 0); \
-	irq_enable(DT_INST_IRQN_BY_IDX(inst, 1));
-
-#define CONFIGURE_ALL_IRQS(inst) \
-	TX_IRQ_CONFIGURE(inst); \
-	RX_IRQ_CONFIGURE(inst);
-
 #define XILINX_AXI_DMA_INIT(inst) \
-	static void dma_xilinx_axi_dma##inst##_irq_configure(void) \
+	static void dma_xilinx_axi_dma##inst##_irq_configure(struct dma_xilinx_axi_dma_data *data) \
 	{ \
-		CONFIGURE_ALL_IRQS(inst); \
+		data->channels[XILINX_AXI_DMA_TX_CHANNEL_NUM].irq = DT_INST_IRQN_BY_IDX(inst, 0); \
+		IRQ_CONNECT(DT_INST_IRQN_BY_IDX(inst, 0), DT_INST_IRQ_BY_IDX(inst, 0, priority), \
+			    dma_xilinx_axi_dma_tx_isr, DEVICE_DT_INST_GET(inst), 0); \
+		irq_enable(DT_INST_IRQN_BY_IDX(inst, 0)); \
+		data->channels[XILINX_AXI_DMA_RX_CHANNEL_NUM].irq = DT_INST_IRQN_BY_IDX(inst, 1); \
+		IRQ_CONNECT(DT_INST_IRQN_BY_IDX(inst, 1), DT_INST_IRQ_BY_IDX(inst, 1, priority), \
+			    dma_xilinx_axi_dma_rx_isr, DEVICE_DT_INST_GET(inst), 0); \
+		irq_enable(DT_INST_IRQN_BY_IDX(inst, 1)); \
 	} \
-	static uint32_t dma_xilinx_axi_dma##inst##_irq0_channels[] = \
-		DT_INST_PROP_OR(inst, interrupts, {0}); \
 	static const struct dma_xilinx_axi_dma_config dma_xilinx_axi_dma##inst##_config = { \
 		.reg = DT_INST_REG_ADDR(inst), \
 		.channels = DT_INST_PROP(inst, dma_channels), \
 		.irq_configure = dma_xilinx_axi_dma##inst##_irq_configure, \
-		.irq0_channels = dma_xilinx_axi_dma##inst##_irq0_channels, \
-		.irq0_channels_size = ARRAY_SIZE(dma_xilinx_axi_dma##inst##_irq0_channels), \
 	}; \
 	static struct dma_xilinx_axi_dma_channel \
 		dma_xilinx_axi_dma##inst##_channels[DT_INST_PROP(inst, dma_channels)]; \
-	ATOMIC_DEFINE(dma_xilinx_axi_dma_atomic##inst, DT_INST_PROP(inst, dma_channels)); \
 	static struct dma_xilinx_axi_dma_data dma_xilinx_axi_dma##inst##_data = { \
 		.ctx = {.magic = DMA_MAGIC, .atomic = NULL}, \
 		.channels = dma_xilinx_axi_dma##inst##_channels, \
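
To make the instantiation macro easier to read, this is roughly how the generated irq_configure would expand for instance 0, with the devicetree macros left unexpanded (illustrative only, not part of the diff): the resolved IRQ numbers are stored in the per-channel data before the lines are connected and enabled, so the locking helpers and ISRs can consult data->channels[n].irq from then on.

/* Illustrative expansion of dma_xilinx_axi_dma##inst##_irq_configure
 * for inst == 0.
 */
static void dma_xilinx_axi_dma0_irq_configure(struct dma_xilinx_axi_dma_data *data)
{
	data->channels[XILINX_AXI_DMA_TX_CHANNEL_NUM].irq = DT_INST_IRQN_BY_IDX(0, 0);
	IRQ_CONNECT(DT_INST_IRQN_BY_IDX(0, 0), DT_INST_IRQ_BY_IDX(0, 0, priority),
		    dma_xilinx_axi_dma_tx_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQN_BY_IDX(0, 0));

	data->channels[XILINX_AXI_DMA_RX_CHANNEL_NUM].irq = DT_INST_IRQN_BY_IDX(0, 1);
	IRQ_CONNECT(DT_INST_IRQN_BY_IDX(0, 1), DT_INST_IRQ_BY_IDX(0, 1, priority),
		    dma_xilinx_axi_dma_rx_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQN_BY_IDX(0, 1));
}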
