summaryrefslogtreecommitdiff
path: root/libhw_cr/rp2040_dma.c
blob: 7b78535bcfd027f3131863f472866e13b362efdc (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
/* libhw_cr/rp2040_dma.c - Utilities for sharing the DMA IRQs
 *
 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2025  Luke T. Shumaker <lukeshu@lukeshu.com>
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#include <hardware/irq.h> /* for irq_set_exclusive_handler() */

#include <libmisc/log.h>

#include "rp2040_dma.h"

/* static_assert("rp2040_dma.h" == <hardware/irq.h>); */
static_assert((uint)DMAIRQ_0 == (uint)DMA_IRQ_0);
static_assert((uint)DMAIRQ_1 == (uint)DMA_IRQ_1);

/* Borrowed from <hardware/dma.h> *********************************************/

/* Return a pointer to the control/status register block for `channel`.
 *
 * Mirrors the <hardware/dma.h> helper of the same name; asserts that
 * `channel` names a real DMA channel.
 */
dma_channel_hw_t *dma_channel_hw_addr(uint channel) {
	assert(channel < NUM_DMA_CHANNELS);
	dma_channel_hw_t *ch = &dma_hw->ch[channel];
	return ch;
}

/* Our own code ***************************************************************/

/* Bitmask classification of an address's suitability for use with the DMA
 * engine; produced by dma_classify_addr() below. */
typedef uint8_t addr_flag_t;
#define ADDR_FLAG_UNMAPPED   ((addr_flag_t)(1<<0)) /* nothing decodes this address */
#define ADDR_FLAG_UNSAFE     ((addr_flag_t)(1<<1)) /* mapped, but not safe for DMA to touch */
#define ADDR_FLAG_RD_OK      ((addr_flag_t)(1<<2)) /* DMA may read from this address */
#define ADDR_FLAG_WR_OK      ((addr_flag_t)(1<<3)) /* DMA may write to this address */
#define ADDR_FLAG_NEEDS_DREQ ((addr_flag_t)(1<<4)) /* transfers must be paced by a DREQ */

/* Classify `_addr` against the RP2040 memory map, returning a set of
 * ADDR_FLAG_* bits describing whether DMA may read/write it.
 *
 * The address ranges below follow the RP2040 address map (boot ROM at
 * 0x0, XIP at 0x1, SRAM at 0x2, peripherals at 0x4/0x5/0xd/0xe) — the
 * exact bounds should be cross-checked against the RP2040 datasheet.
 *
 * NOTE: no branch currently returns ADDR_FLAG_NEEDS_DREQ; the peripheral
 * cases are still TODO and conservatively report RD_OK|WR_OK.
 */
static addr_flag_t dma_classify_addr(volatile const void *_addr) {
	uintptr_t addr = (uintptr_t)_addr;
	switch (addr >> 28) { /* dispatch on the top address nibble */
	case 0x0: /* ROM */
		/* 16 KiB boot ROM: 0x00000000-0x00003fff. */
		if (addr < 0x4000)
			return ADDR_FLAG_RD_OK;
		return ADDR_FLAG_UNMAPPED;
	case 0x1: /* XIP */
		switch ((addr >> 24)&0xf) { /* sub-dispatch on bits 27:24 */
		case 0x0: case 0x1: case 0x2: case 0x3: /* not safe for DMA */
			/* XIP flash windows (cached/uncached/alias views). */
			return ADDR_FLAG_UNSAFE;
		case 0x4: /* CTRL registers */
			/* XIP control registers: 0x14000000-0x1400001f. */
			if (addr < 0x14000020)
				return ADDR_FLAG_RD_OK | ADDR_FLAG_WR_OK;
			return ADDR_FLAG_UNMAPPED;
		case 0x5: /* SRAM */
			/* 16 KiB XIP cache-as-SRAM: 0x15000000-0x15003fff. */
			if (addr < 0x15004000)
				return ADDR_FLAG_RD_OK | ADDR_FLAG_WR_OK;
			return ADDR_FLAG_UNMAPPED;
		case 0x8: /* SSI registers */
			/* Two register windows: 0x18000000-0x18000063 and
			 * 0x180000f0-0x180000fb; the gap is unmapped. */
			if (addr < 0x18000064)
				return ADDR_FLAG_RD_OK | ADDR_FLAG_WR_OK;
			if (0x180000f0 <= addr && addr < 0x180000fc)
				return ADDR_FLAG_RD_OK | ADDR_FLAG_WR_OK;
			return ADDR_FLAG_UNMAPPED;
		}
		return ADDR_FLAG_UNMAPPED;
	case 0x2: /* SRAM */
		/* Masking off bit 24 folds the non-striped bank aliases at
		 * 0x21000000 onto the striped window at 0x20000000, so one
		 * bound covers both views of banks 0-3 (256 KiB total). */
		if ((addr & 0xfeffffff) < 0x20040000) /* banks 0-3 striped/unstriped depending on bit */
			return ADDR_FLAG_RD_OK | ADDR_FLAG_WR_OK;
		/* 4 KiB banks 4 and 5: 0x20040000-0x20041fff. */
		if (addr < 0x20042000) /* banks 4-5 */
			return ADDR_FLAG_RD_OK | ADDR_FLAG_WR_OK;
		return ADDR_FLAG_UNMAPPED;
	case 0x4: /* APB Peripherals */
		/* TODO */
		return ADDR_FLAG_RD_OK | ADDR_FLAG_WR_OK;
	case 0x5: /* AHB-Lite Peripherals */
		/* TODO */
		return ADDR_FLAG_RD_OK | ADDR_FLAG_WR_OK;
	case 0xd: /* IOPORT Registers */
		/* TODO */
		return ADDR_FLAG_RD_OK | ADDR_FLAG_WR_OK;
	case 0xe: /* Cortex-M0+ internal registers */
		/* TODO */
		return ADDR_FLAG_RD_OK | ADDR_FLAG_WR_OK;
	}
	/* Every other top nibble (0x3, 0x6-0xc, 0xf) decodes to nothing. */
	return ADDR_FLAG_UNMAPPED;
}

bool dma_is_unsafe(volatile const void *addr) {
	return dma_classify_addr(addr) & ADDR_FLAG_UNSAFE;
}

#ifndef NDEBUG
/* Debug-build sanity check for a DMA transfer from `src` to `dst`:
 * logs an error and aborts if `dst` is not writable by DMA, if `src`
 * is not readable by DMA, or if both endpoints would require DREQ
 * pacing (the DMA engine can honor only one DREQ per transfer).
 *
 * All failures are logged before aborting, so a bad call reports every
 * problem at once rather than just the first.
 *
 * NOTE: dma_classify_addr() never currently sets ADDR_FLAG_NEEDS_DREQ
 * (the peripheral cases are TODO), so the third check cannot yet fire.
 */
void dma_assert_addrs(volatile void *dst, volatile const void *src) {
	addr_flag_t dst_flags = dma_classify_addr(dst);
	addr_flag_t src_flags = dma_classify_addr(src);
	bool bad = false;
	if (!(dst_flags & ADDR_FLAG_WR_OK)) {
		log_n_errorln(ASSERT, "dma_assert_addrs(", (ptr, dst), ", ", (ptr, src), "): invalid destination");
		bad = true;
	}
	if (!(src_flags & ADDR_FLAG_RD_OK)) {
		log_n_errorln(ASSERT, "dma_assert_addrs(", (ptr, dst), ", ", (ptr, src), "): invalid source");
		bad = true;
	}
	if (!bad && (dst_flags & ADDR_FLAG_NEEDS_DREQ && src_flags & ADDR_FLAG_NEEDS_DREQ) ) {
		log_n_errorln(ASSERT, "dma_assert_addrs(", (ptr, dst), ", ", (ptr, src), "): source and destination both required DREQs");
		bad = true;
	}
	if (bad)
		__lm_abort();
}
#endif

/* A per-channel callback registration. */
struct dmairq_handler_entry {
	dmairq_handler_t         fn;   /* NULL means "no handler registered" */
	void                    *arg;  /* opaque context passed back to fn */
};
/* One entry per DMA channel; written by
 * dmairq_set_and_enable_exclusive_handler(), read by dmairq_handler(). */
struct dmairq_handler_entry dmairq_handlers[NUM_DMA_CHANNELS] = {};

/* Per-IRQ flag: true once dmairq_handler() has been installed and the
 * IRQ line enabled for DMAIRQ_0/DMAIRQ_1 respectively. */
bool dmairq_initialized[NUM_DMA_IRQS] = {};

/* Shared ISR for both DMA IRQ lines: snapshot the set of channels
 * asserting this IRQ, acknowledge them, then dispatch to each channel's
 * registered handler (if any).
 */
static void dmairq_handler(void) {
	enum dmairq irq = __get_current_exception() - VTABLE_FIRST_IRQ;
	size_t irq_idx = irq - DMAIRQ_0;
	assert(irq_idx < NUM_DMA_IRQS);

	uint32_t pending = dma_hw->irq_ctrl[irq_idx].ints;
	dma_hw->intr = pending; /* acknowledge irq (write-1-to-clear) before dispatching */
	for (uint channel = 0; channel < NUM_DMA_CHANNELS; channel++) {
		if (!(pending & (1u << channel)))
			continue;
		struct dmairq_handler_entry *entry = &dmairq_handlers[channel];
		if (entry->fn != NULL)
			entry->fn(entry->arg, irq, channel);
	}
}

/* Register `fn` as the exclusive completion handler for `channel`,
 * routed through IRQ line `irq` (DMAIRQ_0 or DMAIRQ_1), passing `arg`
 * back on each call.  The channel must not already have a handler.
 *
 * The entry is recorded and the channel's INTE bit set before the
 * shared ISR is installed/enabled on first use of this IRQ line, so a
 * completion can never be dispatched to an empty slot.
 */
void dmairq_set_and_enable_exclusive_handler(enum dmairq irq, uint channel, dmairq_handler_t fn, void *arg) {
	assert(irq == DMAIRQ_0 || irq == DMAIRQ_1);
	assert(channel < NUM_DMA_CHANNELS);
	assert(fn);

	struct dmairq_handler_entry *entry = &dmairq_handlers[channel];
	assert(entry->fn == NULL);
	entry->fn = fn;
	entry->arg = arg;

	size_t irq_idx = irq - DMAIRQ_0;
	hw_set_bits(&dma_hw->irq_ctrl[irq_idx].inte, 1u<<channel);

	if (!dmairq_initialized[irq_idx]) {
		irq_set_exclusive_handler(irq, dmairq_handler);
		irq_set_enabled(irq, true);
		dmairq_initialized[irq_idx] = true;
	}
}