/* $NetBSD: fdt_memory.c,v 1.10 2024/01/14 07:53:38 mlelstv Exp $ */

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jared McNeill <jmcneill@invisible.ca>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_fdt.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fdt_memory.c,v 1.10 2024/01/14 07:53:38 mlelstv Exp $");

#include <sys/param.h>
#include <sys/queue.h>

#include <libfdt.h>
#include <dev/fdt/fdtvar.h>
#include <dev/fdt/fdt_memory.h>

struct fdt_memory_range {
	struct fdt_memory	mr_mem;
	bool			mr_used;
	TAILQ_ENTRY(fdt_memory_range) mr_list;
};

static TAILQ_HEAD(fdt_memory_rangehead, fdt_memory_range) fdt_memory_ranges =
    TAILQ_HEAD_INITIALIZER(fdt_memory_ranges);

static struct fdt_memory_range fdt_memory_range_pool[FDT_MEMORY_RANGES];

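/*
 * Allocate an entry from the static range pool. Returns NULL (after
 * printing a warning) when all FDT_MEMORY_RANGES entries are in use.
 */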
static struct fdt_memory_range *
fdt_memory_range_alloc(void)
{
	for (size_t n = 0; n < FDT_MEMORY_RANGES; n++)
		if (!fdt_memory_range_pool[n].mr_used) {
			fdt_memory_range_pool[n].mr_used = true;
			return &fdt_memory_range_pool[n];
		}

	printf("%s: no free memory ranges, increase FDT_MEMORY_RANGES!\n",
	    __func__);
	return NULL;
}

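/*
 * Return a range entry to the static pool.
 */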
static void
fdt_memory_range_free(struct fdt_memory_range *mr)
{
	mr->mr_used = false;
}

/*
 * Get all of physical memory, including holes.
 */
void
fdt_memory_get(uint64_t *pstart, uint64_t *pend)
{
	const void *fdt_data = fdtbus_get_data();
	uint64_t cur_addr, cur_size;
	int index, nadd = 0, off, memory;

	off = fdt_node_offset_by_prop_value(fdt_data, -1,
	    "device_type", "memory", sizeof("memory"));

	/*
	 * Device Tree Specification 3.2 says that memory
	 * nodes are named "memory" and have device_type
	 * "memory", but if the device_type is missing, try
	 * to find the (then single) node by name.
	 */
	if (off == -FDT_ERR_NOTFOUND)
		off = fdt_path_offset(fdt_data, "/memory");

	while (off != -FDT_ERR_NOTFOUND) {
		memory = fdtbus_offset2phandle(off);
		for (index = 0;
		     fdtbus_get_reg64(memory, index, &cur_addr, &cur_size) == 0;
		     index++) {
			if (cur_size == 0)
				continue;
			fdt_memory_add_range(cur_addr, cur_size);

			if (nadd++ == 0) {
				*pstart = cur_addr;
				*pend = cur_addr + cur_size;
				continue;
			}
			if (cur_addr < *pstart)
				*pstart = cur_addr;
			if (cur_addr + cur_size > *pend)
				*pend = cur_addr + cur_size;
		}
		off = fdt_node_offset_by_prop_value(fdt_data, off,
		    "device_type", "memory", sizeof("memory"));
	}
	if (nadd == 0)
		panic("Cannot determine memory size");
}

/*
 * Exclude memory ranges reserved in the device tree from the memory
 * configuration.
 */
void
fdt_memory_remove_reserved(uint64_t min_addr, uint64_t max_addr)
{
	uint64_t lstart = 0, lend = 0;
	int index, error, phandle, child;
	const void *fdt_data = fdtbus_get_data();
	const int num = fdt_num_mem_rsv(fdt_data);

	for (index = 0; index <= num; index++) {
		uint64_t addr, size;

		error = fdt_get_mem_rsv(fdt_data, index, &addr, &size);
		if (error != 0)
			continue;

		if (lstart <= addr && addr <= lend) {
			size -= (lend - addr);
			addr = lend;
		}
		if (size == 0)
			continue;
		if (addr + size <= min_addr)
			continue;
		if (addr >= max_addr)
			continue;
		if (addr < min_addr) {
			size -= (min_addr - addr);
			addr = min_addr;
		}
		if (addr + size > max_addr)
			size = max_addr - addr;
		fdt_memory_remove_range(addr, size);
		lstart = addr;
		lend = addr + size;
	}

	/*
	 * "no-map" ranges defined in the /reserved-memory node
	 * must also be excluded.
	 */
	phandle = OF_finddevice("/reserved-memory");
	if (phandle != -1) {
		for (child = OF_child(phandle); child; child = OF_peer(child)) {
			bus_addr_t addr;
			bus_size_t size;

			if (fdtbus_get_reg(child, 0, &addr, &size) != 0)
				continue;
			if (size == 0)
				continue;
			fdt_memory_remove_range(addr, size);
		}
	}
}

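/*
 * Add a range of physical memory. The list of ranges is kept sorted by
 * start address; any part of the new range that overlaps an existing
 * range is dropped, and adjacent ranges are merged.
 */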
void
fdt_memory_add_range(uint64_t start, uint64_t size)
{
	struct fdt_memory_range *mr, *prev, *cur, *tmp;
	bool inserted = false;

	mr = fdt_memory_range_alloc();
	if (mr == NULL)
		return;

	mr->mr_mem.start = start;
	mr->mr_mem.end = start + size;

	/*
	 * Add the new range to the list of sorted ranges.
	 */
	TAILQ_FOREACH(cur, &fdt_memory_ranges, mr_list)
		if (mr->mr_mem.start <= cur->mr_mem.start) {
			TAILQ_INSERT_BEFORE(cur, mr, mr_list);
			inserted = true;
			break;
		}
	if (!inserted)
		TAILQ_INSERT_TAIL(&fdt_memory_ranges, mr, mr_list);

	/*
	 * Remove overlaps.
	 */
	TAILQ_FOREACH_SAFE(mr, &fdt_memory_ranges, mr_list, tmp) {
		prev = TAILQ_PREV(mr, fdt_memory_rangehead, mr_list);
		if (prev && prev->mr_mem.end > mr->mr_mem.start) {
			mr->mr_mem.start = prev->mr_mem.end;
			if (mr->mr_mem.start >= mr->mr_mem.end) {
				TAILQ_REMOVE(&fdt_memory_ranges, mr, mr_list);
				fdt_memory_range_free(mr);
			}
		}
	}

	/*
	 * Combine adjacent ranges.
	 */
	TAILQ_FOREACH_SAFE(mr, &fdt_memory_ranges, mr_list, tmp) {
		prev = TAILQ_PREV(mr, fdt_memory_rangehead, mr_list);
		if (prev && prev->mr_mem.end == mr->mr_mem.start) {
			prev->mr_mem.end = mr->mr_mem.end;
			TAILQ_REMOVE(&fdt_memory_ranges, mr, mr_list);
			fdt_memory_range_free(mr);
		}
	}
}

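/*
 * Remove a range of physical memory from the list, trimming or splitting
 * any existing ranges that overlap it.
 */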
void
fdt_memory_remove_range(uint64_t start, uint64_t size)
{
	struct fdt_memory_range *mr, *next, *tmp;
	const uint64_t end = start + size;

	TAILQ_FOREACH_SAFE(mr, &fdt_memory_ranges, mr_list, tmp) {
		if (start <= mr->mr_mem.start && end >= mr->mr_mem.end) {
			/*
			 * Removed range completely covers this range,
			 * just remove it.
			 */
			TAILQ_REMOVE(&fdt_memory_ranges, mr, mr_list);
			fdt_memory_range_free(mr);
		} else if (start > mr->mr_mem.start && end < mr->mr_mem.end) {
			/*
			 * Removed range is completely contained by this range,
			 * split it.
			 */
			next = fdt_memory_range_alloc();
			if (next == NULL)
				panic("fdt_memory_remove_range");
			next->mr_mem.start = end;
			next->mr_mem.end = mr->mr_mem.end;
			mr->mr_mem.end = start;
			TAILQ_INSERT_AFTER(&fdt_memory_ranges, mr, next, mr_list);
		} else if (start <= mr->mr_mem.start && end > mr->mr_mem.start && end < mr->mr_mem.end) {
			/*
			 * Partial overlap at the beginning of the range.
			 */
			mr->mr_mem.start = end;
		} else if (start > mr->mr_mem.start && start < mr->mr_mem.end && end >= mr->mr_mem.end) {
			/*
			 * Partial overlap at the end of the range.
			 */
			mr->mr_mem.end = start;
		}
		KASSERT(mr->mr_mem.start < mr->mr_mem.end);
	}
}

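/*
 * Invoke the callback for each remaining memory range.
 */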
void
fdt_memory_foreach(void (*fn)(const struct fdt_memory *, void *), void *arg)
{
	struct fdt_memory_range *mr;

	TAILQ_FOREACH(mr, &fdt_memory_ranges, mr_list)
		fn(&mr->mr_mem, arg);
}
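
/*
 * Typical usage (illustrative sketch only, not part of this interface's
 * contract): early machine-dependent bootstrap code builds its view of
 * physical memory roughly as follows.  The names "reserved_min",
 * "reserved_max", "kernel_start", "kernel_size", and the
 * "add_boot_physmem" callback are placeholders for machine-dependent
 * values and functions.
 *
 *	uint64_t memstart, memend;
 *
 *	fdt_memory_get(&memstart, &memend);
 *	fdt_memory_remove_reserved(reserved_min, reserved_max);
 *	fdt_memory_remove_range(kernel_start, kernel_size);
 *	fdt_memory_foreach(add_boot_physmem, NULL);
 */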