Skip to content

Commit d457332

Browse files
committed
Fixed malloc() bug. (Paul Stoffregen)
Fixes #857
1 parent cce70d2 commit d457332

File tree

1 file changed

+380
-0
lines changed

1 file changed

+380
-0
lines changed
+380
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,380 @@
1+
/* Copyright (c) 2002, 2004, 2010 Joerg Wunsch
2+
Copyright (c) 2010 Gerben van den Broeke
3+
All rights reserved.
4+
5+
malloc, free, realloc from avr-libc 1.7.0
6+
with minor modifications, by Paul Stoffregen
7+
8+
Redistribution and use in source and binary forms, with or without
9+
modification, are permitted provided that the following conditions are met:
10+
11+
* Redistributions of source code must retain the above copyright
12+
notice, this list of conditions and the following disclaimer.
13+
14+
* Redistributions in binary form must reproduce the above copyright
15+
notice, this list of conditions and the following disclaimer in
16+
the documentation and/or other materials provided with the
17+
distribution.
18+
19+
* Neither the name of the copyright holders nor the names of
20+
contributors may be used to endorse or promote products derived
21+
from this software without specific prior written permission.
22+
23+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26+
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27+
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28+
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29+
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30+
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31+
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32+
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33+
POSSIBILITY OF SUCH DAMAGE.
34+
*/
35+
36+
37+
#include <stdlib.h>
38+
#include <inttypes.h>
39+
#include <string.h>
40+
#include <avr/io.h>
41+
42+
43+
#define __MALLOC_MARGIN__ 120
44+
45+
46+
/*
 * Freelist node, stored in-place inside each free chunk.
 *
 * A chunk's header is the "sz" field only; the payload returned to the
 * caller starts at "nx" (see the `return &(fp1->nx);` statements in
 * malloc()).  While a chunk is free, the payload area holds the "nx"
 * link — which is why malloc() clamps requests up to
 * sizeof(struct __freelist) - sizeof(size_t), so every chunk can carry
 * a freelist entry later.
 */
struct __freelist {
	size_t sz;              /* payload size of this chunk, in bytes (header not included) */
	struct __freelist *nx;  /* next free chunk (address-ordered list), or 0 at the end */
};
50+
51+
/*
52+
* Exported interface:
53+
*
54+
* When extending the data segment, the allocator will not try to go
55+
* beyond the current stack limit, decreased by __malloc_margin bytes.
56+
* Thus, all possible stack frames of interrupt routines that could
57+
* interrupt the current function, plus all further nested function
58+
* calls must not require more stack space, or they'll risk to collide
59+
* with the data segment.
60+
*/
61+
62+
63+
/* Current stack pointer, as a data address.  Used to keep the heap from
 * growing to within __MALLOC_MARGIN__ bytes of the stack. */
#define STACK_POINTER() ((char *)AVR_STACK_POINTER_REG)

extern char __heap_start;       /* lowest heap address, provided by the linker */
char *__brkval = &__heap_start; /* first location not yet allocated */
struct __freelist *__flp;       /* freelist pointer (head of freelist) */
/* High-water mark of __brkval; updated whenever malloc()/realloc() extend
 * the heap.  Fixed: the initializer needs an explicit cast — assigning a
 * plain int to a pointer is a C constraint violation.  Original value
 * preserved; NOTE(review): 100 looks like a placeholder — confirm intent. */
char *__brkval_maximum = (char *)100;
68+
69+
void *
70+
malloc(size_t len)
71+
{
72+
struct __freelist *fp1, *fp2, *sfp1, *sfp2;
73+
char *cp;
74+
size_t s, avail;
75+
76+
/*
77+
* Our minimum chunk size is the size of a pointer (plus the
78+
* size of the "sz" field, but we don't need to account for
79+
* this), otherwise we could not possibly fit a freelist entry
80+
* into the chunk later.
81+
*/
82+
if (len < sizeof(struct __freelist) - sizeof(size_t))
83+
len = sizeof(struct __freelist) - sizeof(size_t);
84+
85+
/*
86+
* First, walk the free list and try finding a chunk that
87+
* would match exactly. If we found one, we are done. While
88+
* walking, note down the smallest chunk we found that would
89+
* still fit the request -- we need it for step 2.
90+
*
91+
*/
92+
for (s = 0, fp1 = __flp, fp2 = 0;
93+
fp1;
94+
fp2 = fp1, fp1 = fp1->nx) {
95+
if (fp1->sz < len)
96+
continue;
97+
if (fp1->sz == len) {
98+
/*
99+
* Found it. Disconnect the chunk from the
100+
* freelist, and return it.
101+
*/
102+
if (fp2)
103+
fp2->nx = fp1->nx;
104+
else
105+
__flp = fp1->nx;
106+
return &(fp1->nx);
107+
}
108+
else {
109+
if (s == 0 || fp1->sz < s) {
110+
/* this is the smallest chunk found so far */
111+
s = fp1->sz;
112+
sfp1 = fp1;
113+
sfp2 = fp2;
114+
}
115+
}
116+
}
117+
/*
118+
* Step 2: If we found a chunk on the freelist that would fit
119+
* (but was too large), look it up again and use it, since it
120+
* is our closest match now. Since the freelist entry needs
121+
* to be split into two entries then, watch out that the
122+
* difference between the requested size and the size of the
123+
* chunk found is large enough for another freelist entry; if
124+
* not, just enlarge the request size to what we have found,
125+
* and use the entire chunk.
126+
*/
127+
if (s) {
128+
if (s - len < sizeof(struct __freelist)) {
129+
/* Disconnect it from freelist and return it. */
130+
if (sfp2)
131+
sfp2->nx = sfp1->nx;
132+
else
133+
__flp = sfp1->nx;
134+
return &(sfp1->nx);
135+
}
136+
/*
137+
* Split them up. Note that we leave the first part
138+
* as the new (smaller) freelist entry, and return the
139+
* upper portion to the caller. This saves us the
140+
* work to fix up the freelist chain; we just need to
141+
* fixup the size of the current entry, and note down
142+
* the size of the new chunk before returning it to
143+
* the caller.
144+
*/
145+
cp = (char *)sfp1;
146+
s -= len;
147+
cp += s;
148+
sfp2 = (struct __freelist *)cp;
149+
sfp2->sz = len;
150+
sfp1->sz = s - sizeof(size_t);
151+
return &(sfp2->nx);
152+
}
153+
/*
154+
* Step 3: If the request could not be satisfied from a
155+
* freelist entry, just prepare a new chunk. This means we
156+
* need to obtain more memory first. The largest address just
157+
* not allocated so far is remembered in the brkval variable.
158+
* Under Unix, the "break value" was the end of the data
159+
* segment as dynamically requested from the operating system.
160+
* Since we don't have an operating system, just make sure
161+
* that we don't collide with the stack.
162+
*/
163+
cp = STACK_POINTER() - __MALLOC_MARGIN__;
164+
if (cp <= __brkval)
165+
/*
166+
* Memory exhausted.
167+
*/
168+
return 0;
169+
avail = cp - __brkval;
170+
/*
171+
* Both tests below are needed to catch the case len >= 0xfffe.
172+
*/
173+
if (avail >= len && avail >= len + sizeof(size_t)) {
174+
fp1 = (struct __freelist *)__brkval;
175+
__brkval += len + sizeof(size_t);
176+
__brkval_maximum = __brkval;
177+
fp1->sz = len;
178+
return &(fp1->nx);
179+
}
180+
/*
181+
* Step 4: There's no help, just fail. :-/
182+
*/
183+
return 0;
184+
}
185+
186+
187+
/*
 * Return a chunk previously obtained from malloc()/realloc() to the
 * allocator.  The chunk is inserted into the address-ordered freelist
 * and merged with an adjacent free chunk on either side when possible.
 * If the chunk (after merging) is the topmost allocation, __brkval is
 * lowered instead of keeping it on the freelist.
 */
void
free(void *p)
{
	struct __freelist *fp1, *fp2, *fpnew;
	char *cp1, *cp2, *cpnew;

	/* ISO C says free(NULL) must be a no-op */
	if (p == 0)
		return;

	/* Step back over the payload to the chunk header ("sz" field). */
	cpnew = p;
	cpnew -= sizeof(size_t);
	fpnew = (struct __freelist *)cpnew;
	fpnew->nx = 0;

	/*
	 * Trivial case first: if there's no freelist yet, our entry
	 * will be the only one on it.  If this is the last entry, we
	 * can reduce __brkval instead.
	 */
	if (__flp == 0) {
		if ((char *)p + fpnew->sz == __brkval)
			__brkval = cpnew;
		else
			__flp = fpnew;
		return;
	}

	/*
	 * Now, find the position where our new entry belongs onto the
	 * freelist.  Try to aggregate the chunk with adjacent chunks
	 * if possible.
	 */
	for (fp1 = __flp, fp2 = 0;
	     fp1;
	     fp2 = fp1, fp1 = fp1->nx) {
		if (fp1 < fpnew)
			continue;
		/* fp1 is the first freelist entry above fpnew. */
		cp1 = (char *)fp1;
		fpnew->nx = fp1;
		if ((char *)&(fpnew->nx) + fpnew->sz == cp1) {
			/* upper chunk adjacent, assimilate it */
			fpnew->sz += fp1->sz + sizeof(size_t);
			fpnew->nx = fp1->nx;
		}
		if (fp2 == 0) {
			/* new head of freelist */
			__flp = fpnew;
			return;
		}
		break;
	}
	/*
	 * Note that we get here either if we hit the "break" above,
	 * or if we fell off the end of the loop.  The latter means
	 * we've got a new topmost chunk.  Either way, try aggregating
	 * with the lower chunk if possible.
	 */
	fp2->nx = fpnew;
	cp2 = (char *)&(fp2->nx);
	if (cp2 + fp2->sz == cpnew) {
		/* lower chunk adjacent, merge */
		fp2->sz += fpnew->sz + sizeof(size_t);
		fp2->nx = fpnew->nx;
	}
	/*
	 * If there's a new topmost chunk, lower __brkval instead.
	 */
	for (fp1 = __flp, fp2 = 0;
	     fp1->nx != 0;
	     fp2 = fp1, fp1 = fp1->nx)
		/* advance to entry just before end of list */;
	cp2 = (char *)&(fp1->nx);
	if (cp2 + fp1->sz == __brkval) {
		/* Last freelist entry touches the break: give it back. */
		if (fp2 == NULL)
			/* Freelist is empty now. */
			__flp = NULL;
		else
			fp2->nx = NULL;
		__brkval = cp2 - sizeof(size_t);
	}
}
269+
270+
271+
272+
/*
 * Resize the chunk at ptr to len bytes.
 *
 * Paths, in order: NULL ptr delegates to malloc(); shrinking splits off
 * and free()s the tail when the remainder can hold a freelist entry;
 * growing tries to assimilate an adjacent freelist chunk in place, then
 * to extend the break if ptr is the topmost chunk, and finally falls
 * back to malloc() + memcpy() + free().
 *
 * Returns ptr (possibly unchanged), a new pointer, or 0 on failure (in
 * which case the original chunk remains valid).
 */
void *
realloc(void *ptr, size_t len)
{
	struct __freelist *fp1, *fp2, *fp3, *ofp3;
	char *cp, *cp1;
	void *memp;
	size_t s, incr;

	/* Trivial case, required by C standard. */
	if (ptr == 0)
		return malloc(len);

	/* Locate the chunk header just below the payload. */
	cp1 = (char *)ptr;
	cp1 -= sizeof(size_t);
	fp1 = (struct __freelist *)cp1;

	cp = (char *)ptr + len;	/* new next pointer */
	if (cp < cp1)
		/* Pointer wrapped across top of RAM, fail. */
		return 0;

	/*
	 * See whether we are growing or shrinking.  When shrinking,
	 * we split off a chunk for the released portion, and call
	 * free() on it.  Therefore, we can only shrink if the new
	 * size is at least sizeof(struct __freelist) smaller than the
	 * previous size.
	 */
	if (len <= fp1->sz) {
		/* The first test catches a possible unsigned int
		 * rollover condition. */
		if (fp1->sz <= sizeof(struct __freelist) ||
		    len > fp1->sz - sizeof(struct __freelist))
			return ptr;
		fp2 = (struct __freelist *)cp;
		fp2->sz = fp1->sz - len - sizeof(size_t);
		fp1->sz = len;
		/* free() expects a payload pointer, i.e. &nx. */
		free(&(fp2->nx));
		return ptr;
	}

	/*
	 * If we get here, we are growing.  First, see whether there
	 * is space in the free list on top of our current chunk.
	 */
	incr = len - fp1->sz;
	cp = (char *)ptr + fp1->sz;
	fp2 = (struct __freelist *)cp;	/* address just past our chunk */
	for (s = 0, ofp3 = 0, fp3 = __flp;
	     fp3;
	     ofp3 = fp3, fp3 = fp3->nx) {
		if (fp3 == fp2 && fp3->sz + sizeof(size_t) >= incr) {
			/* found something that fits */
			if (fp3->sz + sizeof(size_t) - incr > sizeof(struct __freelist)) {
				/* split off a new freelist entry */
				cp = (char *)ptr + len;
				fp2 = (struct __freelist *)cp;
				fp2->nx = fp3->nx;
				fp2->sz = fp3->sz - incr;
				fp1->sz = len;
			} else {
				/* it just fits, so use it entirely */
				fp1->sz += fp3->sz + sizeof(size_t);
				fp2 = fp3->nx;
			}
			/* Unlink the consumed entry (or link its remainder). */
			if (ofp3)
				ofp3->nx = fp2;
			else
				__flp = fp2;
			return ptr;
		}
		/*
		 * Find the largest chunk on the freelist while
		 * walking it.
		 */
		if (fp3->sz > s)
			s = fp3->sz;
	}
	/*
	 * If we are the topmost chunk in memory, and there was no
	 * large enough chunk on the freelist that could be re-used
	 * (by a call to malloc() below), quickly extend the
	 * allocation area if possible, without need to copy the old
	 * data.
	 */
	if (__brkval == (char *)ptr + fp1->sz && len > s) {
		cp = (char *)ptr + len;
		cp1 = STACK_POINTER() - __MALLOC_MARGIN__;
		if (cp < cp1) {
			__brkval = cp;
			__brkval_maximum = cp;	/* record heap high-water mark */
			fp1->sz = len;
			return ptr;
		}
		/* If that failed, we are out of luck. */
		return 0;
	}

	/*
	 * Call malloc() for a new chunk, then copy over the data, and
	 * release the old region.
	 */
	if ((memp = malloc(len)) == 0)
		return 0;
	memcpy(memp, ptr, fp1->sz);
	free(ptr);
	return memp;
}
380+

0 commit comments

Comments
 (0)