Home | History | Annotate | Line # | Download | only in dist
      1 /*	$NetBSD: ralloc.c,v 1.2 2016/01/13 21:56:38 christos Exp $	*/
      2 
      3 /* Block-relocating memory allocator.
      4    Copyright (C) 1993, 1995 Free Software Foundation, Inc.
      5 
      6 
      7 This file is part of the GNU C Library.  Its master source is NOT part of
      8 the C library, however.  The master source lives in /gd/gnu/lib.
      9 
     10 The GNU C Library is free software; you can redistribute it and/or
     11 modify it under the terms of the GNU Library General Public License as
     12 published by the Free Software Foundation; either version 2 of the
     13 License, or (at your option) any later version.
     14 
     15 The GNU C Library is distributed in the hope that it will be useful,
     16 but WITHOUT ANY WARRANTY; without even the implied warranty of
     17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     18 Library General Public License for more details.
     19 
     20 You should have received a copy of the GNU Library General Public
     21 License along with the GNU C Library; see the file COPYING.LIB.  If
     22 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
     23 Cambridge, MA 02139, USA.  */
     24 
     25 /* NOTES:
     26 
     27    Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
     28    rather than all of them.  This means allowing for a possible
     29    hole between the first bloc and the end of malloc storage.  */
     30 
     31 #ifdef emacs
     32 
     33 #include <config.h>
     34 #include "lisp.h"		/* Needed for VALBITS.  */
     35 
     36 #undef NULL
     37 
     38 /* The important properties of this type are that 1) it's a pointer, and
     39    2) arithmetic on it should work as if the size of the object pointed
     40    to has a size of 1.  */
     41 #if 0 /* Arithmetic on void* is a GCC extension.  */
     42 #ifdef __STDC__
     43 typedef void *POINTER;
     44 #else
     45 
     46 #ifdef	HAVE_CONFIG_H
     47 #include "config.h"
     48 #endif
     49 
     50 typedef char *POINTER;
     51 
     52 #endif
     53 #endif /* 0 */
     54 
     55 /* Unconditionally use char * for this.  */
     56 typedef char *POINTER;
     57 
     58 typedef unsigned long SIZE;
     59 
     60 /* Declared in dispnew.c, this version doesn't screw up if regions
     61    overlap.  */
     62 extern void safe_bcopy ();
     63 
     64 #include "getpagesize.h"
     65 
     66 #else	/* Not emacs.  */
     67 
     68 #include <stddef.h>
     69 
     70 typedef size_t SIZE;
     71 typedef void *POINTER;
     72 
     73 #include <unistd.h>
     74 #include <stdlib.h>
     75 #include <malloc.h>
     76 #include <string.h>
     77 
     78 #define safe_bcopy(x, y, z) memmove (y, x, z)
     79 
     80 #endif	/* emacs.  */
     81 
     82 #define NIL ((POINTER) 0)
     83 
     84 /* A flag to indicate whether we have initialized ralloc yet.  For
     85    Emacs's sake, please do not make this local to malloc_init; on some
     86    machines, the dumping procedure makes all static variables
     87    read-only.  On these machines, the word static is #defined to be
     88    the empty string, meaning that r_alloc_initialized becomes an
     89    automatic variable, and loses its value each time Emacs is started up.  */
     90 static int r_alloc_initialized = 0;
     91 
     92 static void r_alloc_init ();
     93 
     94 /* Declarations for working with the malloc, ralloc, and system breaks.  */
     96 
     97 /* Function to set the real break value.  */
     98 static POINTER (*real_morecore) ();
     99 
    100 /* The break value, as seen by malloc.  */
    101 static POINTER virtual_break_value;
    102 
    103 /* The address of the end of the last data in use by ralloc,
    104    including relocatable blocs as well as malloc data.  */
    105 static POINTER break_value;
    106 
    107 /* This is the size of a page.  We round memory requests to this boundary.  */
    108 static int page_size;
    109 
    110 /* Whenever we get memory from the system, get this many extra bytes.  This
    111    must be a multiple of page_size.  */
    112 static int extra_bytes;
    113 
    114 /* Macros for rounding.  Note that rounding to any value is possible
    115    by changing the definition of PAGE.  */
    116 #define PAGE (getpagesize ())
    117 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
    118 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
    119 		       & ~(page_size - 1))
    120 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))
    121 
    122 #define MEM_ALIGN sizeof(double)
    123 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
    124 				   & ~(MEM_ALIGN - 1))
    125 
    126 /* Data structures of heaps and blocs.  */
    128 
    129 /* The relocatable objects, or blocs, and the malloc data
    130    both reside within one or more heaps.
    131    Each heap contains malloc data, running from `start' to `bloc_start',
    132    and relocatable objects, running from `bloc_start' to `free'.
    133 
    134    Relocatable objects may relocate within the same heap
    135    or may move into another heap; the heaps themselves may grow
    136    but they never move.
    137 
    138    We try to make just one heap and make it larger as necessary.
     139    But sometimes we can't do that, because we can't get contiguous
    140    space to add onto the heap.  When that happens, we start a new heap.  */
    141 
     142 typedef struct heap
     143 {
          /* Expected layout (see the overview comment above):
	     start <= bloc_start <= free <= end.
	     Malloc data occupies [start, bloc_start);
	     relocatable blocs occupy [bloc_start, free).  */
     144   struct heap *next;		/* Later heap in the list, or NIL_HEAP.  */
     145   struct heap *prev;		/* Earlier heap in the list, or NIL_HEAP.  */
     146   /* Start of memory range of this heap.  */
     147   POINTER start;
     148   /* End of memory range of this heap.  */
     149   POINTER end;
     150   /* Start of relocatable data in this heap.  */
     151   POINTER bloc_start;
     152   /* Start of unused space in this heap.  */
     153   POINTER free;
     154   /* First bloc in this heap.  */
     155   struct bp *first_bloc;
     156   /* Last bloc in this heap.  */
     157   struct bp *last_bloc;
     158 } *heap_ptr;
    159 
    160 #define NIL_HEAP ((heap_ptr) 0)
    161 #define HEAP_PTR_SIZE (sizeof (struct heap))
    162 
    163 /* This is the first heap object.
    164    If we need additional heap objects, each one resides at the beginning of
    165    the space it covers.   */
    166 static struct heap heap_base;
    167 
    168 /* Head and tail of the list of heaps.  */
    169 static heap_ptr first_heap, last_heap;
    170 
    171 /* These structures are allocated in the malloc arena.
    172    The linked list is kept in order of increasing '.data' members.
    173    The data blocks abut each other; if b->next is non-nil, then
    174    b->data + b->size == b->next->data.  */
     175 typedef struct bp
     176 {
     177   struct bp *next;		/* Next bloc in the list, or NIL_BLOC.  */
     178   struct bp *prev;		/* Previous bloc in the list, or NIL_BLOC.  */
     179   POINTER *variable;		/* Address of the user's pointer variable;
				   updated whenever this bloc's data moves.  */
     180   POINTER data;		/* Current address of the bloc's data.  */
     181   SIZE size;			/* Size of the data, in bytes.  */
     182   POINTER new_data;		/* temporarily used for relocation */
     183   /* Heap this bloc is in.  */
     184   struct heap *heap;
     185 } *bloc_ptr;
    186 
    187 #define NIL_BLOC ((bloc_ptr) 0)
    188 #define BLOC_PTR_SIZE (sizeof (struct bp))
    189 
    190 /* Head and tail of the list of relocatable blocs.  */
    191 static bloc_ptr first_bloc, last_bloc;
    192 
    193 
    194 /* Functions to get and return memory from the system.  */
    196 
    197 /* Find the heap that ADDRESS falls within.  */
    198 
    199 static heap_ptr
    200 find_heap (address)
    201     POINTER address;
    202 {
    203   heap_ptr heap;
    204 
    205   for (heap = last_heap; heap; heap = heap->prev)
    206     {
    207       if (heap->start <= address && address <= heap->end)
    208 	return heap;
    209     }
    210 
    211   return NIL_HEAP;
    212 }
    213 
     214 /* Find SIZE bytes of space in a heap.
     215    Try to get them at ADDRESS (which must fall within some heap's range)
     216    if we can get that many within one heap.
     217 
     218    If enough space is not presently available in our reserve, this means
     219    getting more page-aligned space from the system.  If the returned space
     220    is not contiguous to the last heap, allocate a new heap, and append it
     221    to the heap list.
     222 
     223    obtain does not try to keep track of whether space is in use
     224    or not in use.  It just returns the address of SIZE bytes that
     225    fall within a single heap.  If you call obtain twice in a row
     226    with the same arguments, you typically get the same value.
     227    It's the caller's responsibility to keep track of what space is in use.
     228 
     229    Return the address of the space if all went well, or zero if we couldn't
     230    allocate the memory.  */
    231 
     232 static POINTER
     233 obtain (address, size)
     234     POINTER address;
     235     SIZE size;
     236 {
     237   heap_ptr heap;
     238   SIZE already_available;
     239 
     240   /* Find the heap that ADDRESS falls within.  */
     241   for (heap = last_heap; heap; heap = heap->prev)
     242     {
     243       if (heap->start <= address && address <= heap->end)
     244 	break;
     245     }
     246 
     247   if (! heap)
     248     abort ();
     249 
     250   /* If we can't fit SIZE bytes in that heap,
     251      try successive later heaps.  */
     252   while (heap && address + size > heap->end)
     253     {
     254       heap = heap->next;
     255       if (heap == NIL_HEAP)
     256 	break;
     257       address = heap->bloc_start;
     258     }
     259 
     260   /* If we can't fit them within any existing heap,
     261      get more space.  */
     262   if (heap == NIL_HEAP)
     263     {
     264       POINTER new = (*real_morecore)(0);	/* The current real break.  */
     265       SIZE get;
     266 
     267       already_available = (char *)last_heap->end - (char *)address;
     268 
     269       if (new != last_heap->end)
     270 	{
     271 	  /* Someone else called sbrk.  Make a new heap.  */
     272 
     273 	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
     274 	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));
     275 
	  /* Grow the real break so the new heap's own header lies
	     inside the space we manage.  */
     276 	  if ((*real_morecore) (bloc_start - new) != new)
     277 	    return 0;
     278 
     279 	  new_heap->start = new;
     280 	  new_heap->end = bloc_start;
     281 	  new_heap->bloc_start = bloc_start;
     282 	  new_heap->free = bloc_start;
     283 	  new_heap->next = NIL_HEAP;
     284 	  new_heap->prev = last_heap;
     285 	  new_heap->first_bloc = NIL_BLOC;
     286 	  new_heap->last_bloc = NIL_BLOC;
     287 	  last_heap->next = new_heap;
     288 	  last_heap = new_heap;
     289 
     290 	  address = bloc_start;
     291 	  already_available = 0;	/* The new heap holds nothing yet.  */
     292 	}
     293 
     294       /* Add space to the last heap (which we may have just created).
     295 	 Get some extra, so we can come here less often.  */
     296 
     297       get = size + extra_bytes - already_available;
     298       get = (char *) ROUNDUP ((char *)last_heap->end + get)
     299 	- (char *) last_heap->end;
     300 
     301       if ((*real_morecore) (get) != last_heap->end)
     302 	return 0;
     303 
     304       last_heap->end += get;
     305     }
     306 
     307   return address;
     308 }
    309 
    310 /* Return unused heap space to the system
    311    if there is a lot of unused space now.
    312    This can make the last heap smaller;
    313    it can also eliminate the last heap entirely.  */
    314 
     315 static void
     316 relinquish ()
     317 {
     318   register heap_ptr h;
     319   int excess = 0;
     320 
     321   /* Add the amount of space beyond break_value
     322      in all heaps which extend beyond break_value at all.  */
     323 
     324   for (h = last_heap; h && break_value < h->end; h = h->prev)
     325     {
     326       excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
     327 					    ? h->bloc_start : break_value);
     328     }
     329 
        /* Only shrink if it is worthwhile and we still own the real
	   break (i.e. nobody else has called sbrk since).  */
     330   if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
     331     {
     332       /* Keep extra_bytes worth of empty space.
     333 	 And don't free anything unless we can free at least extra_bytes.  */
     334       excess -= extra_bytes;
     335 
     336       if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
     337 	{
     338 	  /* This heap should have no blocs in it.  */
     339 	  if (last_heap->first_bloc != NIL_BLOC
     340 	      || last_heap->last_bloc != NIL_BLOC)
     341 	    abort ();
     342 
     343 	  /* Return the last heap, with its header, to the system.  */
     344 	  excess = (char *)last_heap->end - (char *)last_heap->start;
     345 	  last_heap = last_heap->prev;
     346 	  last_heap->next = NIL_HEAP;
     347 	}
     348       else
     349 	{
	  /* Shrink the last heap, keeping its end page-aligned.  */
     350 	  excess = (char *) last_heap->end
     351 			- (char *) ROUNDUP ((char *)last_heap->end - excess);
     352 	  last_heap->end -= excess;
     353 	}
     354 
     355       if ((*real_morecore) (- excess) == 0)
     356 	abort ();
     357     }
     358 }
    359 
    360 /* The meat - allocating, freeing, and relocating blocs.  */
    362 
    363 /* Find the bloc referenced by the address in PTR.  Returns a pointer
    364    to that block.  */
    365 
    366 static bloc_ptr
    367 find_bloc (ptr)
    368      POINTER *ptr;
    369 {
    370   register bloc_ptr p = first_bloc;
    371 
    372   while (p != NIL_BLOC)
    373     {
    374       if (p->variable == ptr && p->data == *ptr)
    375 	return p;
    376 
    377       p = p->next;
    378     }
    379 
    380   return p;
    381 }
    382 
    383 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
    384    Returns a pointer to the new bloc, or zero if we couldn't allocate
    385    memory for the new block.  */
    386 
     388 static bloc_ptr
     389 get_bloc (size)
     390      SIZE size;
     391 {
     392   register bloc_ptr new_bloc;
     393   register heap_ptr heap;
     394 
        /* Get a header from malloc and SIZE bytes of relocatable space
	   at the current break_value; fail cleanly if either fails.  */
     395   if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
     396       || ! (new_bloc->data = obtain (break_value, size)))
     397     {
     398       if (new_bloc)
     399 	free (new_bloc);
     400 
     401       return 0;
     402     }
     403 
     404   break_value = new_bloc->data + size;	/* New end of relocatable data.  */
     405 
     406   new_bloc->size = size;
     407   new_bloc->next = NIL_BLOC;
     408   new_bloc->variable = (POINTER *) NIL;	/* Caller fills this in.  */
     409   new_bloc->new_data = 0;
     410 
     411   /* Record in the heap that this space is in use.  */
     412   heap = find_heap (new_bloc->data);
     413   heap->free = break_value;
     414 
     415   /* Maintain the correspondence between heaps and blocs.  */
     416   new_bloc->heap = heap;
     417   heap->last_bloc = new_bloc;
     418   if (heap->first_bloc == NIL_BLOC)
     419     heap->first_bloc = new_bloc;
     420 
     421   /* Put this bloc on the doubly-linked list of blocs.  */
     422   if (first_bloc)
     423     {
     424       new_bloc->prev = last_bloc;
     425       last_bloc->next = new_bloc;
     426       last_bloc = new_bloc;
     427     }
     428   else
     429     {
     430       first_bloc = last_bloc = new_bloc;
     431       new_bloc->prev = NIL_BLOC;
     432     }
     433 
     434   return new_bloc;
     435 }
    435 
    436 /* Calculate new locations of blocs in the list beginning with BLOC,
    438    relocating it to start at ADDRESS, in heap HEAP.  If enough space is
    439    not presently available in our reserve, call obtain for
    440    more space.
    441 
    442    Store the new location of each bloc in its new_data field.
    443    Do not touch the contents of blocs or break_value.  */
    444 
     445 static int
     446 relocate_blocs (bloc, heap, address)
     447     bloc_ptr bloc;
     448     heap_ptr heap;
     449     POINTER address;
     450 {
     451   register bloc_ptr b = bloc;
     452 
     453   while (b)
     454     {
     455       /* If bloc B won't fit within HEAP,
     456 	 move to the next heap and try again.  */
     457       while (heap && address + b->size > heap->end)
     458 	{
     459 	  heap = heap->next;
     460 	  if (heap == NIL_HEAP)
     461 	    break;
     462 	  address = heap->bloc_start;
     463 	}
     464 
     465       /* If BLOC won't fit in any heap,
     466 	 get enough new space to hold BLOC and all following blocs.  */
     467       if (heap == NIL_HEAP)
     468 	{
     469 	  register bloc_ptr tb = b;
     470 	  register SIZE s = 0;
     471 
     472 	  /* Add up the size of all the following blocs.  */
     473 	  while (tb != NIL_BLOC)
     474 	    {
     475 	      s += tb->size;
     476 	      tb = tb->next;
     477 	    }
     478 
     479 	  /* Get that space.  */
     480 	  address = obtain (address, s);
     481 	  if (address == 0)
     482 	    return 0;
     483 
     484 	  heap = last_heap;	/* obtain grew (or created) the last heap.  */
     485 	}
     486 
     487       /* Record the new address of this bloc
     488 	 and update where the next bloc can start.  */
     489       b->new_data = address;
     490       address += b->size;
     491       b = b->next;
     492     }
     493 
     494   return 1;
     495 }
    496 
    497 /* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
    498    This is necessary if we put the memory of space of BLOC
    499    before that of BEFORE.  */
    500 
    501 static void
    502 reorder_bloc (bloc, before)
    503      bloc_ptr bloc, before;
    504 {
    505   bloc_ptr prev, next;
    506 
    507   /* Splice BLOC out from where it is.  */
    508   prev = bloc->prev;
    509   next = bloc->next;
    510 
    511   if (prev)
    512     prev->next = next;
    513   if (next)
    514     next->prev = prev;
    515 
    516   /* Splice it in before BEFORE.  */
    517   prev = before->prev;
    518 
    519   if (prev)
    520     prev->next = bloc;
    521   bloc->prev = prev;
    522 
    523   before->prev = bloc;
    524   bloc->next = before;
    525 }
    526 
    527 /* Update the records of which heaps contain which blocs, starting
    529    with heap HEAP and bloc BLOC.  */
    530 
     531 static void
     532 update_heap_bloc_correspondence (bloc, heap)
     533      bloc_ptr bloc;
     534      heap_ptr heap;
     535 {
     536   register bloc_ptr b;
     537 
     538   /* Initialize HEAP's status to reflect blocs before BLOC.  */
     539   if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
     540     {
     541       /* The previous bloc is in HEAP.  */
     542       heap->last_bloc = bloc->prev;
     543       heap->free = bloc->prev->data + bloc->prev->size;
     544     }
     545   else
     546     {
     547       /* HEAP contains no blocs before BLOC.  */
     548       heap->first_bloc = NIL_BLOC;
     549       heap->last_bloc = NIL_BLOC;
     550       heap->free = heap->bloc_start;
     551     }
     552 
     553   /* Advance through blocs one by one.  */
     554   for (b = bloc; b != NIL_BLOC; b = b->next)
     555     {
     556       /* Advance through heaps, marking them empty,
     557 	 till we get to the one that B is in.  */
     558       while (heap)
     559 	{
     560 	  if (heap->bloc_start <= b->data && b->data <= heap->end)
     561 	    break;
     562 	  heap = heap->next;
     563 	  /* We know HEAP is not null now,
     564 	     because there has to be space for bloc B.  */
     565 	  heap->first_bloc = NIL_BLOC;
     566 	  heap->last_bloc = NIL_BLOC;
     567 	  heap->free = heap->bloc_start;
     568 	}
     569 
     570       /* Update HEAP's status for bloc B.  */
     571       heap->free = b->data + b->size;
     572       heap->last_bloc = b;
     573       if (heap->first_bloc == NIL_BLOC)
     574 	heap->first_bloc = b;
     575 
     576       /* Record that B is in HEAP.  */
     577       b->heap = heap;
     578     }
     579 
     580   /* If there are any remaining heaps and no blocs left,
     581      mark those heaps as empty.  */
     582   heap = heap->next;	/* HEAP itself may still hold blocs.  */
     583   while (heap)
     584     {
     585       heap->first_bloc = NIL_BLOC;
     586       heap->last_bloc = NIL_BLOC;
     587       heap->free = heap->bloc_start;
     588       heap = heap->next;
     589     }
     590 }
    591 
    592 /* Resize BLOC to SIZE bytes.  This relocates the blocs
    594    that come after BLOC in memory.  */
    595 
     596 static int
     597 resize_bloc (bloc, size)
     598     bloc_ptr bloc;
     599     SIZE size;
     600 {
     601   register bloc_ptr b;
     602   heap_ptr heap;
     603   POINTER address;
     604   SIZE old_size;
     605 
     606   if (bloc == NIL_BLOC || size == bloc->size)
     607     return 1;
     608 
     609   for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
     610     {
     611       if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
     612 	break;
     613     }
     614 
     615   if (heap == NIL_HEAP)
     616     abort ();
     617 
     618   old_size = bloc->size;
     619   bloc->size = size;
     620 
     621   /* Note that bloc could be moved into the previous heap.  */
     622   address = (bloc->prev ? bloc->prev->data + bloc->prev->size
     623 	     : first_heap->bloc_start);
     624   while (heap)
     625     {
     626       if (heap->bloc_start <= address && address <= heap->end)
     627 	break;
     628       heap = heap->prev;
     629     }
     630 
     631   if (! relocate_blocs (bloc, heap, address))
     632     {
     633       bloc->size = old_size;
     634       return 0;
     635     }
     636 
     637   if (size > old_size)
     638     {
        /* Growing: later blocs move up, so copy from the top down;
	   each bloc's destination is vacated before it is written.  */
     639       for (b = last_bloc; b != bloc; b = b->prev)
     640 	{
     641 	  safe_bcopy (b->data, b->new_data, b->size);
     642 	  *b->variable = b->data = b->new_data;
     643 	}
     644       safe_bcopy (bloc->data, bloc->new_data, old_size);
     645       bzero (bloc->new_data + old_size, size - old_size);	/* Zero the growth.  */
     646       *bloc->variable = bloc->data = bloc->new_data;
     647     }
     648   else
     649     {
        /* Shrinking: later blocs move down, so copy front to back.  */
     650       for (b = bloc; b != NIL_BLOC; b = b->next)
     651 	{
     652 	  safe_bcopy (b->data, b->new_data, b->size);
     653 	  *b->variable = b->data = b->new_data;
     654 	}
     655     }
     656 
     657   update_heap_bloc_correspondence (bloc, heap);
     658 
     659   break_value = (last_bloc ? last_bloc->data + last_bloc->size
     660 		 : first_heap->bloc_start);
     661   return 1;
     662 }
    663 
    664 /* Free BLOC from the chain of blocs, relocating any blocs above it.
    666    This may return space to the system.  */
    667 
    668 static void
    669 free_bloc (bloc)
    670      bloc_ptr bloc;
    671 {
    672   heap_ptr heap = bloc->heap;
    673 
    674   resize_bloc (bloc, 0);
    675 
    676   if (bloc == first_bloc && bloc == last_bloc)
    677     {
    678       first_bloc = last_bloc = NIL_BLOC;
    679     }
    680   else if (bloc == last_bloc)
    681     {
    682       last_bloc = bloc->prev;
    683       last_bloc->next = NIL_BLOC;
    684     }
    685   else if (bloc == first_bloc)
    686     {
    687       first_bloc = bloc->next;
    688       first_bloc->prev = NIL_BLOC;
    689     }
    690   else
    691     {
    692       bloc->next->prev = bloc->prev;
    693       bloc->prev->next = bloc->next;
    694     }
    695 
    696   /* Update the records of which blocs are in HEAP.  */
    697   if (heap->first_bloc == bloc)
    698     {
    699       if (bloc->next->heap == heap)
    700 	heap->first_bloc = bloc->next;
    701       else
    702 	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    703     }
    704   if (heap->last_bloc == bloc)
    705     {
    706       if (bloc->prev->heap == heap)
    707 	heap->last_bloc = bloc->prev;
    708       else
    709 	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    710     }
    711 
    712   relinquish ();
    713   free (bloc);
    714 }
    715 
    716 /* Interface routines.  */
    718 
    719 static int use_relocatable_buffers;
    720 static int r_alloc_freeze_level;
    721 
    722 /* Obtain SIZE bytes of storage from the free pool, or the system, as
    723    necessary.  If relocatable blocs are in use, this means relocating
    724    them.  This function gets plugged into the GNU malloc's __morecore
    725    hook.
    726 
    727    We provide hysteresis, never relocating by less than extra_bytes.
    728 
    729    If we're out of memory, we should return zero, to imitate the other
    730    __morecore hook values - in particular, __default_morecore in the
    731    GNU malloc package.  */
    732 
     733 POINTER
     734 r_alloc_sbrk (size)
     735      long size;
     736 {
     737   register bloc_ptr b;
     738   POINTER address;
     739 
     740   if (! use_relocatable_buffers)
     741     return (*real_morecore) (size);	/* Relocation off: act like sbrk.  */
     742 
     743   if (size == 0)
     744     return virtual_break_value;
     745 
     746   if (size > 0)
     747     {
     748       /* Allocate a page-aligned space.  GNU malloc would reclaim an
     749 	 extra space if we passed an unaligned one.  But we could
     750 	 not always find a space which is contiguous to the previous.  */
     751       POINTER new_bloc_start;
     752       heap_ptr h = first_heap;
     753       SIZE get = ROUNDUP (size);
     754 
     755       address = (POINTER) ROUNDUP (virtual_break_value);
     756 
     757       /* Search the list upward for a heap which is large enough.  */
     758       while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
     759 	{
     760 	  h = h->next;
     761 	  if (h == NIL_HEAP)
     762 	    break;
     763 	  address = (POINTER) ROUNDUP (h->start);
     764 	}
     765 
     766       /* If not found, obtain more space.  */
     767       if (h == NIL_HEAP)
     768 	{
     769 	  get += extra_bytes + page_size;
     770 
     771 	  if (r_alloc_freeze_level > 0 || ! obtain (address, get))
     772 	    return 0;
     773 
     774 	  if (first_heap == last_heap)
     775 	    address = (POINTER) ROUNDUP (virtual_break_value);
     776 	  else
     777 	    address = (POINTER) ROUNDUP (last_heap->start);
     778 	  h = last_heap;
     779 	}
     780 
     781       new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);
     782 
     783       if (first_heap->bloc_start < new_bloc_start)
     784 	{
     785 	  /* Move all blocs upward.  */
     786 	  if (r_alloc_freeze_level > 0
     787 	      || ! relocate_blocs (first_bloc, h, new_bloc_start))
     788 	    return 0;
     789 
     790 	  /* Note that (POINTER)(h+1) <= new_bloc_start since
     791 	     get >= page_size, so the following does not destroy the heap
     792 	     header.  */
	  /* Copy from the top down: moving a bloc upward cannot then
	     clobber a bloc that has not been moved yet.  */
     793 	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
     794 	    {
     795 	      safe_bcopy (b->data, b->new_data, b->size);
     796 	      *b->variable = b->data = b->new_data;
     797 	    }
     798 
     799 	  h->bloc_start = new_bloc_start;
     800 
     801 	  update_heap_bloc_correspondence (first_bloc, h);
     802 	}
     803 
     804       if (h != first_heap)
     805 	{
     806 	  /* Give up managing heaps below the one the new
     807 	     virtual_break_value points to.  */
     808 	  first_heap->prev = NIL_HEAP;
     809 	  first_heap->next = h->next;
     810 	  first_heap->start = h->start;
     811 	  first_heap->end = h->end;
     812 	  first_heap->free = h->free;
     813 	  first_heap->first_bloc = h->first_bloc;
     814 	  first_heap->last_bloc = h->last_bloc;
     815 	  first_heap->bloc_start = h->bloc_start;
     816 
     817 	  if (first_heap->next)
     818 	    first_heap->next->prev = first_heap;
     819 	  else
     820 	    last_heap = first_heap;
     821 	}
     822 
     823       bzero (address, size);	/* Fresh sbrk space reads as zeros.  */
     824     }
     825   else /* size < 0 */
     826     {
     827       SIZE excess = (char *)first_heap->bloc_start
     828 		      - ((char *)virtual_break_value + size);
     829 
     830       address = virtual_break_value;
     831 
     832       if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
     833 	{
	  /* Enough was freed: slide the blocs down, keeping
	     extra_bytes of hysteresis.  */
     834 	  excess -= extra_bytes;
     835 	  first_heap->bloc_start
     836 	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);
     837 	
     838 	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);
     839 	
     840 	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
     841 	    {
     842 	      safe_bcopy (b->data, b->new_data, b->size);
     843 	      *b->variable = b->data = b->new_data;
     844 	    }
     845 	}
     846 
     847       if ((char *)virtual_break_value + size < (char *)first_heap->start)
     848 	{
     849 	  /* We found an additional space below the first heap */
     850 	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
     851 	}
     852     }
     853 
     854   virtual_break_value = (POINTER) ((char *)address + size);
     855   break_value = (last_bloc
     856 		 ? last_bloc->data + last_bloc->size
     857 		 : first_heap->bloc_start);
     858   if (size < 0)
     859     relinquish ();
     860 
     861   return address;
     862 }
    863 
    864 /* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
    865    the data is returned in *PTR.  PTR is thus the address of some variable
    866    which will use the data area.
    867 
    868    If we can't allocate the necessary memory, set *PTR to zero, and
    869    return zero.  */
    870 
    871 POINTER
    872 r_alloc (ptr, size)
    873      POINTER *ptr;
    874      SIZE size;
    875 {
    876   register bloc_ptr new_bloc;
    877 
    878   if (! r_alloc_initialized)
    879     r_alloc_init ();
    880 
    881   new_bloc = get_bloc (MEM_ROUNDUP (size));
    882   if (new_bloc)
    883     {
    884       new_bloc->variable = ptr;
    885       *ptr = new_bloc->data;
    886     }
    887   else
    888     *ptr = 0;
    889 
    890   return *ptr;
    891 }
    892 
    893 /* Free a bloc of relocatable storage whose data is pointed to by PTR.
    894    Store 0 in *PTR to show there's no block allocated.  */
    895 
    896 void
    897 r_alloc_free (ptr)
    898      register POINTER *ptr;
    899 {
    900   register bloc_ptr dead_bloc;
    901 
    902   dead_bloc = find_bloc (ptr);
    903   if (dead_bloc == NIL_BLOC)
    904     abort ();
    905 
    906   free_bloc (dead_bloc);
    907   *ptr = 0;
    908 }
    909 
    910 /* Given a pointer at address PTR to relocatable data, resize it to SIZE.
    911    Do this by shifting all blocks above this one up in memory, unless
    912    SIZE is less than or equal to the current bloc size, in which case
    913    do nothing.
    914 
    915    Change *PTR to reflect the new bloc, and return this value.
    916 
    917    If more memory cannot be allocated, then leave *PTR unchanged, and
    918    return zero.  */
    919 
    920 POINTER
    921 r_re_alloc (ptr, size)
    922      POINTER *ptr;
    923      SIZE size;
    924 {
    925   register bloc_ptr bloc;
    926 
    927   bloc = find_bloc (ptr);
    928   if (bloc == NIL_BLOC)
    929     abort ();
    930 
    931   if (size <= bloc->size)
    932     /* Wouldn't it be useful to actually resize the bloc here?  */
    933     return *ptr;
    934 
    935   if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
    936     return 0;
    937 
    938   return *ptr;
    939 }
    940 
    941 /* Disable relocations, after making room for at least SIZE bytes
    942    of non-relocatable heap if possible.  The relocatable blocs are
    943    guaranteed to hold still until thawed, even if this means that
    944    malloc must return a null pointer.  */
    945 
    946 void
    947 r_alloc_freeze (size)
    948      long size;
    949 {
    950   /* If already frozen, we can't make any more room, so don't try.  */
    951   if (r_alloc_freeze_level > 0)
    952     size = 0;
    953   /* If we can't get the amount requested, half is better than nothing.  */
    954   while (size > 0 && r_alloc_sbrk (size) == 0)
    955     size /= 2;
    956   ++r_alloc_freeze_level;
    957   if (size > 0)
    958     r_alloc_sbrk (-size);
    959 }
    960 
    961 void
    962 r_alloc_thaw ()
    963 {
    964   if (--r_alloc_freeze_level < 0)
    965     abort ();
    966 }
    967 
    968 /* The hook `malloc' uses for the function which gets more space
    970    from the system.  */
    971 extern POINTER (*__morecore) ();
    972 
    973 /* Initialize various things for memory allocation.  */
    974 
static void
r_alloc_init ()
{
  /* One-shot initialization: hook ourselves into malloc's __morecore
     chain and set up the first heap descriptor.  Safe to call more
     than once.  */
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  /* Interpose r_alloc_sbrk between malloc and the real sbrk-style
     allocator, remembering the original so we can still grow the
     address space ourselves.  */
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  /* The current break is both the start of the heap and, for now, the
     start of bloc storage; break_value tracks the true break while
     virtual_break_value is what malloc believes it to be.  */
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

  /* Round the heap end up to a page boundary.  */
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  /* Bloc storage now begins at the page boundary we just reached.  */
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}
   1013 #ifdef DEBUG
   1014 #include <assert.h>
   1015 
/* Debug-only consistency check: walk the heap list and the bloc list
   and assert every structural invariant of the relocating allocator.
   Aborts (via assert) on the first violation; a no-op until
   r_alloc_init has run.  */
void
r_alloc_check ()
{
    int found = 0;
    heap_ptr h, ph = 0;
    bloc_ptr b, pb = 0;

    if (!r_alloc_initialized)
      return;

    /* Global invariants: the heap list exists, ends at or before the
       real system break, and the break values lie inside the first
       heap.  */
    assert (first_heap);
    assert (last_heap->end <= (POINTER) sbrk (0));
    assert ((POINTER) first_heap < first_heap->start);
    assert (first_heap->start <= virtual_break_value);
    assert (virtual_break_value <= first_heap->end);

    /* Walk the heaps: each must be properly linked, page/unit aligned,
       and internally ordered (start <= bloc_start <= end).  */
    for (h = first_heap; h; h = h->next)
      {
	assert (h->prev == ph);
	assert ((POINTER) ROUNDUP (h->end) == h->end);
	assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
	assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
	assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

	if (ph)
	  {
	    /* Non-first heaps must follow their predecessor and embed
	       their own descriptor between start and bloc_start.  */
	    assert (ph->end < h->start);
	    assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	  }

	/* Exactly one heap must contain the current break.  */
	if (h->bloc_start <= break_value && break_value <= h->end)
	    found = 1;

	ph = h;
      }

    assert (found);
    assert (last_heap == ph);

    /* Walk the blocs: each must be properly linked, aligned, and fit
       entirely within some heap.  */
    for (b = first_bloc; b; b = b->next)
      {
	assert (b->prev == pb);
	assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
	assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

	/* Find the heap containing this bloc's data; ph is left at the
	   heap just before it.  */
	ph = 0;
	for (h = first_heap; h; h = h->next)
	  {
	    if (h->bloc_start <= b->data && b->data + b->size <= h->end)
		break;
	    ph = h;
	  }

	assert (h);

	/* Blocs are normally packed end-to-end.  A gap is legal only
	   when the bloc moved to a new heap because it could not fit
	   in any earlier one.  */
	if (pb && pb->data + pb->size != b->data)
	  {
	    assert (ph && b->data == h->bloc_start);
	    while (ph)
	      {
		if (ph->bloc_start <= pb->data
		    && pb->data + pb->size <= ph->end)
		  {
		    /* The previous bloc's heap had no room for B.  */
		    assert (pb->data + pb->size + b->size > ph->end);
		    break;
		  }
		else
		  {
		    /* Intermediate heaps were also too small for B.  */
		    assert (ph->bloc_start + b->size > ph->end);
		  }
		ph = ph->prev;
	      }
	  }
	pb = b;
      }

    assert (last_bloc == pb);

    /* The break sits right after the last bloc, or at bloc_start when
       no blocs exist.  */
    if (last_bloc)
	assert (last_bloc->data + last_bloc->size == break_value);
    else
	assert (first_heap->bloc_start == break_value);
}
   1099 #endif /* DEBUG */
   1100