#include "ti-gc.h"
#include "regions.h"
#include <stdint.h>  /* uintptr_t */
#include <stddef.h>  /* offsetof */
#include <stdio.h>   /* fprintf, perror */
#include <stdlib.h>  /* exit, abort, atexit */
#include <string.h>  /* memset */
#include <assert.h>  /* assert */

#define PAGE_GROUP_SIZE 32
#define K 4

#ifndef USE_GC_NONE
extern const uintptr_t GC_page_size;
#endif

ti_lock_t _freepages_lock;
#define LOCK_PAGEALLOC() ti_lock(&_freepages_lock)
#define UNLOCK_PAGEALLOC() ti_unlock(&_freepages_lock)

#ifdef MEMORY_SHARED
ti_region_threadinfo_t *_ti_region_threadinfo = NULL;
#else
ti_region_threadinfo_t _ti_region_threadinfo;
#endif

#ifdef REGIONPROFILE
static ti_lock_t rplock;
#define RPLOCK ti_lock(&rplock)
#define RPUNLOCK ti_unlock(&rplock)
jIntPointer totalbytes, maxbytes;
jIntPointer totalregions, maxregions;
#endif

jIntPointer allocation_pagesize;

#ifdef ALIGN
#undef ALIGN
#endif
/* Round x up to the next multiple of n, where n is a power of 2;
   e.g. ALIGN(13, 8) == 16 */
#define ALIGN(x, n) (((x) + ((n) - 1)) & ~((n) - 1))
#define RALIGNMENT 8

/* Doubly linked region list management */

void addregion(Region r)
/* Effects: Adds r to the front of the doubly-linked region list */
{
  ti_region_threadinfo_t *info = ti_myregioninfo();

  r->previous = NULL;
  r->next = info->allregions;
  if (info->allregions)
    info->allregions->previous = r;
  info->allregions = r;
}

void remregion(Region r)
/* Effects: Removes r from the doubly-linked region list */
{
  if (r->previous)
    r->previous->next = r->next;
  else
    ti_myregioninfo()->allregions = r->next;
  if (r->next)
    r->next->previous = r->previous;
}

#if 1
/* Zero size bytes starting at start, one long at a time.
   Assumes size > 0 (the loop body always executes at least once)
   and long-aligned start/size. */
void clear(void *start, jIntPointer size)
{
  long *clear, *clearend;

  clear = (long *)start;
  clearend = (long *)((char *)start + size);
  do
    *clear++ = 0;
  while (clear < clearend);
}
#else
#define clear(start, size) memset(start, 0, size)
#endif

Region newregion(void)
{
  char *first = (char *)alloc_single_page(NULL);
  Region r;
  ti_region_threadinfo_t *info = ti_myregioninfo();

#ifdef REGIONPROFILE
  RPLOCK;
  totalregions++;
  if (totalregions > maxregions)
    maxregions = totalregions;
  RPUNLOCK;
#endif

  /* stagger regions across cache lines a bit */
  info->rstart += 64;
  if (info->rstart > RPAGESIZE / K)
    info->rstart = 0;
  r = (Region)(first + info->rstart + offsetof(struct page, previous));
  clear(r, sizeof *r);

  return r;
}
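/* newregion() above only grabs a fresh page and zeroes a staggered Region
   header inside it; initregion() below primes the per-size-class allocation
   cursors, records the page -> region ownership via set_region(), and links
   the region into the per-thread region list.  The two are presumably
   always invoked as a pair by generated code. */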
void initregion(Region r)
{
  ti_region_threadinfo_t *info = ti_myregioninfo();
  char *first = (char *)r - info->rstart - offsetof(struct page, previous);
  int fnb = PAGENB(first);

  /* Start using page with region header as a pointer-containing page */
  r->normal.page.base = first;
  /* XXX: Need extra ALIGNs ? */
  r->normal.page.allocfrom =
    ALIGN(info->rstart + offsetof(struct page, previous) + sizeof(*r),
          RALIGNMENT);

  /* Guarantee failure for all other blocks */
  r->normal.superpage.allocfrom = K * RPAGESIZE + 1;
  r->normal.hyperpage.allocfrom = K * K * RPAGESIZE + 1;
  r->atomic.page.allocfrom = RPAGESIZE + 1;
  r->atomic.superpage.allocfrom = K * RPAGESIZE + 1;
  r->atomic.hyperpage.allocfrom = K * K * RPAGESIZE + 1;

  /* Remember that r owns this page */
  r->normal.pages = (struct page *)first;
  set_region(r->normal.pages, 1, r);

  /* Add to list of all regions */
  addregion(r);
}

void *_ralloc(Region r, size_t size)
{
  char *mem;

  if (!r)
    return ti_malloc(size);

#ifdef REGIONPROFILE
  RPLOCK;
  totalbytes += size;
  if (totalbytes > maxbytes)
    maxbytes = totalbytes;
  RPUNLOCK;
  r->bytes += size;
#endif

  size = ALIGN(size, RALIGNMENT);
  mem = (char *)qalloc(r, &r->normal, size);
  clear(mem, size);

  return mem;
}

const void *_ralloc_atomic(Region r, size_t size)
{
  if (!r)
    return ti_malloc_atomic(size);
  else {
#ifdef REGIONPROFILE
    RPLOCK;
    totalbytes += size;
    if (totalbytes > maxbytes)
      maxbytes = totalbytes;
    RPUNLOCK;
    r->bytes += size;
#endif
    return qalloc(r, &r->atomic, ALIGN(size, RALIGNMENT));
  }
}

#if 0
static void delpage(char *deleting, char *end)
{
  while (deleting < end) {
    cleanup_t cln = GET_CLEANUP_FN(deleting);
    size_t s;

    if (!cln)
      break;
    s = cln(deleting);
    deleting += ALIGN(s, RALIGNMENT);
  }
}
#endif

static void delregion(Region r)
{
  char *p;                  /* used only by the disabled cleanup scan below */
  unsigned long next, pnb;

  ti_trace_free(Region.delete);

#ifdef REGIONPROFILE
  RPLOCK;
  totalbytes -= r->bytes;
  totalregions--;
  RPUNLOCK;
#endif

#if 0
  /* Mark end of last page */
  if (h->normal.allocfrom < RPAGESIZE)
    *(void **)((char *)h->normal.lastpage + h->normal.allocfrom) = NULL;

  /* Scan normal pages */
  /* First page (last in list) is special (region object there) */
  for (pnb = PAGENB(h->normal.lastpage); next = nextmap[pnb]; pnb = next) {
    p = PAGEADDR(pnb);
    delpage(p, p + RPAGESIZE);
  }
  /* We skip region on first page */
  p = PAGEADDR(pnb);
  delpage((char *)(h + 1), p + RPAGESIZE);
#endif

  /* Remove r from region list */
  remregion(r);

  LOCK_PAGEALLOC();
  free_all_pages(r, &r->atomic);
  free_all_pages(r, &r->normal);
  UNLOCK_PAGEALLOC();
}

int deleteregion(Region hp)
{
  delregion(hp);
  return 1;
}

#ifdef BACKEND_UNIPROC
/* dummy region for NULL and GC addresses. It has id 0 */
struct T6Region4lang2ti zeroregion;
#endif

void m4initmT6Region4lang2ti()
{
  /* Dummy fn so that this file gets included. It used to contain what
     became region_init... */
}

#ifdef REGIONPROFILE
static void print_memory_usage(void)
{
  Region r;

  if (MYBOXPROC != 0)
    return;

  RPLOCK;
  fprintf(stderr, "%i: Current region usage %lu bytes, max %lu bytes\n",
          MYPROC, totalbytes, maxbytes);
  fprintf(stderr, "%i: Current regions %lu, max %lu\n",
          MYPROC, totalregions, maxregions);
  {
#if defined(MEMORY_SHARED)
    /* Assumes all threads are stopped */
    int i;

    for (i = 0; i < MYBOXPROCS; i++)
      for (r = ti_hisregioninfo(i)->allregions; r; r = r->next)
#else
    for (r = ti_myregioninfo()->allregions; r; r = r->next)
#endif
      fprintf(stderr, "region %p %lu used\n",
              (void *)r, (unsigned long)r->bytes);
  }
  RPUNLOCK;
}
#endif

void *bounded_sub(void *p, IntPtr n)
{
  if ((IntPtr)p < n)
    return 0;
  else
    return (void *)((IntPtr)p - n);
}

void *bounded_add(void *p, IntPtr n)
{
  if ((IntPtr)p > (IntPtr)-n)
    return 0;
  else
    return (void *)((IntPtr)p + n);
}

#ifndef USE_GC_NONE
extern void GC_setpagesize();
#endif
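/* bounded_sub() and bounded_add() above are saturating address helpers:
   they return NULL instead of letting the pointer arithmetic wrap around
   the address space, e.g. bounded_sub((void *)100, 200) yields NULL
   rather than a huge wrapped address.  Within this file they are only
   referenced by the disabled range-registration code in region_init()
   below. */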
void region_init(void)
{
  int dummy_local;
  static int dummy_global;
  void *start, *end;
  int i;

  if (MYBOXPROC != 0) {
    /* only one thread per box runs this code */
    local_barrier();
    return;
  }

#ifndef USE_GC_NONE
  GC_setpagesize();
  allocation_pagesize = GC_page_size;
#endif
  if (RPAGESIZE > allocation_pagesize)
    allocation_pagesize = RPAGESIZE;

  /* Our minimum allocation chunk is K pages. We want to be sure that
     this is a multiple of the minimum allocation size (if it isn't,
     either K or RPAGESIZE should be increased).
     Ideally, RPAGESIZE <= GC_page_size <= K * RPAGESIZE (otherwise we
     may waste some memory because all allocation requests are rounded
     to allocation_pagesize) */
  assert(K * RPAGESIZE >= allocation_pagesize);

  /* CM: No longer needed. The page table region map is set to
     UNK_REGIONID as the default now. */
#if 0
  /* Initialise stack & static data page -> RegionId tables to contain
     UNK_REGIONID. This is somewhat hackish:
     - we assume that the stack will range from -64 megs to +1 megs
       from its current position
     - we assume all static data is found within one meg of dummy_global
     Correct solutions will be very machine specific */
  set_region_range(bounded_sub(&dummy_local, 64 * 1024 * 1024),
                   bounded_add(&dummy_local, 1 * 1024 * 1024),
                   UNK_REGIONID);
  set_region_range(bounded_sub(&dummy_global, 1 * 1024 * 1024),
                   bounded_add(&dummy_global, 1 * 1024 * 1024),
                   UNK_REGIONID);
  /* null */
  set_region_range(0, (void *)1, UNK_REGIONID);
#endif

  ti_initregioninfo();
  init_pages();

  if (ti_lock_init(&_freepages_lock) < 0) {
    perror("ti_mutex_init");
    exit(1);
  }
#ifdef REGIONPROFILE
  if (ti_lock_init(&rplock) < 0) {
    perror("ti_mutex_init(rp)");
    exit(1);
  }
#endif

#ifdef BACKEND_UNIPROC
  zeroregion.id = 1; /* Shared ! */
#endif
#ifdef REGIONPROFILE
  atexit(print_memory_usage);
#endif

  /* Initialize the region map or table */
#ifndef LARGE_ADDRESSES
  for (i = 0; i < MAXPAGE; i++)
    __regionmap[i] = UNK_REGIONID;
#else /* !LARGE_ADDRESSES */
  for (i = 0; i < (1 << MEMSLICE2); i++)
    __regiontable[i] = (RegionId *)NULL;
#endif /* !LARGE_ADDRESSES */

  local_barrier(); /* matches barrier by other threads above */
}

/* GC support. Mark all non-atomic pages */
void (*ti_gc_other_roots_pusher)();

void ti_push_region_roots(void)
{
  Region r;
#if defined(MEMORY_SHARED)
  /* Assumes all threads are stopped */
  int i;

  for (i = 0; i < MYBOXPROCS; i++)
    for (r = ti_hisregioninfo(i)->allregions; r; r = r->next)
#else
  for (r = ti_myregioninfo()->allregions; r; r = r->next)
#endif
    {
      struct page *p;

      for (p = r->normal.pages; p; p = p->next)
        ti_region_push_all(p, (char *)p + RPAGESIZE);
      for (p = r->normal.bigpages; p; p = p->next)
        ti_region_push_all(p, (char *)p + PAGE_PAGECOUNT(p) * RPAGESIZE);
    }

  if (ti_gc_other_roots_pusher)
    ti_gc_other_roots_pusher();
}

void sharingViolation(const char *where)
{
  fprintf(stderr,
          "fatal error on processor %i in %s:\n"
          "Attempt to assign a reference to a private object into a shared "
          "or GC object\n",
          MYPROC, where);
  fflush(stderr);
  abort();
}
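/* Illustrative usage sketch (an assumption, not part of the runtime; it
   relies only on the declarations above and in regions.h).  Generated
   code presumably pairs newregion() with initregion(), allocates from the
   region, and then releases every page the region owns in a single call:

       Region r = newregion();
       initregion(r);
       int *xs = _ralloc(r, 16 * sizeof(int));    // zeroed, 8-byte aligned
       const void *raw = _ralloc_atomic(r, 256);  // pointer-free data
       ...
       deleteregion(r);                           // frees all pages of r

   Passing a NULL region instead falls through to ti_malloc() /
   ti_malloc_atomic(). */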