Skip to content

Commit

Permalink
Move the scan/copy fast functions to the nursery collectors.
Browse files Browse the repository at this point in the history
The minor copy/scan functions used to be specialized based on the
major collector. It's better to do so based on the nursery collector
since we expect that more objects are internally promoted than moved
to the major heap.
  • Loading branch information
kumpera committed Apr 9, 2012
1 parent d7a20d4 commit afe9dfd
Show file tree
Hide file tree
Showing 12 changed files with 496 additions and 385 deletions.
3 changes: 3 additions & 0 deletions mono/metadata/Makefile.am
Original file line number Diff line number Diff line change
Expand Up @@ -220,7 +220,10 @@ libmonoruntime_la_SOURCES = \
sgen-gray.c \
sgen-gray.h \
sgen-major-copy-object.h \
sgen-minor-copy-object.h \
sgen-copy-object.h \
sgen-major-scan-object.h \
sgen-minor-scan-object.h \
sgen-protocol.h \
sgen-scan-object.h \
sgen-nursery-allocator.c \
Expand Down
258 changes: 258 additions & 0 deletions mono/metadata/sgen-copy-object.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,258 @@
/*
* Copyright 2001-2003 Ximian, Inc
* Copyright 2003-2010 Novell, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
 * HEAVY_STATISTICS counters defined in sgen-gc.c.  They are only ever
 * bumped through HEAVY_STAT (), which compiles away entirely when heavy
 * statistics are disabled.
 */
extern long long stat_copy_object_called_nursery;
extern long long stat_objects_copied_nursery;

extern long long stat_nursery_copy_object_failed_from_space;
extern long long stat_nursery_copy_object_failed_forwarded;
extern long long stat_nursery_copy_object_failed_pinned;
/*
 * Missing declaration: referenced by serial_copy_object () and
 * parallel_copy_object () below; without it the file fails to build
 * when HEAVY_STATISTICS is enabled.
 */
extern long long stat_nursery_copy_object_failed_to_space;

extern long long stat_slots_allocated_in_vain;

/*
 * Copy the body of OBJ (OBJSIZE bytes, already aligned) to DESTINATION
 * and optionally enqueue the copy for scanning.
 *
 * This function can be used even if the vtable of obj is not valid
 * anymore, which is the case in the parallel collector: the vtable is
 * passed in explicitly as VT instead of being read from the object,
 * whose header word may already hold a forwarding pointer or pin bit.
 *
 * NOTE: word 0 (the vtable slot) of DESTINATION is NOT written here —
 * callers install VT into the destination before calling; both copy
 * paths below start at word 1.
 *
 * queue may be NULL, in which case the copy is not enqueued (callers
 * pass NULL for objects without references, which need no scanning).
 */
static void
par_copy_object_no_checks (char *destination, MonoVTable *vt, void *obj, mword objsize, SgenGrayQueue *queue)
{
/* Computed-goto jump table for the unrolled small-object copy:
 * index is the object size in words; label N copies words N-1 .. 1. */
static const void *copy_labels [] = { &&LAB_0, &&LAB_1, &&LAB_2, &&LAB_3, &&LAB_4, &&LAB_5, &&LAB_6, &&LAB_7, &&LAB_8 };

DEBUG (9, g_assert (vt->klass->inited));
DEBUG (9, fprintf (gc_debug_file, " (to %p, %s size: %lu)\n", destination, ((MonoObject*)obj)->vtable->klass->name, (unsigned long)objsize));
binary_protocol_copy (obj, destination, vt, objsize);

/* Objects of up to 8 words are copied with an unrolled sequence of
 * word stores; jumping into the middle of it copies exactly the
 * words the object has. */
if (objsize <= sizeof (gpointer) * 8) {
mword *dest = (mword*)destination;
goto *copy_labels [objsize / sizeof (gpointer)];
LAB_8:
(dest) [7] = ((mword*)obj) [7];
LAB_7:
(dest) [6] = ((mword*)obj) [6];
LAB_6:
(dest) [5] = ((mword*)obj) [5];
LAB_5:
(dest) [4] = ((mword*)obj) [4];
LAB_4:
(dest) [3] = ((mword*)obj) [3];
LAB_3:
(dest) [2] = ((mword*)obj) [2];
LAB_2:
(dest) [1] = ((mword*)obj) [1];
LAB_1:
/* Word 0 is the vtable slot, written by the caller — nothing to do. */
;
LAB_0:
;
} else {
/*can't trust memcpy doing word copies */
/* Skip word 0 (vtable, written by the caller) here as well. */
mono_gc_memmove (destination + sizeof (mword), (char*)obj + sizeof (mword), objsize - sizeof (mword));
}
/* adjust array->bounds: for arrays whose bounds are allocated inline
 * after the array object, repoint bounds to the same offset inside
 * the copy. */
DEBUG (9, g_assert (vt->gc_descr));
if (G_UNLIKELY (vt->rank && ((MonoArray*)obj)->bounds)) {
MonoArray *array = (MonoArray*)destination;
array->bounds = (MonoArrayBounds*)((char*)destination + ((char*)((MonoArray*)obj)->bounds - (char*)obj));
DEBUG (9, fprintf (gc_debug_file, "Array instance %p: size: %lu, rank: %d, length: %lu\n", array, (unsigned long)objsize, vt->rank, (unsigned long)mono_array_length (array)));
}
/* Tell the profiler the object moved, if it asked for move events. */
if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_GC_MOVES))
sgen_register_moved_object (obj, destination);
obj = destination;
if (queue) {
DEBUG (9, fprintf (gc_debug_file, "Enqueuing gray object %p (%s)\n", obj, sgen_safe_name (obj)));
GRAY_OBJECT_ENQUEUE (queue, obj);
}
}

/*
 * Serial promotion path: copy OBJ out of its current location and
 * install a forwarding pointer in the old header word.  If the
 * allocator cannot provide space, the object is pinned in place
 * instead.  Returns the object's new address, or OBJ itself when the
 * promotion failed and the object was pinned.
 */
static void*
copy_object_no_checks (void *obj, SgenGrayQueue *queue)
{
	MonoVTable *vtable = ((MonoObject*)obj)->vtable;
	mword size = SGEN_ALIGN_UP (sgen_par_object_get_size (vtable, (MonoObject*)obj));
	gboolean has_refs = SGEN_VTABLE_HAS_REFERENCES (vtable);
	char *copy = collector_serial_alloc_for_promotion (obj, size, has_refs);

	if (G_UNLIKELY (!copy)) {
		/* Allocation failed: pin the object where it is so the
		 * collector won't move it, and account for the failure. */
		collector_pin_object (obj, queue);
		sgen_set_pinned_from_failed_allocation (size);
		return obj;
	}

	/* Install the vtable first: par_copy_object_no_checks () only
	 * copies from word 1 onward. */
	*(MonoVTable**)copy = vtable;
	/* Objects without references need no scanning — pass NULL so the
	 * copy is not enqueued on the gray queue. */
	par_copy_object_no_checks (copy, vtable, obj, size, has_refs ? queue : NULL);

	/* Leave a forwarding pointer so later references find the copy. */
	SGEN_FORWARD_OBJECT (obj, copy);

	return copy;
}

#ifdef GENERATE_COPY_FUNCTIONS

/*
* This is how the copying happens from the nursery to the old generation.
* We assume that at this time all the pinned objects have been identified and
* marked as such.
* We run scan_object() for each pinned object so that each referenced
* objects if possible are copied. The new gray objects created can have
* scan_object() run on them right away, too.
* Then we run copy_object() for the precisely tracked roots. At this point
* all the roots are either gray or black. We run scan_object() on the gray
* objects until no more gray objects are created.
* At the end of the process we walk again the pinned list and we unmark
* the pinned flag. As we go we also create the list of free space for use
* in the next allocation runs.
*
* We need to remember objects from the old generation that point to the new one
* (or just addresses?).
*
* copy_object could be made into a macro once debugged (use inline for now).
*/

/*
 * Serial minor-collection copy function: if *OBJ_SLOT points into
 * nursery from-space, evacuate the object and update the slot;
 * otherwise leave it alone.  Newly copied objects with references are
 * enqueued on QUEUE for later scanning (inside copy_object_no_checks).
 *
 * The guard checks below read bits overloaded onto the object's vtable
 * word, so their order matters: a forwarded object's header is a
 * forwarding pointer, not a vtable.
 */
static void
serial_copy_object (void **obj_slot, SgenGrayQueue *queue)
{
char *forwarded;
char *obj = *obj_slot;

DEBUG (9, g_assert (current_collection_generation == GENERATION_NURSERY));

HEAVY_STAT (++stat_copy_object_called_nursery);

/* References outside the nursery are not handled by the minor collector. */
if (!sgen_ptr_in_nursery (obj)) {
HEAVY_STAT (++stat_nursery_copy_object_failed_from_space);
return;
}

DEBUG (9, fprintf (gc_debug_file, "Precise copy of %p from %p", obj, obj_slot));

/*
 * Before we can copy the object we must make sure that we are
 * allowed to, i.e. that the object not pinned, not already
 * forwarded or belongs to the nursery To Space.
 */

/* Already copied by an earlier visit: just chase the forwarding pointer. */
if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
DEBUG (9, g_assert ((*(MonoVTable**)SGEN_LOAD_VTABLE(obj))->gc_descr));
DEBUG (9, fprintf (gc_debug_file, " (already forwarded to %p)\n", forwarded));
HEAVY_STAT (++stat_nursery_copy_object_failed_forwarded);
*obj_slot = forwarded;
return;
}
/* Pinned objects must not move; the slot already points at the right place. */
if (SGEN_OBJECT_IS_PINNED (obj)) {
DEBUG (9, g_assert (((MonoVTable*)SGEN_LOAD_VTABLE(obj))->gc_descr));
DEBUG (9, fprintf (gc_debug_file, " (pinned, no change)\n"));
HEAVY_STAT (++stat_nursery_copy_object_failed_pinned);
return;
}

/* Objects already evacuated to nursery to-space stay where they are. */
if (sgen_nursery_is_to_space (obj)) {
DEBUG (9, g_assert (((MonoVTable*)SGEN_LOAD_VTABLE(obj))->gc_descr));
DEBUG (9, fprintf (gc_debug_file, " (tospace, no change)\n"));
HEAVY_STAT (++stat_nursery_copy_object_failed_to_space);
return;
}

HEAVY_STAT (++stat_objects_copied_nursery);

*obj_slot = copy_object_no_checks (obj, queue);
}

/*
 * Parallel minor-collection copy function.  Same contract as
 * serial_copy_object (), but multiple collector threads may race to
 * evacuate the same object, so the forwarding pointer is installed
 * with a CAS on the object's vtable word: the winner publishes its
 * copy, losers discard theirs and follow the winner's forwarding
 * pointer.
 */
static void
parallel_copy_object (void **obj_slot, SgenGrayQueue *queue)
{
char *obj = *obj_slot;
mword vtable_word, objsize;
MonoVTable *vt;
void *destination;
gboolean has_references;

DEBUG (9, g_assert (current_collection_generation == GENERATION_NURSERY));

HEAVY_STAT (++stat_copy_object_called_nursery);

if (!sgen_ptr_in_nursery (obj)) {
HEAVY_STAT (++stat_nursery_copy_object_failed_from_space);
return;
}

/* Read the header word once; the low bits may carry the forwarded or
 * pinned flag, so mask them off to recover the vtable pointer. */
vtable_word = *(mword*)obj;
vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);

/*
 * Before we can copy the object we must make sure that we are
 * allowed to, i.e. that the object not pinned, not already
 * forwarded and not in the nursery To Space.
 */

if (vtable_word & SGEN_FORWARDED_BIT) {
/* With the forwarded bit set, the masked header word is the new
 * address of the object, not a vtable. */
HEAVY_STAT (++stat_nursery_copy_object_failed_forwarded);
*obj_slot = vt;
return;
}
if (vtable_word & SGEN_PINNED_BIT) {
HEAVY_STAT (++stat_nursery_copy_object_failed_pinned);
return;
}

if (sgen_nursery_is_to_space (obj)) {
HEAVY_STAT (++stat_nursery_copy_object_failed_to_space);
return;
}

HEAVY_STAT (++stat_objects_copied_nursery);

objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));
has_references = SGEN_VTABLE_HAS_REFERENCES (vt);

destination = collector_parallel_alloc_for_promotion (obj, objsize, has_references);

if (G_UNLIKELY (!destination)) {
/* No space: pin the object instead, or follow another thread's
 * forwarding pointer if it won a race meanwhile. */
sgen_parallel_pin_or_update (obj_slot, obj, vt, queue);
return;
}

*(MonoVTable**)destination = vt;

/* Try to claim the object by atomically replacing its vtable word
 * with a tagged forwarding pointer.  Succeeds only if the word is
 * still the plain vtable we read above. */
if (SGEN_CAS_PTR ((void*)obj, (void*)((mword)destination | SGEN_FORWARDED_BIT), vt) == vt) {
par_copy_object_no_checks (destination, vt, obj, objsize, has_references ? queue : NULL);
obj = destination;
*obj_slot = obj;
} else {
/* FIXME: unify with code in major_copy_or_mark_object() */

/* FIXME: Give destination back to the allocator. */
*(void**)destination = NULL;

/* Another thread forwarded the object first; follow its pointer. */
vtable_word = *(mword*)obj;
g_assert (vtable_word & SGEN_FORWARDED_BIT);

obj = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);

*obj_slot = obj;

HEAVY_STAT (++stat_slots_allocated_in_vain);
}
}

#endif
38 changes: 36 additions & 2 deletions mono/metadata/sgen-gc.c
Original file line number Diff line number Diff line change
Expand Up @@ -296,6 +296,8 @@ long long stat_objects_copied_major = 0;
long long stat_scan_object_called_nursery = 0;
long long stat_scan_object_called_major = 0;

long long stat_slots_allocated_in_vain;

long long stat_nursery_copy_object_failed_from_space = 0;
long long stat_nursery_copy_object_failed_forwarded = 0;
long long stat_nursery_copy_object_failed_pinned = 0;
Expand Down Expand Up @@ -1369,6 +1371,36 @@ sgen_pin_object (void *object, GrayQueue *queue)
binary_protocol_pin (object, (gpointer)LOAD_VTABLE (object), safe_object_get_size (object));
}

/*
 * Parallel fallback when promotion of OBJ failed: pin the object in
 * place (in the nursery or, for non-nursery objects, via the major
 * collector) or, if another thread forwarded it in the meantime,
 * update *PTR to the object's new location.
 *
 * VT is the plain vtable pointer the caller observed before any flag
 * bits were set; it is used as the CAS comparand so pinning only
 * succeeds while the header word is still unmodified.
 */
void
sgen_parallel_pin_or_update (void **ptr, void *obj, MonoVTable *vt, SgenGrayQueue *queue)
{
/* Loop until the object is observed pinned or forwarded by someone
 * (possibly us). */
for (;;) {
mword vtable_word;
gboolean major_pinned = FALSE;

if (sgen_ptr_in_nursery (obj)) {
/* Atomically set the pinned bit in the header word; fails if
 * another thread forwarded or pinned the object first. */
if (SGEN_CAS_PTR (obj, (void*)((mword)vt | SGEN_PINNED_BIT), vt) == vt) {
sgen_pin_object (obj, queue);
break;
}
} else {
major_collector.pin_major_object (obj, queue);
major_pinned = TRUE;
}

/* CAS failed (or the object lives in the major heap): re-read the
 * header to find out what happened. */
vtable_word = *(mword*)obj;
/*someone else forwarded it, update the pointer and bail out*/
if (vtable_word & SGEN_FORWARDED_BIT) {
*ptr = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
break;
}

/*someone pinned it, nothing to do.*/
if (vtable_word & SGEN_PINNED_BIT || major_pinned)
break;
}
}

/* Sort the addresses in array in increasing order.
* Done using a by-the book heap sort. Which has decent and stable performance, is pretty cache efficient.
*/
Expand Down Expand Up @@ -2237,6 +2269,8 @@ init_stats (void)
mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);

mono_counters_register ("Slots allocated in vain", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_slots_allocated_in_vain);

mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
Expand Down Expand Up @@ -2522,9 +2556,9 @@ collect_nursery (size_t requested_size)

current_collection_generation = GENERATION_NURSERY;
if (sgen_collection_is_parallel ())
current_object_ops = major_collector.par_minor_ops;
current_object_ops = sgen_minor_collector.parallel_ops;
else
current_object_ops = major_collector.minor_ops;
current_object_ops = sgen_minor_collector.serial_ops;

reset_pinned_from_failed_allocation ();

Expand Down
7 changes: 5 additions & 2 deletions mono/metadata/sgen-gc.h
Original file line number Diff line number Diff line change
Expand Up @@ -582,6 +582,9 @@ typedef struct {
char* (*alloc_for_promotion) (char *obj, size_t objsize, gboolean has_references);
char* (*par_alloc_for_promotion) (char *obj, size_t objsize, gboolean has_references);

SgenObjectOperations serial_ops;
SgenObjectOperations parallel_ops;

void (*prepare_to_space) (char *to_space_bitmap, int space_bitmap_size);
void (*clear_fragments) (void);
SgenFragment* (*build_fragments_get_exclude_head) (void);
Expand Down Expand Up @@ -619,8 +622,6 @@ struct _SgenMajorCollector {
void* (*alloc_degraded) (MonoVTable *vtable, size_t size);

SgenObjectOperations major_ops;
SgenObjectOperations minor_ops;
SgenObjectOperations par_minor_ops;

void* (*alloc_object) (int size, gboolean has_references);
void* (*par_alloc_object) (int size, gboolean has_references);
Expand All @@ -629,6 +630,7 @@ struct _SgenMajorCollector {
void (*free_non_pinned_object) (char *obj, size_t size);
void (*find_pin_queue_start_ends) (SgenGrayQueue *queue);
void (*pin_objects) (SgenGrayQueue *queue);
void (*pin_major_object) (char *obj, SgenGrayQueue *queue);
void (*scan_card_table) (SgenGrayQueue *queue);
void (*iterate_live_block_ranges) (sgen_cardtable_block_callback callback);
void (*init_to_space) (void);
Expand Down Expand Up @@ -787,6 +789,7 @@ enum {
gboolean sgen_try_alloc_space (mword size, int space) MONO_INTERNAL;
void sgen_release_space (mword size, int space) MONO_INTERNAL;
void sgen_pin_object (void *object, SgenGrayQueue *queue) MONO_INTERNAL;
void sgen_parallel_pin_or_update (void **ptr, void *obj, MonoVTable *vt, SgenGrayQueue *queue) MONO_INTERNAL;
void sgen_collect_major_no_lock (const char *reason) MONO_INTERNAL;
void sgen_collect_nursery_no_lock (size_t requested_size) MONO_INTERNAL;
void sgen_minor_collect_or_expand_inner (size_t size) MONO_INTERNAL;
Expand Down
Loading

0 comments on commit afe9dfd

Please sign in to comment.