@@ -1925,7 +1925,16 @@ uint8_t* gc_heap::pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignm
#endif //BACKGROUND_GC && !USE_REGIONS

// This is always power of 2.
+ #ifdef HOST_64BIT
const size_t min_segment_size_hard_limit = 1024*1024*16;
+ #else //HOST_64BIT
+ const size_t min_segment_size_hard_limit = 1024*1024*4;
+ #endif //HOST_64BIT
+
+ #ifndef HOST_64BIT
+ // Max size of the heap hard limit (2^31) so that it can be aligned and rounded up to a power of 2 without overflowing
+ const size_t max_heap_hard_limit = (size_t)2 * (size_t)1024 * (size_t)1024 * (size_t)1024;
+ #endif //!HOST_64BIT

inline
size_t align_on_segment_hard_limit (size_t add)
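A rough sketch of why the new 32-bit cap sits at 2^31: any hard limit at or below 2GB can still be rounded up to a power of 2 inside a 32-bit size_t, while anything larger would have to round up to 2^32 and wrap. The helper round_up_power2_sketch below is a hypothetical stand-in for the GC's own rounding routine, shown only to illustrate the arithmetic under the assumption of a 32-bit size_t.

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the GC's round-up-to-power-of-2 helper (assumes v >= 1).
static uint32_t round_up_power2_sketch (uint32_t v)
{
    v--;
    v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
    return v + 1;
}

int main ()
{
    const uint32_t max_heap_hard_limit = 2u * 1024u * 1024u * 1024u;   // 2^31, the 32-bit cap above

    // A limit at or below 2^31 rounds up to at most 2^31, which still fits in a 32-bit size_t.
    assert (round_up_power2_sketch (max_heap_hard_limit) == max_heap_hard_limit);
    assert (round_up_power2_sketch (max_heap_hard_limit / 2 + 1) == max_heap_hard_limit);

    // One byte more would have to round up to 2^32, which a 32-bit size_t cannot hold.
    return 0;
}
```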
@@ -7336,9 +7345,6 @@ bool gc_heap::virtual_commit (void* address, size_t size, int bucket, int h_numb
*
* Note : We never commit into free directly, so bucket != recorded_committed_free_bucket
*/
- #ifndef HOST_64BIT
- assert (heap_hard_limit == 0);
- #endif //!HOST_64BIT

assert(0 <= bucket && bucket < recorded_committed_bucket_counts);
assert(bucket < total_oh_count || h_number == -1);
@@ -7481,9 +7487,6 @@ bool gc_heap::virtual_decommit (void* address, size_t size, int bucket, int h_nu
* Case 2: This is for bookkeeping - the bucket will be recorded_committed_bookkeeping_bucket, and the h_number will be -1
* Case 3: This is for free - the bucket will be recorded_committed_free_bucket, and the h_number will be -1
*/
- #ifndef HOST_64BIT
- assert (heap_hard_limit == 0);
- #endif //!HOST_64BIT

bool decommit_succeeded_p = ((bucket != recorded_committed_bookkeeping_bucket) && use_large_pages_p) ? true : GCToOSInterface::VirtualDecommit (address, size);
@@ -14301,6 +14304,11 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size,
return E_OUTOFMEMORY;
if (use_large_pages_p)
{
+ #ifndef HOST_64BIT
+ // Large pages are not supported on 32bit
+ assert (false);
+ #endif //!HOST_64BIT
+
if (heap_hard_limit_oh[soh])
{
heap_hard_limit_oh[soh] = soh_segment_size * number_of_heaps;
@@ -20921,12 +20929,12 @@ int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation,
gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_limit_before_oom);
full_compact_gc_p = true;
}
- else if ((current_total_committed * 10) >= (heap_hard_limit * 9))
+ else if (((uint64_t) current_total_committed * (uint64_t) 10) >= ((uint64_t) heap_hard_limit * (uint64_t) 9))
{
size_t loh_frag = get_total_gen_fragmentation (loh_generation);

// If the LOH frag is >= 1/8 it's worth compacting it
- if (( loh_frag * 8) >= heap_hard_limit)
+ if (loh_frag >= heap_hard_limit / 8 )
{
dprintf (GTC_LOG, ("loh frag: %zd > 1/8 of limit %zd", loh_frag, (heap_hard_limit / 8)));
gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_limit_loh_frag);
@@ -20937,7 +20945,7 @@ int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation,
// If there's not much fragmentation but it looks like it'll be productive to
// collect LOH, do that.
size_t est_loh_reclaim = get_total_gen_estimated_reclaim (loh_generation);
- if (( est_loh_reclaim * 8) >= heap_hard_limit)
+ if (est_loh_reclaim >= heap_hard_limit / 8 )
{
gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_limit_loh_reclaim);
full_compact_gc_p = true;
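The two rewrites above both exist to keep the arithmetic inside 32 bits: the 90% test widens to 64-bit before multiplying, and the fragmentation and reclaim tests divide the limit by 8 instead of multiplying the other side by 8. A small sketch with made-up sizes, assuming a 32-bit size_t, of what would go wrong otherwise:

```cpp
#include <cassert>
#include <cstdint>

int main ()
{
    // Made-up sizes: a 1.5GB hard limit with 1.4GB committed, stored in 32-bit counters.
    uint32_t heap_hard_limit         = 1536u * 1024u * 1024u;
    uint32_t current_total_committed = 1400u * 1024u * 1024u;

    // A 32-bit multiply wraps: committed * 10 cannot be represented, so the old 90% check misbehaves.
    uint32_t wrapped = current_total_committed * 10u;
    assert (wrapped != (uint64_t)current_total_committed * 10);

    // Widening first, as the new code does, keeps the comparison exact.
    bool near_limit = ((uint64_t)current_total_committed * 10) >= ((uint64_t)heap_hard_limit * 9);
    assert (near_limit);

    // For the LOH checks, dividing the limit by 8 instead of multiplying the fragment by 8 stays in range too.
    uint32_t loh_frag = 300u * 1024u * 1024u;
    assert (loh_frag >= heap_hard_limit / 8);
    return 0;
}
```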
@@ -43556,6 +43564,15 @@ void gc_heap::init_static_data()
);
#endif //MULTIPLE_HEAPS

+ #ifndef HOST_64BIT
+ if (heap_hard_limit)
+ {
+ size_t gen1_max_size_seg = soh_segment_size / 2;
+ dprintf (GTC_LOG, ("limit gen1 max %zd->%zd", gen1_max_size, gen1_max_size_seg));
+ gen1_max_size = min (gen1_max_size, gen1_max_size_seg);
+ }
+ #endif //!HOST_64BIT
+
size_t gen1_max_size_config = (size_t)GCConfig::GetGCGen1MaxBudget();

if (gen1_max_size_config)
@@ -48681,6 +48698,11 @@ HRESULT GCHeap::Initialize()
{
if (gc_heap::heap_hard_limit)
{
+ #ifndef HOST_64BIT
+ // Regions are not supported on 32bit
+ assert(false);
+ #endif //!HOST_64BIT
+
if (gc_heap::heap_hard_limit_oh[soh])
{
gc_heap::regions_range = gc_heap::heap_hard_limit;
@@ -48715,12 +48737,32 @@ HRESULT GCHeap::Initialize()
{
if (gc_heap::heap_hard_limit_oh[soh])
{
+ // On 32bit we have the following guarantees:
+ // 0 <= seg_size_from_config <= 1Gb (from max_heap_hard_limit/2)
+ // 0 <= (heap_hard_limit = heap_hard_limit_oh[soh] + heap_hard_limit_oh[loh] + heap_hard_limit_oh[poh]) < 4Gb (from gc_heap::compute_hard_limit_from_heap_limits)
+ // 0 <= heap_hard_limit_oh[loh] <= 1Gb or < 2Gb
+ // 0 <= heap_hard_limit_oh[poh] <= 1Gb or < 2Gb
+ // 0 <= large_seg_size <= 1Gb or <= 2Gb (alignment and round up)
+ // 0 <= pin_seg_size <= 1Gb or <= 2Gb (alignment and round up)
+ // 0 <= soh_segment_size + large_seg_size + pin_seg_size <= 4Gb
+ // Overflow at 4Gb is ok, because a 0-size allocation will fail
large_seg_size = max (gc_heap::adjust_segment_size_hard_limit (gc_heap::heap_hard_limit_oh[loh], nhp), seg_size_from_config);
pin_seg_size = max (gc_heap::adjust_segment_size_hard_limit (gc_heap::heap_hard_limit_oh[poh], nhp), seg_size_from_config);
}
else
{
+ // On 32bit we have the following guarantees:
+ // 0 <= heap_hard_limit <= 1Gb (from gc_heap::compute_hard_limit)
+ // 0 <= soh_segment_size <= 1Gb
+ // 0 <= large_seg_size <= 1Gb
+ // 0 <= pin_seg_size <= 1Gb
+ // 0 <= soh_segment_size + large_seg_size + pin_seg_size <= 3Gb
+ #ifdef HOST_64BIT
large_seg_size = gc_heap::use_large_pages_p ? gc_heap::soh_segment_size : gc_heap::soh_segment_size * 2;
+ #else //HOST_64BIT
+ assert (!gc_heap::use_large_pages_p);
+ large_seg_size = gc_heap::soh_segment_size;
+ #endif //HOST_64BIT
pin_seg_size = large_seg_size;
}
if (gc_heap::use_large_pages_p)
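A tiny illustration of the "overflow at 4Gb is ok" note above, with a 32-bit size_t and the worst case those bounds allow: the three segment sizes can sum to exactly 4GB, which wraps to 0, and a 0-byte reservation then fails instead of silently succeeding. The numbers are made up for the sketch:

```cpp
#include <cassert>
#include <cstdint>

int main ()
{
    // Worst case the bounds above allow: one segment near 2GB plus two at 1GB, in 32-bit size_t arithmetic.
    uint32_t soh_segment_size = 2u * 1024u * 1024u * 1024u;
    uint32_t large_seg_size   = 1u * 1024u * 1024u * 1024u;
    uint32_t pin_seg_size     = 1u * 1024u * 1024u * 1024u;

    // The sum wraps to exactly 0; reserving 0 bytes fails later, so the wrap is tolerated rather than guarded.
    uint32_t total = soh_segment_size + large_seg_size + pin_seg_size;
    assert (total == 0);
    return 0;
}
```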
@@ -52984,16 +53026,45 @@ int GCHeap::RefreshMemoryLimit()
return gc_heap::refresh_memory_limit();
}

+ bool gc_heap::compute_hard_limit_from_heap_limits()
+ {
+ #ifndef HOST_64BIT
+ // need to consider overflows:
+ if (! ((heap_hard_limit_oh[soh] < max_heap_hard_limit && heap_hard_limit_oh[loh] <= max_heap_hard_limit / 2 && heap_hard_limit_oh[poh] <= max_heap_hard_limit / 2)
+ || (heap_hard_limit_oh[soh] <= max_heap_hard_limit / 2 && heap_hard_limit_oh[loh] < max_heap_hard_limit && heap_hard_limit_oh[poh] <= max_heap_hard_limit / 2)
+ || (heap_hard_limit_oh[soh] <= max_heap_hard_limit / 2 && heap_hard_limit_oh[loh] <= max_heap_hard_limit / 2 && heap_hard_limit_oh[poh] < max_heap_hard_limit)))
+ {
+ return false;
+ }
+ #endif //!HOST_64BIT
+
+ heap_hard_limit = heap_hard_limit_oh[soh] + heap_hard_limit_oh[loh] + heap_hard_limit_oh[poh];
+ return true;
+ }
+
+ // On 32bit we have the following guarantees for the limits:
+ // 1) heap-specific limits:
+ // 0 <= (heap_hard_limit = heap_hard_limit_oh[soh] + heap_hard_limit_oh[loh] + heap_hard_limit_oh[poh]) < 4Gb
+ // a) 0 <= heap_hard_limit_oh[soh] < 2Gb, 0 <= heap_hard_limit_oh[loh] <= 1Gb, 0 <= heap_hard_limit_oh[poh] <= 1Gb
+ // b) 0 <= heap_hard_limit_oh[soh] <= 1Gb, 0 <= heap_hard_limit_oh[loh] < 2Gb, 0 <= heap_hard_limit_oh[poh] <= 1Gb
+ // c) 0 <= heap_hard_limit_oh[soh] <= 1Gb, 0 <= heap_hard_limit_oh[loh] <= 1Gb, 0 <= heap_hard_limit_oh[poh] < 2Gb
+ // 2) same limit for all heaps:
+ // 0 <= heap_hard_limit <= 1Gb
+ //
+ // These ranges guarantee that the calculations of soh_segment_size, loh_segment_size and poh_segment_size with alignment and round-up won't overflow,
+ // and neither will their sum (overflow to 0 is allowed, because an allocation of size 0 will fail later).
bool gc_heap::compute_hard_limit()
{
heap_hard_limit_oh[soh] = 0;
- #ifdef HOST_64BIT
+
heap_hard_limit = (size_t)GCConfig::GetGCHeapHardLimit();
heap_hard_limit_oh[soh] = (size_t)GCConfig::GetGCHeapHardLimitSOH();
heap_hard_limit_oh[loh] = (size_t)GCConfig::GetGCHeapHardLimitLOH();
heap_hard_limit_oh[poh] = (size_t)GCConfig::GetGCHeapHardLimitPOH();

+ #ifdef HOST_64BIT
use_large_pages_p = GCConfig::GetGCLargePages();
+ #endif //HOST_64BIT

if (heap_hard_limit_oh[soh] || heap_hard_limit_oh[loh] || heap_hard_limit_oh[poh])
{
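The validation in the new compute_hard_limit_from_heap_limits boils down to: at most one of the three per-object-heap limits may approach 2GB, and then only if the other two stay at or below 1GB, so the later sum can never exceed 4GB. A standalone sketch of that predicate; limits_fit_32bit is a made-up name, not the GC's:

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the 32-bit validation: allow one of the three per-heap limits to approach 2GB
// as long as the other two stay at or below 1GB, keeping soh + loh + poh below 4GB.
static bool limits_fit_32bit (uint64_t soh, uint64_t loh, uint64_t poh)
{
    const uint64_t max_limit = 2ull * 1024 * 1024 * 1024;   // 2GB
    return (soh <  max_limit && loh <= max_limit / 2 && poh <= max_limit / 2)
        || (soh <= max_limit / 2 && loh <  max_limit && poh <= max_limit / 2)
        || (soh <= max_limit / 2 && loh <= max_limit / 2 && poh <  max_limit);
}

int main ()
{
    const uint64_t GB = 1024ull * 1024 * 1024;
    assert (limits_fit_32bit (GB + GB / 2, GB, GB));           // 1.5GB + 1GB + 1GB < 4GB: accepted
    assert (!limits_fit_32bit (GB + GB / 2, GB + GB / 2, GB)); // two limits above 1GB: rejected
    return 0;
}
```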
@@ -53005,8 +53076,10 @@ bool gc_heap::compute_hard_limit()
{
return false;
}
- heap_hard_limit = heap_hard_limit_oh[soh] +
- heap_hard_limit_oh[loh] + heap_hard_limit_oh[poh];
+ if (!compute_hard_limit_from_heap_limits())
+ {
+ return false;
+ }
}
else
{
@@ -53034,9 +53107,22 @@ bool gc_heap::compute_hard_limit()
heap_hard_limit_oh[soh] = (size_t)(total_physical_mem * (uint64_t)percent_of_mem_soh / (uint64_t)100);
heap_hard_limit_oh[loh] = (size_t)(total_physical_mem * (uint64_t)percent_of_mem_loh / (uint64_t)100);
heap_hard_limit_oh[poh] = (size_t)(total_physical_mem * (uint64_t)percent_of_mem_poh / (uint64_t)100);
- heap_hard_limit = heap_hard_limit_oh[soh] +
- heap_hard_limit_oh[loh] + heap_hard_limit_oh[poh];
+
+ if (!compute_hard_limit_from_heap_limits())
+ {
+ return false;
+ }
}
+ #ifndef HOST_64BIT
+ else
+ {
+ // need to consider overflows
+ if (heap_hard_limit > max_heap_hard_limit / 2)
+ {
+ return false;
+ }
+ }
+ #endif //!HOST_64BIT
}

if (heap_hard_limit_oh[soh] && (!heap_hard_limit_oh[poh]) && (!use_large_pages_p))
@@ -53050,9 +53136,17 @@ bool gc_heap::compute_hard_limit()
if ((percent_of_mem > 0) && (percent_of_mem < 100))
{
heap_hard_limit = (size_t)(total_physical_mem * (uint64_t)percent_of_mem / (uint64_t)100);
+
+ #ifndef HOST_64BIT
+ // need to consider overflows
+ if (heap_hard_limit > max_heap_hard_limit / 2)
+ {
+ return false;
+ }
+ #endif //!HOST_64BIT
}
}
- #endif //HOST_64BIT
+
return true;
}
@@ -53077,12 +53171,12 @@ bool gc_heap::compute_memory_settings(bool is_initialization, uint32_t& nhp, uin
}
}
}
+ #endif //HOST_64BIT

if (heap_hard_limit && (heap_hard_limit < new_current_total_committed))
{
return false;
}
- #endif //HOST_64BIT

#ifdef USE_REGIONS
{
@@ -53101,9 +53195,24 @@ bool gc_heap::compute_memory_settings(bool is_initialization, uint32_t& nhp, uin
seg_size_from_config = (size_t)GCConfig::GetSegmentSize();
if (seg_size_from_config)
{
- seg_size_from_config = adjust_segment_size_hard_limit_va (seg_size_from_config);
+ seg_size_from_config = use_large_pages_p ? align_on_segment_hard_limit (seg_size_from_config) :
+ #ifdef HOST_64BIT
+ round_up_power2 (seg_size_from_config);
+ #else //HOST_64BIT
+ round_down_power2 (seg_size_from_config);
+ seg_size_from_config = min (seg_size_from_config, max_heap_hard_limit / 2);
+ #endif //HOST_64BIT
}

+ // On 32bit we have the following guarantees:
+ // 0 <= seg_size_from_config <= 1Gb (from max_heap_hard_limit/2)
+ // a) heap-specific limits:
+ // 0 <= (heap_hard_limit = heap_hard_limit_oh[soh] + heap_hard_limit_oh[loh] + heap_hard_limit_oh[poh]) < 4Gb (from gc_heap::compute_hard_limit_from_heap_limits)
+ // 0 <= heap_hard_limit_oh[soh] <= 1Gb or < 2Gb
+ // 0 <= soh_segment_size <= 1Gb or <= 2Gb (alignment and round up)
+ // b) same limit for all heaps:
+ // 0 <= heap_hard_limit <= 1Gb
+ // 0 <= soh_segment_size <= 1Gb
size_t limit_to_check = (heap_hard_limit_oh[soh] ? heap_hard_limit_oh[soh] : heap_hard_limit);
soh_segment_size = max (adjust_segment_size_hard_limit (limit_to_check, nhp), seg_size_from_config);
}
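On 32-bit the configured segment size is now rounded down to a power of 2 and clamped to max_heap_hard_limit / 2, where 64-bit keeps rounding up. A sketch of that path with an illustrative 1.5GB config value; round_down_power2_sketch is a hypothetical stand-in for the GC's helper:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the GC's round-down helper: largest power of 2 not above v (assumes v >= 1).
static uint32_t round_down_power2_sketch (uint32_t v)
{
    uint32_t p = 1;
    while (((uint64_t)p << 1) <= v)
        p <<= 1;
    return p;
}

int main ()
{
    const uint32_t one_gb = 1024u * 1024u * 1024u;   // max_heap_hard_limit / 2

    // A configured 1.5GB segment size rounds *down* to 1GB on 32-bit; rounding up to 2GB would leave
    // no room for the large and pinned segments in a 32-bit address space.
    uint32_t seg_size_from_config = one_gb + one_gb / 2;
    seg_size_from_config = round_down_power2_sketch (seg_size_from_config);
    seg_size_from_config = std::min (seg_size_from_config, one_gb);
    assert (seg_size_from_config == one_gb);
    return 0;
}
```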