cpp

Coverage Report

Created: 2023-09-13 01:07

/home/andy/git/oilshell/oil/mycpp/mark_sweep_heap.cc
Line
Count
Source (jump to first uncovered line)
1
#include "mycpp/mark_sweep_heap.h"
2
3
#include <inttypes.h>  // PRId64
4
#include <stdlib.h>    // getenv()
5
#include <string.h>    // strlen()
6
#include <sys/time.h>  // gettimeofday()
7
#include <time.h>      // clock_gettime(), CLOCK_PROCESS_CPUTIME_ID
8
#include <unistd.h>    // STDERR_FILENO
9
10
#include "_build/detected-cpp-config.h"  // for GC_TIMING
11
#include "mycpp/gc_builtins.h"           // StringToInteger()
12
#include "mycpp/gc_slab.h"
13
14
// TODO: Remove this guard when we have separate binaries
15
#if MARK_SWEEP
16
17
69
void MarkSweepHeap::Init() {
18
69
  Init(1000);  // collect at 1000 objects in tests
19
69
}
20
21
71
void MarkSweepHeap::Init(int gc_threshold) {
22
71
  gc_threshold_ = gc_threshold;
23
24
71
  char* e;
25
71
  e = getenv("OILS_GC_THRESHOLD");
26
71
  if (e) {
27
0
    int result;
28
0
    if (StringToInteger(e, strlen(e), 10, &result)) {
29
      // Override collection threshold
30
0
      gc_threshold_ = result;
31
0
    }
32
0
  }
33
34
  // only for developers
35
71
  e = getenv("_OILS_GC_VERBOSE");
36
71
  if (e && strcmp(e, "1") == 0) {
37
0
    gc_verbose_ = true;
38
0
  }
39
40
71
  live_objs_.reserve(KiB(10));
41
71
  roots_.reserve(KiB(1));  // prevent resizing in common case
42
71
}
43
44
16
int MarkSweepHeap::MaybeCollect() {
45
  // Maybe collect BEFORE allocation, because the new object won't be rooted
46
  #if GC_ALWAYS
47
  int result = Collect();
48
  #else
49
16
  int result = -1;
50
16
  if (num_live() > gc_threshold_) {
51
0
    result = Collect();
52
0
  }
53
16
  #endif
54
55
16
  num_gc_points_++;  // this is a manual collection point
56
16
  return result;
57
16
}
58
59
  #if defined(BUMP_SMALL)
60
    #include "mycpp/bump_leak_heap.h"
61
62
BumpLeakHeap gBumpLeak;
63
  #endif
64
65
// Allocate and update stats
66
// TODO: Make this interface nicer.
67
8.14k
void* MarkSweepHeap::Allocate(size_t num_bytes, int* obj_id, int* pool_id) {
68
  // log("Allocate %d", num_bytes);
69
8.14k
  #ifndef NO_POOL_ALLOC
70
8.14k
  if (num_bytes <= pool1_.kMaxObjSize) {
71
4.87k
    *pool_id = 1;
72
4.87k
    return pool1_.Allocate(obj_id);
73
4.87k
  }
74
3.27k
  if (num_bytes <= pool2_.kMaxObjSize) {
75
2.06k
    *pool_id = 2;
76
2.06k
    return pool2_.Allocate(obj_id);
77
2.06k
  }
78
1.20k
  *pool_id = 0;  // malloc(), not a pool
79
1.20k
  #endif
80
81
  // Does the pool allocator approximate a bump allocator?  Use pool2_
82
  // threshold of 48 bytes.
83
  // These only work with GC off -- OILS_GC_THRESHOLD=[big]
84
  #ifdef BUMP_SMALL
85
  if (num_bytes <= 48) {
86
    return gBumpLeak.Allocate(num_bytes);
87
  }
88
  #endif
89
90
1.20k
  if (to_free_.empty()) {
91
    // Use higher object IDs
92
1.20k
    *obj_id = greatest_obj_id_;
93
1.20k
    greatest_obj_id_++;
94
95
    // This check is ON in release mode
96
1.20k
    CHECK(greatest_obj_id_ <= kMaxObjId);
97
1.20k
  } else {
98
0
    ObjHeader* dead = to_free_.back();
99
0
    to_free_.pop_back();
100
101
0
    *obj_id = dead->obj_id;  // reuse the dead object's ID
102
103
0
    free(dead);
104
0
  }
105
106
0
  void* result = malloc(num_bytes);
107
1.20k
  DCHECK(result != nullptr);
108
109
0
  live_objs_.push_back(static_cast<ObjHeader*>(result));
110
111
1.20k
  num_live_++;
112
1.20k
  num_allocated_++;
113
1.20k
  bytes_allocated_ += num_bytes;
114
115
1.20k
  return result;
116
3.27k
}
117
118
  #if 0
119
void* MarkSweepHeap::Reallocate(void* p, size_t num_bytes) {
120
  FAIL(kNotImplemented);
121
  // This causes a double-free in the GC!
122
  // return realloc(p, num_bytes);
123
}
124
  #endif
125
126
// "Leaf" for marking / TraceChildren
127
//
128
// - Abort if nullptr
129
// - Find the header (get rid of this when remove ObjHeader member)
130
// - Tag::{Opaque,FixedSized,Scanned} have their mark bits set
131
// - Tag::{FixedSize,Scanned} are also pushed on the gray stack
132
133
130
void MarkSweepHeap::MaybeMarkAndPush(RawObject* obj) {
134
130
  ObjHeader* header = ObjHeader::FromObject(obj);
135
130
  if (header->heap_tag == HeapTag::Global) {  // don't mark or push
136
8
    return;
137
8
  }
138
139
122
  int obj_id = header->obj_id;
140
122
  #ifndef NO_POOL_ALLOC
141
122
  if (header->pool_id == 1) {
142
112
    if (pool1_.IsMarked(obj_id)) {
143
28
      return;
144
28
    }
145
84
    pool1_.Mark(obj_id);
146
84
  } else if (header->pool_id == 2) {
147
10
    if (pool2_.IsMarked(obj_id)) {
148
0
      return;
149
0
    }
150
10
    pool2_.Mark(obj_id);
151
10
  } else
152
0
  #endif
153
0
  {
154
0
    if (mark_set_.IsMarked(obj_id)) {
155
0
      return;
156
0
    }
157
0
    mark_set_.Mark(obj_id);
158
0
  }
159
160
94
  switch (header->heap_tag) {
161
32
  case HeapTag::Opaque:  // e.g. strings have no children
162
32
    break;
163
164
8
  case HeapTag::Scanned:  // these 2 types have children
165
62
  case HeapTag::FixedSize:
166
62
    gray_stack_.push_back(header);  // Push the header, not the object!
167
62
    break;
168
169
0
  default:
170
0
    FAIL(kShouldNotGetHere);
171
94
  }
172
94
}
173
174
111
void MarkSweepHeap::TraceChildren() {
175
173
  while (!gray_stack_.empty()) {
176
62
    ObjHeader* header = gray_stack_.back();
177
62
    gray_stack_.pop_back();
178
179
62
    switch (header->heap_tag) {
180
54
    case HeapTag::FixedSize: {
181
54
      auto fixed = reinterpret_cast<LayoutFixed*>(header->ObjectAddress());
182
54
      int mask = FIELD_MASK(*header);
183
184
1.35k
      for (int i = 0; i < kFieldMaskBits; ++i) {
185
1.29k
        if (mask & (1 << i)) {
186
42
          RawObject* child = fixed->children_[i];
187
42
          if (child) {
188
36
            MaybeMarkAndPush(child);
189
36
          }
190
42
        }
191
1.29k
      }
192
54
      break;
193
0
    }
194
195
8
    case HeapTag::Scanned: {
196
8
      auto slab = reinterpret_cast<Slab<RawObject*>*>(header->ObjectAddress());
197
198
8
      int n = NUM_POINTERS(*header);
199
32
      for (int i = 0; i < n; ++i) {
200
24
        RawObject* child = slab->items_[i];
201
24
        if (child) {
202
10
          MaybeMarkAndPush(child);
203
10
        }
204
24
      }
205
8
      break;
206
0
    }
207
0
    default:
208
      // Only FixedSize and Scanned are pushed
209
0
      FAIL(kShouldNotGetHere);
210
62
    }
211
62
  }
212
111
}
213
214
111
void MarkSweepHeap::Sweep() {
215
111
  #ifndef NO_POOL_ALLOC
216
111
  pool1_.Sweep();
217
111
  pool2_.Sweep();
218
111
  #endif
219
220
111
  int last_live_index = 0;
221
111
  int num_objs = live_objs_.size();
222
1.31k
  for (int i = 0; i < num_objs; ++i) {
223
1.20k
    ObjHeader* obj = live_objs_[i];
224
1.20k
    assert(obj);  // malloc() shouldn't have returned nullptr
225
226
0
    bool is_live = mark_set_.IsMarked(obj->obj_id);
227
228
    // Compact live_objs_ and populate to_free_.  Note: doing the reverse could
229
    // be more efficient when many objects are dead.
230
1.20k
    if (is_live) {
231
0
      live_objs_[last_live_index++] = obj;
232
1.20k
    } else {
233
1.20k
      to_free_.push_back(obj);
234
      // free(obj);
235
1.20k
      num_live_--;
236
1.20k
    }
237
1.20k
  }
238
111
  live_objs_.resize(last_live_index);  // remove dangling objects
239
240
111
  num_collections_++;
241
111
  max_survived_ = std::max(max_survived_, num_live());
242
111
}
243
244
111
int MarkSweepHeap::Collect() {
245
111
  #ifdef GC_TIMING
246
111
  struct timespec start, end;
247
111
  if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &start) < 0) {
248
0
    assert(0);
249
0
  }
250
0
  #endif
251
252
0
  int num_roots = roots_.size();
253
111
  int num_globals = global_roots_.size();
254
255
111
  if (gc_verbose_) {
256
0
    log("");
257
0
    log("%2d. GC with %d roots (%d global) and %d live objects",
258
0
        num_collections_, num_roots + num_globals, num_globals, num_live());
259
0
  }
260
261
  // Resize it
262
111
  mark_set_.ReInit(greatest_obj_id_);
263
111
  #ifndef NO_POOL_ALLOC
264
111
  pool1_.PrepareForGc();
265
111
  pool2_.PrepareForGc();
266
111
  #endif
267
268
  // Mark roots.
269
  // Note: It might be nice to get rid of double pointers
270
245
  for (int i = 0; i < num_roots; ++i) {
271
134
    RawObject* root = *(roots_[i]);
272
134
    if (root) {
273
84
      MaybeMarkAndPush(root);
274
84
    }
275
134
  }
276
277
111
  for (int i = 0; i < num_globals; ++i) {
278
0
    RawObject* root = global_roots_[i];
279
0
    if (root) {
280
0
      MaybeMarkAndPush(root);
281
0
    }
282
0
  }
283
284
  // Traverse object graph.
285
111
  TraceChildren();
286
287
111
  Sweep();
288
289
111
  if (gc_verbose_) {
290
0
    log("    %d live after sweep", num_live());
291
0
  }
292
293
  // We know how many are live.  If the number of objects is close to the
294
  // threshold (above 75%), then set the threshold to 2 times the number of
295
  // live objects.  This is an ad hoc policy that removes observed "thrashing"
296
  // -- being at 99% of the threshold and doing FUTILE mark and sweep.
297
298
111
  int water_mark = (gc_threshold_ * 3) / 4;
299
111
  if (num_live() > water_mark) {
300
0
    gc_threshold_ = num_live() * 2;
301
0
    num_growths_++;
302
0
    if (gc_verbose_) {
303
0
      log("    exceeded %d live objects; gc_threshold set to %d", water_mark,
304
0
          gc_threshold_);
305
0
    }
306
0
  }
307
308
111
  #ifdef GC_TIMING
309
111
  if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &end) < 0) {
310
0
    assert(0);
311
0
  }
312
313
0
  double start_secs = start.tv_sec + start.tv_nsec / 1e9;
314
111
  double end_secs = end.tv_sec + end.tv_nsec / 1e9;
315
111
  double gc_millis = (end_secs - start_secs) * 1000.0;
316
317
111
  if (gc_verbose_) {
318
0
    log("    %.1f ms GC", gc_millis);
319
0
  }
320
321
111
  total_gc_millis_ += gc_millis;
322
111
  if (gc_millis > max_gc_millis_) {
323
57
    max_gc_millis_ = gc_millis;
324
57
  }
325
111
  #endif
326
327
111
  return num_live();  // for unit tests only
328
111
}
329
330
6
void MarkSweepHeap::PrintStats(int fd) {
331
6
  dprintf(fd, "  num live         = %10d\n", num_live());
332
  // max_survived_ can be less than num_live(), because we leave off the last GC
333
6
  dprintf(fd, "  max survived     = %10d\n", max_survived_);
334
6
  dprintf(fd, "\n");
335
336
6
  #ifndef NO_POOL_ALLOC
337
6
  dprintf(fd, "  num allocated    = %10d\n",
338
6
          num_allocated_ + pool1_.num_allocated() + pool2_.num_allocated());
339
6
  dprintf(fd, "  num in heap      = %10d\n", num_allocated_);
340
  #else
341
  dprintf(fd, "  num allocated    = %10d\n", num_allocated_);
342
  #endif
343
344
6
  #ifndef NO_POOL_ALLOC
345
6
  dprintf(fd, "  num in pool 1    = %10d\n", pool1_.num_allocated());
346
6
  dprintf(fd, "  num in pool 2    = %10d\n", pool2_.num_allocated());
347
6
  dprintf(
348
6
      fd, "bytes allocated    = %10" PRId64 "\n",
349
6
      bytes_allocated_ + pool1_.bytes_allocated() + pool2_.bytes_allocated());
350
  #else
351
  dprintf(fd, "bytes allocated    = %10" PRId64 "\n", bytes_allocated_);
352
  #endif
353
354
6
  dprintf(fd, "\n");
355
6
  dprintf(fd, "  num gc points    = %10d\n", num_gc_points_);
356
6
  dprintf(fd, "  num collections  = %10d\n", num_collections_);
357
6
  dprintf(fd, "\n");
358
6
  dprintf(fd, "   gc threshold    = %10d\n", gc_threshold_);
359
6
  dprintf(fd, "  num growths      = %10d\n", num_growths_);
360
6
  dprintf(fd, "\n");
361
6
  dprintf(fd, "  max gc millis    = %10.1f\n", max_gc_millis_);
362
6
  dprintf(fd, "total gc millis    = %10.1f\n", total_gc_millis_);
363
6
  dprintf(fd, "\n");
364
6
  dprintf(fd, "roots capacity     = %10d\n",
365
6
          static_cast<int>(roots_.capacity()));
366
6
  dprintf(fd, " objs capacity     = %10d\n",
367
6
          static_cast<int>(live_objs_.capacity()));
368
6
}
369
370
// Cleanup at the end of main() to remain ASAN-safe
371
57
void MarkSweepHeap::MaybePrintStats() {
372
57
  int stats_fd = -1;
373
57
  char* e = getenv("OILS_GC_STATS");
374
57
  if (e && strlen(e)) {  // env var set and non-empty
375
0
    stats_fd = STDERR_FILENO;
376
57
  } else {
377
    // A raw file descriptor lets benchmarks extract stats even if the script
378
    // writes to stdout and stderr.  Shells can't use open() without potential
379
    // conflicts.
380
381
57
    e = getenv("OILS_GC_STATS_FD");
382
57
    if (e && strlen(e)) {
383
      // Try setting 'stats_fd'.  If there's an error, it will be unchanged, and
384
      // we don't PrintStats();
385
0
      StringToInteger(e, strlen(e), 10, &stats_fd);
386
0
    }
387
57
  }
388
389
57
  if (stats_fd != -1) {
390
0
    PrintStats(stats_fd);
391
0
  }
392
57
}
393
394
55
void MarkSweepHeap::FreeEverything() {
395
55
  roots_.clear();
396
55
  global_roots_.clear();
397
398
55
  Collect();
399
400
  // Collect() told us what to free()
401
1.20k
  for (auto obj : to_free_) {
402
1.20k
    free(obj);
403
1.20k
  }
404
55
  #ifndef NO_POOL_ALLOC
405
55
  pool1_.Free();
406
55
  pool2_.Free();
407
55
  #endif
408
55
}
409
410
55
void MarkSweepHeap::CleanProcessExit() {
411
55
  char* e = getenv("OILS_GC_ON_EXIT");
412
  // collect by default; OILS_GC_ON_EXIT=0 overrides
413
55
  if (e && strcmp(e, "0") == 0) {
414
0
    ;
415
55
  } else {
416
55
    FreeEverything();
417
55
  }
418
55
  MaybePrintStats();
419
55
}
420
421
// for the main binary
422
2
void MarkSweepHeap::ProcessExit() {
423
  #ifdef CLEAN_PROCESS_EXIT
424
  FreeEverything();
425
  #else
426
2
  char* e = getenv("OILS_GC_ON_EXIT");
427
  // don't collect by default; OILS_GC_ON_EXIT=1 overrides
428
2
  if (e && strcmp(e, "1") == 0) {
429
0
    FreeEverything();
430
0
  }
431
2
  #endif
432
433
2
  MaybePrintStats();
434
2
}
435
436
MarkSweepHeap gHeap;
437
438
#endif  // MARK_SWEEP