Source file
src/runtime/runtime1.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/bytealg"
9 "internal/goarch"
10 "internal/runtime/atomic"
11 "internal/strconv"
12 "unsafe"
13 )
14
15
16
17
18
19
// Keep a cached value to make gotraceback fast,
// since we call it on every call to obtain the traceback settings.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

// traceback_cache is the packed settings read by gotraceback;
// updated atomically by setTraceback. Default level is 2 (system).
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env holds the bits that came from the GOTRACEBACK
// environment variable at startup; setTraceback ORs these back in so
// they can never be cleared by later calls.
var traceback_env uint32
28
29
30
31
32
33
34
35
36
37
38 func gotraceback() (level int32, all, crash bool) {
39 gp := getg()
40 t := atomic.Load(&traceback_cache)
41 crash = t&tracebackCrash != 0
42 all = gp.m.throwing > throwTypeUser || t&tracebackAll != 0
43 if gp.m.traceback != 0 {
44 level = int32(gp.m.traceback)
45 } else if gp.m.throwing >= throwTypeRuntime {
46
47
48 level = 2
49 } else {
50 level = int32(t >> tracebackShift)
51 }
52 return
53 }
54
// Raw C-style process arguments, recorded by args at startup and
// consumed by goargs/goenvs_unix via argv_index.
var (
	argc int32
	argv **byte
)
59
60
61
62
63 func argv_index(argv **byte, i int32) *byte {
64 return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
65 }
66
// args records the process argument count and vector for later use by
// goargs/goenvs_unix, then runs OS-specific argument processing.
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}
72
73 func goargs() {
74 if GOOS == "windows" {
75 return
76 }
77 argslice = make([]string, argc)
78 for i := int32(0); i < argc; i++ {
79 argslice[i] = gostringnocopy(argv_index(argv, i))
80 }
81 }
82
83 func goenvs_unix() {
84
85
86
87 n := int32(0)
88 for argv_index(argv, argc+1+n) != nil {
89 n++
90 }
91
92 envs = make([]string, n)
93 for i := int32(0); i < n; i++ {
94 envs[i] = gostring(argv_index(argv, argc+1+i))
95 }
96 }
97
// environ returns the runtime's copy of the environment, as populated
// at startup (see goenvs_unix).
func environ() []string {
	return envs
}
101
102
103
// Scratch globals for testAtomic64 (globals so the compiler cannot
// optimize the operations away).
var test_z64, test_x64 uint64

// testAtomic64 exercises the 64-bit atomic primitives (Cas64, Load64,
// Store64, Xadd64, Xchg64). It deliberately uses values above 2^32 so
// truncation bugs on 32-bit platforms are caught. Called from check().
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// Cas64 must fail: *addr (42) != old (0), and must not modify old.
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	// Cas64 must succeed when *addr == old, leaving old untouched.
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	// Values above 2^32 catch accidental 32-bit truncation.
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	// Xchg64 returns the previous value.
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
142
143 func check() {
144 var (
145 a int8
146 b uint8
147 c int16
148 d uint16
149 e int32
150 f uint32
151 g int64
152 h uint64
153 i, i1 float32
154 j, j1 float64
155 k unsafe.Pointer
156 l *uint16
157 m [4]byte
158 )
159 type x1t struct {
160 x uint8
161 }
162 type y1t struct {
163 x1 x1t
164 y uint8
165 }
166 var x1 x1t
167 var y1 y1t
168
169 if unsafe.Sizeof(a) != 1 {
170 throw("bad a")
171 }
172 if unsafe.Sizeof(b) != 1 {
173 throw("bad b")
174 }
175 if unsafe.Sizeof(c) != 2 {
176 throw("bad c")
177 }
178 if unsafe.Sizeof(d) != 2 {
179 throw("bad d")
180 }
181 if unsafe.Sizeof(e) != 4 {
182 throw("bad e")
183 }
184 if unsafe.Sizeof(f) != 4 {
185 throw("bad f")
186 }
187 if unsafe.Sizeof(g) != 8 {
188 throw("bad g")
189 }
190 if unsafe.Sizeof(h) != 8 {
191 throw("bad h")
192 }
193 if unsafe.Sizeof(i) != 4 {
194 throw("bad i")
195 }
196 if unsafe.Sizeof(j) != 8 {
197 throw("bad j")
198 }
199 if unsafe.Sizeof(k) != goarch.PtrSize {
200 throw("bad k")
201 }
202 if unsafe.Sizeof(l) != goarch.PtrSize {
203 throw("bad l")
204 }
205 if unsafe.Sizeof(x1) != 1 {
206 throw("bad unsafe.Sizeof x1")
207 }
208 if unsafe.Offsetof(y1.y) != 1 {
209 throw("bad offsetof y1.y")
210 }
211 if unsafe.Sizeof(y1) != 2 {
212 throw("bad unsafe.Sizeof y1")
213 }
214
215 var z uint32
216 z = 1
217 if !atomic.Cas(&z, 1, 2) {
218 throw("cas1")
219 }
220 if z != 2 {
221 throw("cas2")
222 }
223
224 z = 4
225 if atomic.Cas(&z, 5, 6) {
226 throw("cas3")
227 }
228 if z != 4 {
229 throw("cas4")
230 }
231
232 z = 0xffffffff
233 if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
234 throw("cas5")
235 }
236 if z != 0xfffffffe {
237 throw("cas6")
238 }
239
240 m = [4]byte{1, 1, 1, 1}
241 atomic.Or8(&m[1], 0xf0)
242 if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
243 throw("atomicor8")
244 }
245
246 m = [4]byte{0xff, 0xff, 0xff, 0xff}
247 atomic.And8(&m[1], 0x1)
248 if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
249 throw("atomicand8")
250 }
251
252 *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
253 if j == j {
254 throw("float64nan")
255 }
256 if !(j != j) {
257 throw("float64nan1")
258 }
259
260 *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
261 if j == j1 {
262 throw("float64nan2")
263 }
264 if !(j != j1) {
265 throw("float64nan3")
266 }
267
268 *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
269 if i == i {
270 throw("float32nan")
271 }
272 if i == i {
273 throw("float32nan1")
274 }
275
276 *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
277 if i == i1 {
278 throw("float32nan2")
279 }
280 if i == i1 {
281 throw("float32nan3")
282 }
283
284 testAtomic64()
285
286 if fixedStack != round2(fixedStack) {
287 throw("FixedStack is not power-of-2")
288 }
289
290 if !checkASM() {
291 throw("assembly checks failed")
292 }
293 }
294
// A dbgVar describes one GODEBUG setting: its name and where its parsed
// value is stored. Exactly one of value and atomic is set per entry
// (see dbgvars); def is the non-zero default applied before parsing.
type dbgVar struct {
	name   string
	value  *int32        // for variables that can only be set at startup
	atomic *atomic.Int32 // for variables that can also change during execution
	def    int32         // default value, applied by parseRuntimeDebugVars
}
301
302
303
304
305
// debug holds the parsed values of the variables in the GODEBUG
// environment variable, plus derived flags. Plain int32 fields can only
// be set at startup (parseRuntimeDebugVars); atomic.Int32 fields can
// also be updated while the program runs (reparsedebugvars).
var debug struct {
	cgocheck                 int32
	clobberfree              int32
	containermaxprocs        int32
	decoratemappings         int32
	disablethp               int32
	dontfreezetheworld       int32
	efence                   int32
	gccheckmark              int32
	gcpacertrace             int32
	gcshrinkstackoff         int32
	gcstoptheworld           int32
	gctrace                  int32
	invalidptr               int32
	madvdontneed             int32 // defaulted to 1 on Linux; see parseRuntimeDebugVars
	scavtrace                int32
	scheddetail              int32
	schedtrace               int32
	tracebackancestors       int32
	updatemaxprocs           int32
	asyncpreemptoff          int32
	harddecommit             int32
	adaptivestackstart       int32
	tracefpunwindoff         int32
	traceadvanceperiod       int32
	traceCheckStackOwnership int32
	profstackdepth           int32 // defaulted to 128, clamped to maxProfStackDepth
	dataindependenttiming    int32

	// malloc is true when any of the following three options is
	// non-zero; it serves as a single combined check on the allocation
	// path (computed in parseRuntimeDebugVars).
	malloc          bool
	inittrace       int32
	sbrk            int32
	checkfinalizers int32

	// The atomic fields below may be updated after startup; each is
	// reset to zero by reparsedebugvars when its name is no longer
	// present in $GODEBUG.
	traceallocfree atomic.Int32

	panicnil atomic.Int32

	asynctimerchan atomic.Int32

	tracebacklabels atomic.Int32
}
368
369 var dbgvars = []*dbgVar{
370 {name: "adaptivestackstart", value: &debug.adaptivestackstart},
371 {name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
372 {name: "asynctimerchan", atomic: &debug.asynctimerchan},
373 {name: "cgocheck", value: &debug.cgocheck},
374 {name: "clobberfree", value: &debug.clobberfree},
375 {name: "containermaxprocs", value: &debug.containermaxprocs, def: 1},
376 {name: "dataindependenttiming", value: &debug.dataindependenttiming},
377 {name: "decoratemappings", value: &debug.decoratemappings, def: 1},
378 {name: "disablethp", value: &debug.disablethp},
379 {name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
380 {name: "checkfinalizers", value: &debug.checkfinalizers},
381 {name: "efence", value: &debug.efence},
382 {name: "gccheckmark", value: &debug.gccheckmark},
383 {name: "gcpacertrace", value: &debug.gcpacertrace},
384 {name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
385 {name: "gcstoptheworld", value: &debug.gcstoptheworld},
386 {name: "gctrace", value: &debug.gctrace},
387 {name: "harddecommit", value: &debug.harddecommit},
388 {name: "inittrace", value: &debug.inittrace},
389 {name: "invalidptr", value: &debug.invalidptr},
390 {name: "madvdontneed", value: &debug.madvdontneed},
391 {name: "panicnil", atomic: &debug.panicnil},
392 {name: "profstackdepth", value: &debug.profstackdepth, def: 128},
393 {name: "sbrk", value: &debug.sbrk},
394 {name: "scavtrace", value: &debug.scavtrace},
395 {name: "scheddetail", value: &debug.scheddetail},
396 {name: "schedtrace", value: &debug.schedtrace},
397 {name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
398 {name: "traceallocfree", atomic: &debug.traceallocfree},
399 {name: "tracecheckstackownership", value: &debug.traceCheckStackOwnership},
400 {name: "tracebackancestors", value: &debug.tracebackancestors},
401 {name: "tracebacklabels", atomic: &debug.tracebacklabels, def: 0},
402 {name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
403 {name: "updatemaxprocs", value: &debug.updatemaxprocs, def: 1},
404 }
405
// parseRuntimeDebugVars initializes the debug struct. Sources are
// applied in order so that later ones win: hard-coded defaults, then
// per-dbgVar def fields, then the build-time GODEBUG defaults
// (godebugDefault), then the caller-supplied GODEBUG string.
func parseRuntimeDebugVars(godebug string) {
	// Hard-coded non-zero defaults.
	debug.cgocheck = 1
	debug.invalidptr = 1
	debug.adaptivestackstart = 1
	if GOOS == "linux" {
		// On Linux, MADV_FREE is faster than MADV_DONTNEED but doesn't
		// update many memory statistics until the pages are actually
		// reclaimed, which confuses tools like top and memory-based
		// management systems. Default to MADV_DONTNEED.
		debug.madvdontneed = 1
	}
	debug.traceadvanceperiod = defaultTraceAdvancePeriod

	// Apply the per-variable defaults declared in dbgvars.
	for _, v := range dbgvars {
		if v.def != 0 {
			// Every var has exactly one of value/atomic set.
			if v.value != nil {
				*v.value = v.def
			} else if v.atomic != nil {
				v.atomic.Store(v.def)
			}
		}
	}

	// Build-time defaults first, then the runtime value so it wins.
	parsegodebug(godebugDefault, nil)

	parsegodebug(godebug, nil)

	// malloc is a single combined check covering the malloc-path debug
	// options below; profstackdepth is clamped to the supported maximum.
	debug.malloc = (debug.inittrace | debug.sbrk | debug.checkfinalizers) != 0
	debug.profstackdepth = min(debug.profstackdepth, maxProfStackDepth)

	// NOTE(review): checkmark mode forces async preemption off here;
	// the detailed rationale lives in the upstream sources — confirm
	// before relying on this interaction.
	if debug.gccheckmark > 0 {
		debug.asyncpreemptoff = 1
	}
}
461
462 func finishDebugVarsSetup() {
463 p := new(string)
464 *p = gogetenv("GODEBUG")
465 godebugEnv.Store(p)
466
467 setTraceback(gogetenv("GOTRACEBACK"))
468 traceback_env = traceback_cache
469 }
470
471
472
// reparsedebugvars re-parses the runtime's debug variables after the
// GODEBUG environment variable has changed at run time. Only atomic
// variables can change here; plain ones keep their startup values.
func reparsedebugvars(env string) {
	seen := make(map[string]bool)
	// Apply the new environment settings first (right-to-left inside
	// parsegodebug, recording each name in seen)...
	parsegodebug(env, seen)
	// ...then the build-time defaults for names not set above...
	parsegodebug(godebugDefault, seen)
	// ...and finally reset every atomic variable mentioned nowhere,
	// since its setting was removed from $GODEBUG.
	for _, v := range dbgvars {
		if v.atomic != nil && !seen[v.name] {
			v.atomic.Store(0)
		}
	}
}
486
487
488
489
490
491
492
493
494
495
496
// parsegodebug parses the comma-separated key=value GODEBUG string,
// updating the variables listed in dbgvars.
//
// If seen == nil, this is startup: fields are processed left to right,
// so later settings overwrite earlier ones, and both plain (value) and
// atomic variables are updated.
//
// If seen != nil, $GODEBUG changed after startup and this is an
// incremental update: fields are processed right to left (so the
// newest setting for a name wins), every key handled is recorded in
// seen, keys already in seen are skipped, and only atomic variables
// are updated.
func parsegodebug(godebug string, seen map[string]bool) {
	for p := godebug; p != ""; {
		var field string
		if seen == nil {
			// Startup: scan left to right, splitting on the next comma.
			i := bytealg.IndexByteString(p, ',')
			if i < 0 {
				field, p = p, ""
			} else {
				field, p = p[:i], p[i+1:]
			}
		} else {
			// Incremental: peel fields off the right end so the most
			// recently appended setting for a key is handled first.
			i := len(p) - 1
			for i >= 0 && p[i] != ',' {
				i--
			}
			if i < 0 {
				p, field = "", p
			} else {
				p, field = p[:i], p[i+1:]
			}
		}
		i := bytealg.IndexByteString(field, '=')
		if i < 0 {
			// Malformed field without '='; ignore it.
			continue
		}
		key, value := field[:i], field[i+1:]
		// Reading a nil map is safe, so this is a no-op at startup.
		if seen[key] {
			continue
		}
		if seen != nil {
			seen[key] = true
		}

		// memprofilerate is special-cased at startup because its value
		// lives in the exported MemProfileRate, not in a dbgVar.
		if seen == nil && key == "memprofilerate" {
			if n, err := strconv.Atoi(value); err == nil {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, err := strconv.ParseInt(value, 10, 32); err == nil {
						if seen == nil && v.value != nil {
							*v.value = int32(n)
						} else if v.atomic != nil {
							// Atomic vars are settable in both modes.
							v.atomic.Store(int32(n))
						}
					}
				}
			}
		}
	}

	if debug.cgocheck > 1 {
		throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
	}
}
558
559
// setTraceback configures the traceback behavior from a
// GOTRACEBACK-style string: "none", "single" (or empty), "all",
// "system", "crash", "wer" (Windows only), or a bare numeric level.
// The result is merged with the startup environment bits
// (traceback_env) and published atomically to traceback_cache.
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	case "wer":
		if GOOS == "windows" {
			// Like "crash", but also enable Windows Error Reporting.
			t = 2<<tracebackShift | tracebackAll | tracebackCrash
			enableWER()
			break
		}
		// "wer" elsewhere is treated like any unrecognized value.
		fallthrough
	default:
		t = tracebackAll
		// Accept a bare numeric level if it fits in uint32.
		if n, err := strconv.Atoi(level); err == nil && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}

	// c-shared and c-archive buildmodes always get the crash bit, so a
	// fatal error crashes rather than quietly exiting the host process.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	// Bits set via the GOTRACEBACK environment variable at startup can
	// never be cleared by later calls.
	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
596
597
598
599
// acquirem increments the current M's lock count and returns the M,
// keeping the goroutine bound to it (and non-preemptible, see releasem)
// until a matching releasem.
func acquirem() *m {
	gp := getg()
	gp.m.locks++
	return gp.m
}
605
606
// releasem undoes acquirem. When the lock count drops to zero and a
// preemption was requested while the M was locked, re-arm it by
// poisoning stackguard0.
func releasem(mp *m) {
	gp := getg()
	mp.locks--
	if mp.locks == 0 && gp.preempt {
		// restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631 func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
632 modules := activeModules()
633 sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
634 ret := [][]int32{modules[0].typelinks}
635 for _, md := range modules[1:] {
636 sections = append(sections, unsafe.Pointer(md.types))
637 ret = append(ret, md.typelinks)
638 }
639 return sections, ret
640 }
641
642
643
644
645
646
647
648
649
650
651
652
// reflect_resolveNameOff resolves a name offset relative to a base
// pointer in some module, returning the raw name bytes. Intended for
// package reflect (upstream uses a go:linkname directive, not visible
// in this chunk).
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
// reflect_resolveTypeOff resolves a type offset relative to the module
// containing rtype, returning the referenced *_type as an untyped
// pointer. Intended for package reflect (upstream go:linkname).
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}
674
675
676
677
678
679
680
681
682
683
684
685
// reflect_resolveTextOff resolves a text (code) offset relative to the
// module containing rtype, returning a function-entry pointer.
// Intended for package reflect (upstream go:linkname).
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return toRType((*_type)(rtype)).textOff(textOff(off))
}
689
690
691
692
// reflectlite_resolveNameOff is the internal/reflectlite twin of
// reflect_resolveNameOff (same implementation; upstream go:linkname).
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}
696
697
698
699
// reflectlite_resolveTypeOff is the internal/reflectlite twin of
// reflect_resolveTypeOff (same implementation; upstream go:linkname).
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}
703
704
705
706
707 func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
708 reflectOffsLock()
709 if reflectOffs.m == nil {
710 reflectOffs.m = make(map[int32]unsafe.Pointer)
711 reflectOffs.minv = make(map[unsafe.Pointer]int32)
712 reflectOffs.next = -1
713 }
714 id, found := reflectOffs.minv[ptr]
715 if !found {
716 id = reflectOffs.next
717 reflectOffs.next--
718 reflectOffs.m[id] = ptr
719 reflectOffs.minv[ptr] = id
720 }
721 reflectOffsUnlock()
722 return id
723 }
724
725
// fips_getIndicator returns the current goroutine's FIPS service
// indicator (stored on the g).
func fips_getIndicator() uint8 {
	return getg().fipsIndicator
}
729
730
// fips_setIndicator sets the current goroutine's FIPS service
// indicator (stored on the g).
func fips_setIndicator(indicator uint8) {
	getg().fipsIndicator = indicator
}
734
View as plain text