src/runtime/type.go
// Runtime type representation.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"unsafe"
)
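// maps_typeString returns the string form of typ. The maps_ prefix follows
// the runtime's convention for helpers exposed to other internal packages
// (here internal/runtime/maps) via go:linkname.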
func maps_typeString(typ *abi.Type) string {
	return toRType(typ).string()
}

type nameOff = abi.NameOff
type typeOff = abi.TypeOff
type textOff = abi.TextOff

type _type = abi.Type
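// rtype is a thin wrapper around *abi.Type that provides the runtime's
// helper methods (string, name, pkgpath, and the offset resolvers below).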
type rtype struct {
	*abi.Type
}

func (t rtype) string() string {
	s := t.nameOff(t.Str).Name()
	if t.TFlag&abi.TFlagExtraStar != 0 {
		return s[1:]
	}
	return s
}

func (t rtype) uncommon() *uncommontype {
	return t.Uncommon()
}

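// name returns the tail of the type's string after the last package
// qualifier, skipping any '.' that appears inside square brackets (for
// example, in type argument lists). It returns "" for unnamed types.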
func (t rtype) name() string {
	if t.TFlag&abi.TFlagNamed == 0 {
		return ""
	}
	s := t.string()
	i := len(s) - 1
	sqBrackets := 0
	for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
		switch s[i] {
		case ']':
			sqBrackets++
		case '[':
			sqBrackets--
		}
		i--
	}
	return s[i+1:]
}
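// pkgpath returns the path of the package where t was defined, if
// available. Unlike reflect's PkgPath, it also reports a package for
// unnamed struct and interface types by consulting their embedded PkgPath
// name.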
func (t rtype) pkgpath() string {
	if u := t.uncommon(); u != nil {
		return t.nameOff(u.PkgPath).Name()
	}
	switch t.Kind() {
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t.Type))
		return st.PkgPath.Name()
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t.Type))
		return it.PkgPath.Name()
	}
	return ""
}

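// getGCMask returns the pointer/scalar bitmap for type t, building it on
// demand for types that carry the TFlagGCMaskOnDemand flag.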
func getGCMask(t *_type) *byte {
	if t.TFlag&abi.TFlagGCMaskOnDemand != 0 {
		return getGCMaskOnDemand(t)
	}
	return t.GCData
}

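// inProgress is a sentinel: its address, stored in a type's GCData slot,
// marks a GC mask whose construction is currently in progress on some
// thread.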
var inProgress byte

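// getGCMaskOnDemand builds (if needed) and returns the GC mask for t.
// For these types, t.GCData is not the mask itself but a slot holding one
// of three values: nil (mask not built yet), &inProgress (another thread
// is building it), or a pointer to the finished mask. The slot is claimed
// with a CAS from nil to &inProgress; losers spin with osyield until the
// winner publishes the mask.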
func getGCMaskOnDemand(t *_type) *byte {
	addr := unsafe.Pointer(t.GCData)

	if GOOS == "aix" {
		addr = add(addr, firstmoduledata.data-aixStaticDataBase)
	}

	for {
		p := (*byte)(atomic.Loadp(addr))
		switch p {
		default:
			return p
		case &inProgress:
			osyield()
			continue
		case nil:
			if !atomic.Casp1((*unsafe.Pointer)(addr), nil, unsafe.Pointer(&inProgress)) {
				continue
			}
			bytes := goarch.PtrSize * divRoundUp(t.PtrBytes/goarch.PtrSize, 8*goarch.PtrSize)
			p = (*byte)(persistentalloc(bytes, goarch.PtrSize, &memstats.other_sys))
			systemstack(func() {
				buildGCMask(t, bitCursor{ptr: p, n: 0})
			})
			atomic.StorepNoWB(addr, unsafe.Pointer(p))
			return p
		}
	}
}

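// bitCursor is a write cursor into a pointer/scalar bitmap: ptr is the
// base of the bitmap and n is the bit index the cursor currently points
// at.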
type bitCursor struct {
	ptr *byte
	n   uintptr
}

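// write copies the low cnt bits starting at data into the bitmap at
// cursor b. Bits outside the written range (below the cursor in the first
// byte, above it in the last) are left untouched.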
func (b bitCursor) write(data *byte, cnt uintptr) {
	// Starting byte for writing.
	p := addb(b.ptr, b.n/8)

	n := b.n % 8                    // # of valid bits in buf
	buf := uintptr(*p) & (1<<n - 1) // keep the bits already written below the cursor

	// Work 8 bits at a time.
	for cnt > 8 {
		// Read 8 more bits; buf now holds 8+n bits.
		buf |= uintptr(*data) << n
		n += 8
		data = addb(data, 1)
		cnt -= 8
		// Write 8 of the bits in buf.
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	// Read the remaining bits.
	buf |= (uintptr(*data) & (1<<cnt - 1)) << n
	n += cnt

	// Flush the remaining bits.
	if n > 8 {
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	*p &^= 1<<n - 1
	*p |= byte(buf)
}

// offset returns a cursor cnt bits further into the bitmap.
func (b bitCursor) offset(cnt uintptr) bitCursor {
	return bitCursor{ptr: b.ptr, n: b.n + cnt}
}

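// buildGCMask writes the pointer/scalar mask for type t into the bitmap
// at dst. On-demand array and struct types are expanded by recursing into
// their elements and fields; every other type must already carry a
// precomputed GCData mask. Single-element arrays and a struct's largest
// field are handled iteratively (goto top) to bound the recursion depth.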
func buildGCMask(t *_type, dst bitCursor) {
top:
	if t.PtrBytes == 0 {
		throw("pointerless type")
	}
	if t.TFlag&abi.TFlagGCMaskOnDemand == 0 {
		// Precomputed mask: copy t.GCData to dst.
		dst.write(t.GCData, t.PtrBytes/goarch.PtrSize)
		return
	}
	// The mask must be assembled from components; only arrays and
	// structs get here.
	switch t.Kind() {
	case abi.Array:
		a := t.ArrayType()
		if a.Len == 1 {
			// A one-element array has the same mask as its element;
			// avoid a recursive call on something that isn't smaller.
			t = a.Elem
			goto top
		}
		e := a.Elem
		for i := uintptr(0); i < a.Len; i++ {
			buildGCMask(e, dst)
			dst = dst.offset(e.Size_ / goarch.PtrSize)
		}
	case abi.Struct:
		s := t.StructType()
		var bigField abi.StructField
		for _, f := range s.Fields {
			ft := f.Typ
			if !ft.Pointers() {
				continue
			}
			if ft.Size_ > t.Size_/2 {
				// A field larger than half the struct is handled
				// last, iteratively, so each recursive call works on
				// something at most half the size of its parent.
				bigField = f
				continue
			}
			buildGCMask(ft, dst.offset(f.Offset/goarch.PtrSize))
		}
		if bigField.Typ != nil {
			// Handle the big field iteratively instead of recursing.
			t = bigField.Typ
			dst = dst.offset(bigField.Offset / goarch.PtrSize)
			goto top
		}
	default:
		throw("unexpected kind")
	}
}

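// reflectOffs holds pointers that the reflect package registers at run
// time (for types it builds on the heap), since those cannot be expressed
// as offsets into any module's type data. Identifiers are allocated from
// next, m maps identifier to pointer, and minv is the inverse map; lock
// guards all three.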
var reflectOffs struct {
	lock mutex
	next int32
	m    map[int32]unsafe.Pointer
	minv map[unsafe.Pointer]int32
}

func reflectOffsLock() {
	lock(&reflectOffs.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&reflectOffs.lock))
	}
}

func reflectOffsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&reflectOffs.lock))
	}
	unlock(&reflectOffs.lock)
}

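// resolveNameOff resolves the name at offset off relative to the module
// containing ptrInModule. If the base pointer does not fall in any
// module's type section, the offset is looked up in the reflectOffs table
// of run-time registered names.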
func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
	if off == 0 {
		return name{}
	}
	base := uintptr(ptrInModule)
	for md := &firstmoduledata; md != nil; md = md.next {
		if base >= md.types && base < md.etypes {
			res := md.types + uintptr(off)
			if res > md.etypes {
				println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
				throw("runtime: name offset out of range")
			}
			return name{Bytes: (*byte)(unsafe.Pointer(res))}
		}
	}

	reflectOffsLock()
	res, found := reflectOffs.m[int32(off)]
	reflectOffsUnlock()
	if !found {
		println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
		for next := &firstmoduledata; next != nil; next = next.next {
			println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
		}
		throw("runtime: name offset base pointer out of range")
	}
	return name{Bytes: (*byte)(res)}
}

func (t rtype) nameOff(off nameOff) name {
	return resolveNameOff(unsafe.Pointer(t.Type), off)
}

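// resolveTypeOff resolves the *_type at offset off relative to the module
// containing ptrInModule, consulting the module's typemap (for types
// de-duplicated across modules) and falling back to the reflectOffs table
// when the base pointer lies outside every module's type section. An off
// of 0 or -1 is a sentinel and resolves to nil.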
func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
	if off == 0 || off == -1 {
		return nil
	}
	base := uintptr(ptrInModule)
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: type offset base pointer out of range")
		}
		return (*_type)(res)
	}
	res := md.types + uintptr(off)
	resType := (*_type)(unsafe.Pointer(res))
	if t := md.typemap[resType]; t != nil {
		return t
	}
	if res > md.etypes {
		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
		throw("runtime: type offset out of range")
	}
	return resType
}

func (t rtype) typeOff(off typeOff) *_type {
	return resolveTypeOff(unsafe.Pointer(t.Type), off)
}

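// textOff resolves a method text offset to a PC. An off of -1 marks a
// method the linker deemed unreachable; it resolves to unreachableMethod.
// Offsets registered at run time by reflect are looked up in reflectOffs;
// everything else is resolved through the owning module's text section.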
func (t rtype) textOff(off textOff) unsafe.Pointer {
	if off == -1 {
		return unsafe.Pointer(abi.FuncPCABIInternal(unreachableMethod))
	}
	base := uintptr(unsafe.Pointer(t.Type))
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: text offset base pointer out of range")
		}
		return res
	}
	res := md.textAddr(uint32(off))
	return unsafe.Pointer(res)
}

type uncommontype = abi.UncommonType

type interfacetype = abi.InterfaceType

type arraytype = abi.ArrayType

type chantype = abi.ChanType

type slicetype = abi.SliceType

type functype = abi.FuncType

type ptrtype = abi.PtrType

type name = abi.Name

type structtype = abi.StructType

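// pkgPath returns the package path encoded in n, or "" if n carries no
// package path data. The flag bits and varint-prefixed fields it decodes
// follow the name layout defined in internal/abi.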
func pkgPath(n name) string {
	if n.Bytes == nil || *n.Data(0)&(1<<2) == 0 {
		return ""
	}
	i, l := n.ReadVarint(1)
	off := 1 + i + l
	if *n.Data(0)&(1<<1) != 0 {
		// Skip over the tag, which is also varint-prefixed.
		i2, l2 := n.ReadVarint(off)
		off += i2 + l2
	}
	var nameOff nameOff
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.Data(off)))[:])
	pkgPathName := resolveNameOff(unsafe.Pointer(n.Bytes), nameOff)
	return pkgPathName.Name()
}

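// typelinksinit scans the types of every module loaded after the first
// and populates each extra module's typemap, mapping its type descriptors
// to the canonical descriptor for structurally equal types seen in
// earlier modules. This keeps *_type pointer equality meaningful across
// modules (plugins and shared libraries).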
func typelinksinit() {
	lockInit(&moduleToTypelinksLock, lockRankTypelinks)

	if firstmoduledata.next == nil {
		return
	}

	modules := activeModules()
	prev := modules[0]
	prevTypelinks := moduleTypelinks(modules[0])
	typehash := make(map[uint32][]*_type, len(prevTypelinks))
	for _, md := range modules[1:] {
		// Collect types from the previous module into typehash.
	collect:
		for _, tl := range prevTypelinks {
			t := tl
			if prev.typemap != nil {
				t = prev.typemap[tl]
			}
			// Add to typehash if not seen before.
			tlist := typehash[t.Hash]
			for _, tcur := range tlist {
				if tcur == t {
					continue collect
				}
			}
			typehash[t.Hash] = append(tlist, t)
		}

		mdTypelinks := moduleTypelinks(md)

		if md.typemap == nil {
			// If any of this module's types match a type from a
			// prior module, prefer the prior type by recording it in
			// this module's typemap.
			tm := make(map[*_type]*_type, len(mdTypelinks))
			pinnedTypemaps = append(pinnedTypemaps, tm)
			md.typemap = tm
			for _, t := range mdTypelinks {
				set := t
				for _, candidate := range typehash[t.Hash] {
					seen := map[_typePair]struct{}{}
					if typesEqual(t, candidate, seen) {
						set = candidate
						break
					}
				}
				md.typemap[t] = set
			}
		}

		prev = md
		prevTypelinks = mdTypelinks
	}
}

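// moduleToTypelinks caches, per module, the list of type descriptors
// recovered by moduleTypelinks. moduleToTypelinksLock guards the map.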
var (
	moduleToTypelinks     map[*moduledata][]*_type
	moduleToTypelinksLock mutex
)

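// moduleTypelinks returns the type descriptors of md by walking its type
// descriptor region sequentially, computing each descriptor's size from
// its kind plus any trailing uncommon, parameter, method, or field data.
// Results are cached in moduleToTypelinks. Note: this walk assumes the
// layout and alignment used when the descriptors were emitted; the
// alignment constants below reflect that assumption.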
func moduleTypelinks(md *moduledata) []*_type {
	lock(&moduleToTypelinksLock)

	if typelinks, ok := moduleToTypelinks[md]; ok {
		unlock(&moduleToTypelinksLock)
		return typelinks
	}

	// Rough initial capacity: assume descriptors average about twice the
	// size of a bare _type.
	ret := make([]*_type, 0, md.typedesclen/(2*unsafe.Sizeof(_type{})))

	td := md.types
	td++

	etypedesc := md.types + md.typedesclen
	for td < etypedesc {
		// Descriptors are aligned to 8 bytes on arm and 32 bytes
		// elsewhere.
		if GOARCH == "arm" {
			td = alignUp(td, 0x8)
		} else {
			td = alignUp(td, 0x20)
		}

		typ := (*_type)(unsafe.Pointer(td))

		ret = append(ret, typ)

		// Compute the size of this descriptor so we can step over it:
		// the kind-specific header plus any trailing parameter, method,
		// or field arrays. Use Kind(), which masks the flag bits stored
		// alongside the kind in Kind_.
		var typSize, add uintptr
		switch typ.Kind() {
		case abi.Array:
			typSize = unsafe.Sizeof(abi.ArrayType{})
		case abi.Chan:
			typSize = unsafe.Sizeof(abi.ChanType{})
		case abi.Func:
			typSize = unsafe.Sizeof(abi.FuncType{})
			ft := (*abi.FuncType)(unsafe.Pointer(typ))
			add = uintptr(ft.NumIn()+ft.NumOut()) * goarch.PtrSize
		case abi.Interface:
			typSize = unsafe.Sizeof(abi.InterfaceType{})
			it := (*abi.InterfaceType)(unsafe.Pointer(typ))
			add = uintptr(len(it.Methods)) * unsafe.Sizeof(abi.Imethod{})
		case abi.Map:
			typSize = unsafe.Sizeof(abi.MapType{})
		case abi.Pointer:
			typSize = unsafe.Sizeof(abi.PtrType{})
		case abi.Slice:
			typSize = unsafe.Sizeof(abi.SliceType{})
		case abi.Struct:
			typSize = unsafe.Sizeof(abi.StructType{})
			st := (*abi.StructType)(unsafe.Pointer(typ))
			add = uintptr(len(st.Fields)) * unsafe.Sizeof(abi.StructField{})

		case abi.Bool,
			abi.Int, abi.Int8, abi.Int16, abi.Int32, abi.Int64,
			abi.Uint, abi.Uint8, abi.Uint16, abi.Uint32, abi.Uint64, abi.Uintptr,
			abi.Float32, abi.Float64,
			abi.Complex64, abi.Complex128,
			abi.String,
			abi.UnsafePointer:
			// Basic kinds use the bare _type descriptor.
			typSize = unsafe.Sizeof(_type{})

		default:
			println("type descriptor at", hex(td), "is kind", typ.Kind_)
			throw("invalid type descriptor")
		}

		td += typSize

		mcount := uintptr(0)
		if typ.TFlag&abi.TFlagUncommon != 0 {
			ut := (*abi.UncommonType)(unsafe.Pointer(td))
			mcount = uintptr(ut.Mcount)
			td += unsafe.Sizeof(abi.UncommonType{})
		}

		td += add

		if mcount > 0 {
			td += mcount * unsafe.Sizeof(abi.Method{})
		}
	}

	if moduleToTypelinks == nil {
		moduleToTypelinks = make(map[*moduledata][]*_type)
	}
	moduleToTypelinks[md] = ret

	unlock(&moduleToTypelinksLock)
	return ret
}

type _typePair struct {
	t1 *_type
	t2 *_type
}

func toRType(t *abi.Type) rtype {
	return rtype{t}
}

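// typesEqual reports whether two type descriptors describe the same Go
// type: same kind, string, package path, and recursively equal
// components. seen tracks in-flight comparisons so that recursive types
// terminate; a pair already being compared is treated as equal. Only
// typelinksinit should need this; the rest of the runtime assumes one
// canonical *_type per Go type.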
func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
	tp := _typePair{t, v}
	if _, ok := seen[tp]; ok {
		return true
	}
	seen[tp] = struct{}{}

	if t == v {
		return true
	}
	kind := t.Kind()
	if kind != v.Kind() {
		return false
	}
	rt, rv := toRType(t), toRType(v)
	if rt.string() != rv.string() {
		return false
	}
	ut := t.Uncommon()
	uv := v.Uncommon()
	if ut != nil || uv != nil {
		if ut == nil || uv == nil {
			return false
		}
		pkgpatht := rt.nameOff(ut.PkgPath).Name()
		pkgpathv := rv.nameOff(uv.PkgPath).Name()
		if pkgpatht != pkgpathv {
			return false
		}
	}
	if abi.Bool <= kind && kind <= abi.Complex128 {
		return true
	}
	switch kind {
	case abi.String, abi.UnsafePointer:
		return true
	case abi.Array:
		at := (*arraytype)(unsafe.Pointer(t))
		av := (*arraytype)(unsafe.Pointer(v))
		return typesEqual(at.Elem, av.Elem, seen) && at.Len == av.Len
	case abi.Chan:
		ct := (*chantype)(unsafe.Pointer(t))
		cv := (*chantype)(unsafe.Pointer(v))
		return ct.Dir == cv.Dir && typesEqual(ct.Elem, cv.Elem, seen)
	case abi.Func:
		ft := (*functype)(unsafe.Pointer(t))
		fv := (*functype)(unsafe.Pointer(v))
		if ft.OutCount != fv.OutCount || ft.InCount != fv.InCount {
			return false
		}
		tin, vin := ft.InSlice(), fv.InSlice()
		for i := 0; i < len(tin); i++ {
			if !typesEqual(tin[i], vin[i], seen) {
				return false
			}
		}
		tout, vout := ft.OutSlice(), fv.OutSlice()
		for i := 0; i < len(tout); i++ {
			if !typesEqual(tout[i], vout[i], seen) {
				return false
			}
		}
		return true
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t))
		iv := (*interfacetype)(unsafe.Pointer(v))
		if it.PkgPath.Name() != iv.PkgPath.Name() {
			return false
		}
		if len(it.Methods) != len(iv.Methods) {
			return false
		}
		for i := range it.Methods {
			tm := &it.Methods[i]
			vm := &iv.Methods[i]
			// Method names and types are resolved relative to the
			// method descriptors themselves, which may live in
			// different modules.
			tname := resolveNameOff(unsafe.Pointer(tm), tm.Name)
			vname := resolveNameOff(unsafe.Pointer(vm), vm.Name)
			if tname.Name() != vname.Name() {
				return false
			}
			if pkgPath(tname) != pkgPath(vname) {
				return false
			}
			tityp := resolveTypeOff(unsafe.Pointer(tm), tm.Typ)
			vityp := resolveTypeOff(unsafe.Pointer(vm), vm.Typ)
			if !typesEqual(tityp, vityp, seen) {
				return false
			}
		}
		return true
	case abi.Map:
		mt := (*abi.MapType)(unsafe.Pointer(t))
		mv := (*abi.MapType)(unsafe.Pointer(v))
		return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
	case abi.Pointer:
		pt := (*ptrtype)(unsafe.Pointer(t))
		pv := (*ptrtype)(unsafe.Pointer(v))
		return typesEqual(pt.Elem, pv.Elem, seen)
	case abi.Slice:
		st := (*slicetype)(unsafe.Pointer(t))
		sv := (*slicetype)(unsafe.Pointer(v))
		return typesEqual(st.Elem, sv.Elem, seen)
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t))
		sv := (*structtype)(unsafe.Pointer(v))
		if len(st.Fields) != len(sv.Fields) {
			return false
		}
		if st.PkgPath.Name() != sv.PkgPath.Name() {
			return false
		}
		for i := range st.Fields {
			tf := &st.Fields[i]
			vf := &sv.Fields[i]
			if tf.Name.Name() != vf.Name.Name() {
				return false
			}
			if !typesEqual(tf.Typ, vf.Typ, seen) {
				return false
			}
			if tf.Name.Tag() != vf.Name.Tag() {
				return false
			}
			if tf.Offset != vf.Offset {
				return false
			}
			if tf.Name.IsEmbedded() != vf.Name.IsEmbedded() {
				return false
			}
		}
		return true
	default:
		println("runtime: impossible type kind", kind)
		throw("runtime: impossible type kind")
		return false
	}
}