Source file src/runtime/type.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Runtime type representation.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"unsafe"
)

//go:linkname maps_typeString internal/runtime/maps.typeString
func maps_typeString(typ *abi.Type) string {
	return toRType(typ).string()
}

type nameOff = abi.NameOff
type typeOff = abi.TypeOff
type textOff = abi.TextOff

type _type = abi.Type

// rtype is a wrapper that allows us to define additional methods.
type rtype struct {
	*abi.Type // embedding is okay here (unlike reflect) because none of this is public
}

func (t rtype) string() string {
	s := t.nameOff(t.Str).Name()
	if t.TFlag&abi.TFlagExtraStar != 0 {
		return s[1:]
	}
	return s
}
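
// Illustrative note (not in the original source): when TFlagExtraStar is set,
// the stored name string is shared with the corresponding pointer type and
// carries a leading '*' to save binary size, e.g. a stored "*main.T" also
// describes main.T, and the s[1:] above drops the star.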

func (t rtype) uncommon() *uncommontype {
	return t.Uncommon()
}

func (t rtype) name() string {
	if t.TFlag&abi.TFlagNamed == 0 {
		return ""
	}
	s := t.string()
	i := len(s) - 1
	sqBrackets := 0
	for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
		switch s[i] {
		case ']':
			sqBrackets++
		case '[':
			sqBrackets--
		}
		i--
	}
	return s[i+1:]
}
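
// Worked example (illustrative, not in the original source): for a generic
// type whose string form is "main.Pair[example.com/pkg.T]", the bracket
// counter keeps the right-to-left scan from stopping at the '.' inside
// "[...]", so name returns "Pair[example.com/pkg.T]" rather than the
// truncated "T]".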

// pkgpath returns the path of the package where t was defined, if
// available. This is not the same as the reflect package's PkgPath
// method, in that it returns the package path for struct and interface
// types, not just named types.
func (t rtype) pkgpath() string {
	if u := t.uncommon(); u != nil {
		return t.nameOff(u.PkgPath).Name()
	}
	switch t.Kind() {
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t.Type))
		return st.PkgPath.Name()
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t.Type))
		return it.PkgPath.Name()
	}
	return ""
}

// getGCMask returns the pointer/nonpointer bitmask for type t.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func getGCMask(t *_type) *byte {
	if t.TFlag&abi.TFlagGCMaskOnDemand != 0 {
		// Split the rest into getGCMaskOnDemand so getGCMask itself is inlineable.
		return getGCMaskOnDemand(t)
	}
	return t.GCData
}

// inProgress is a byte whose address is a sentinel indicating that
// some thread is currently building the GC bitmask for a type.
var inProgress byte

// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func getGCMaskOnDemand(t *_type) *byte {
	// For large types, GCData doesn't point directly to a bitmask.
	// Instead it points to a pointer to a bitmask, and the runtime
	// is responsible for (on first use) creating the bitmask and
	// storing a pointer to it in that slot.
	// TODO: we could use &t.GCData as the slot, but types are
	// in read-only memory currently.
	addr := unsafe.Pointer(t.GCData)

	if GOOS == "aix" {
		addr = add(addr, firstmoduledata.data-aixStaticDataBase)
	}

	for {
		p := (*byte)(atomic.Loadp(addr))
		switch p {
		default: // Already built.
			return p
		case &inProgress: // Someone else is currently building it.
			// Just wait until the builder is done.
			// We can't block here, so spinning while having
			// the OS thread yield is about the best we can do.
			osyield()
			continue
		case nil: // Not built yet.
			// Attempt to get exclusive access to build it.
			if !atomic.Casp1((*unsafe.Pointer)(addr), nil, unsafe.Pointer(&inProgress)) {
				continue
			}

			// Build gcmask for this type.
			bytes := goarch.PtrSize * divRoundUp(t.PtrBytes/goarch.PtrSize, 8*goarch.PtrSize)
			p = (*byte)(persistentalloc(bytes, goarch.PtrSize, &memstats.other_sys))
			systemstack(func() {
				buildGCMask(t, bitCursor{ptr: p, n: 0})
			})

			// Store the newly-built gcmask for future callers.
			atomic.StorepNoWB(addr, unsafe.Pointer(p))
			return p
		}
	}
}
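
// Worked example of the allocation size above (illustrative): on a 64-bit
// machine goarch.PtrSize is 8, so a type with PtrBytes = 1000*8 has 1000
// pointer-sized slots to describe. divRoundUp(1000, 64) = 16 rounds that up
// to 16 pointer-sized words of bitmap, i.e. bytes = 8*16 = 128, one bit per
// slot with the trailing 24 bits unused.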

// A bitCursor is a simple cursor to memory to which we
// can write a set of bits.
type bitCursor struct {
	ptr *byte   // base of region
	n   uintptr // cursor points to bit n of region
}

// write writes cnt bits to b, reading them from data starting at bit 0.
// Requires cnt > 0.
func (b bitCursor) write(data *byte, cnt uintptr) {
	// Starting byte for writing.
	p := addb(b.ptr, b.n/8)

	// Note: if we're starting halfway through a byte, we load the
	// existing lower bits so we don't clobber them.
	n := b.n % 8                    // # of valid bits in buf
	buf := uintptr(*p) & (1<<n - 1) // buffered bits to start

	// Work 8 bits at a time.
	for cnt > 8 {
		// Read 8 more bits, now buf has 8-15 valid bits in it.
		buf |= uintptr(*data) << n
		n += 8
		data = addb(data, 1)
		cnt -= 8
		// Write 8 of the buffered bits out.
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	// Read remaining bits.
	buf |= (uintptr(*data) & (1<<cnt - 1)) << n
	n += cnt

	// Flush remaining bits.
	if n > 8 {
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	*p &^= 1<<n - 1
	*p |= byte(buf)
}
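
// Worked example (illustrative): with b.n = 3 and cnt = 13, the write starts
// at byte 0 with the 3 existing low bits buffered. The loop runs once: 8
// source bits are shifted in above them and one full byte is flushed (the 3
// old bits plus the first 5 source bits), leaving 3 source bits buffered.
// The tail then merges the last 5 source bits (n becomes 8), and the final
// masked store writes them without touching bits beyond the n buffered ones.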

// offset returns a cursor advanced cnt bits past b.
func (b bitCursor) offset(cnt uintptr) bitCursor {
	return bitCursor{ptr: b.ptr, n: b.n + cnt}
}

// buildGCMask writes the ptr/nonptr bitmask for t to dst.
// t must contain at least one pointer.
func buildGCMask(t *_type, dst bitCursor) {
	// Note: we want to avoid a situation where buildGCMask gets into a
	// very deep recursion, because M stacks are fixed size and pretty small
	// (16KB). We do that by ensuring that any recursive
	// call operates on a type at most half the size of its parent.
	// Thus, the recursive chain can be at most 64 calls deep (on a
	// 64-bit machine).
	// Recursion is avoided by using a "tail call" (jumping to the
	// "top" label) for any recursive call with a large subtype.
top:
	if t.PtrBytes == 0 {
		throw("pointerless type")
	}
	if t.TFlag&abi.TFlagGCMaskOnDemand == 0 {
		// copy t.GCData to dst
		dst.write(t.GCData, t.PtrBytes/goarch.PtrSize)
		return
	}
	// The above case should handle all kinds except
	// possibly arrays and structs.
	switch t.Kind() {
	case abi.Array:
		a := t.ArrayType()
		if a.Len == 1 {
			// Avoid recursive call for element type that
			// isn't smaller than the parent type.
			t = a.Elem
			goto top
		}
		e := a.Elem
		for i := uintptr(0); i < a.Len; i++ {
			buildGCMask(e, dst)
			dst = dst.offset(e.Size_ / goarch.PtrSize)
		}
	case abi.Struct:
		s := t.StructType()
		var bigField abi.StructField
		for _, f := range s.Fields {
			ft := f.Typ
			if !ft.Pointers() {
				continue
			}
			if ft.Size_ > t.Size_/2 {
				// Avoid recursive call for field type that
				// is larger than half of the parent type.
				// There can be only one.
				bigField = f
				continue
			}
			buildGCMask(ft, dst.offset(f.Offset/goarch.PtrSize))
		}
		if bigField.Typ != nil {
			// Note: this case causes bits to be written out of order.
			t = bigField.Typ
			dst = dst.offset(bigField.Offset / goarch.PtrSize)
			goto top
		}
	default:
		throw("unexpected kind")
	}
}
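
// Illustrative example (not in the original source): for a hypothetical
//
//	type T struct {
//		a *int
//		b [3]uintptr
//		c *int
//	}
//
// the struct case writes a's one-bit mask at bit 0 and c's at bit
// c.Offset/PtrSize = 4, while b is skipped entirely because it contains no
// pointers; its bits stay zero in the freshly allocated mask.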

// reflectOffs holds type offsets defined at run time by the reflect package.
//
// When a type is defined at run time, its *rtype data lives on the heap.
// There is a wide range of possible addresses the heap may use, which
// may not be representable as a 32-bit offset. Moreover, the GC may
// one day start moving heap memory, in which case there is no stable
// offset that can be defined.
//
// To provide stable offsets, we pin *rtype objects in a global map
// and treat the offset as an identifier. We use negative offsets that
// do not overlap with any compile-time module offsets.
//
// Entries are created by reflect.addReflectOff.
var reflectOffs struct {
	lock mutex
	next int32
	m    map[int32]unsafe.Pointer
	minv map[unsafe.Pointer]int32
}

func reflectOffsLock() {
	lock(&reflectOffs.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&reflectOffs.lock))
	}
}

func reflectOffsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&reflectOffs.lock))
	}
	unlock(&reflectOffs.lock)
}

func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
	if off == 0 {
		return name{}
	}
	base := uintptr(ptrInModule)
	for md := &firstmoduledata; md != nil; md = md.next {
		if base >= md.types && base < md.etypes {
			res := md.types + uintptr(off)
			if res > md.etypes {
				println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
				throw("runtime: name offset out of range")
			}
			return name{Bytes: (*byte)(unsafe.Pointer(res))}
		}
	}

	// No module found. See if it is a run-time name.
	reflectOffsLock()
	res, found := reflectOffs.m[int32(off)]
	reflectOffsUnlock()
	if !found {
		println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
		for next := &firstmoduledata; next != nil; next = next.next {
			println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
		}
		throw("runtime: name offset base pointer out of range")
	}
	return name{Bytes: (*byte)(res)}
}
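
// Illustrative note (not in the original source): names created at run time
// by reflect live on the heap, so their base pointer falls outside every
// module's [types, etypes) range; the lookup above then falls through to the
// reflectOffs map, which is keyed by the negative offsets handed out as
// identifiers.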

func (t rtype) nameOff(off nameOff) name {
	return resolveNameOff(unsafe.Pointer(t.Type), off)
}

func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
	if off == 0 || off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return nil
	}
	base := uintptr(ptrInModule)
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: type offset base pointer out of range")
		}
		return (*_type)(res)
	}
	res := md.types + uintptr(off)
	resType := (*_type)(unsafe.Pointer(res))
	if t := md.typemap[resType]; t != nil {
		return t
	}
	if res > md.etypes {
		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
		throw("runtime: type offset out of range")
	}
	return resType
}

func (t rtype) typeOff(off typeOff) *_type {
	return resolveTypeOff(unsafe.Pointer(t.Type), off)
}

func (t rtype) textOff(off textOff) unsafe.Pointer {
	if off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return unsafe.Pointer(abi.FuncPCABIInternal(unreachableMethod))
	}
	base := uintptr(unsafe.Pointer(t.Type))
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: text offset base pointer out of range")
		}
		return res
	}
	res := md.textAddr(uint32(off))
	return unsafe.Pointer(res)
}

type uncommontype = abi.UncommonType

type interfacetype = abi.InterfaceType

type arraytype = abi.ArrayType

type chantype = abi.ChanType

type slicetype = abi.SliceType

type functype = abi.FuncType

type ptrtype = abi.PtrType

type name = abi.Name

type structtype = abi.StructType

func pkgPath(n name) string {
	if n.Bytes == nil || *n.Data(0)&(1<<2) == 0 {
		return ""
	}
	i, l := n.ReadVarint(1)
	off := 1 + i + l
	if *n.Data(0)&(1<<1) != 0 {
		i2, l2 := n.ReadVarint(off)
		off += i2 + l2
	}
	var nameOff nameOff
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.Data(off)))[:])
	pkgPathName := resolveNameOff(unsafe.Pointer(n.Bytes), nameOff)
	return pkgPathName.Name()
}
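
// The layout decoded above (illustrative summary, matching the reads in the
// code): byte 0 of a name holds flag bits (1<<0 exported, 1<<1 a tag
// follows, 1<<2 a package path offset follows), then a varint name length
// and the name bytes, then, if the tag bit is set, a varint tag length and
// the tag bytes, and finally, when 1<<2 is set, a 4-byte nameOff locating
// the package path.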

// typelinksinit scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers.
func typelinksinit() {
	lockInit(&moduleToTypelinksLock, lockRankTypelinks)

	if firstmoduledata.next == nil {
		return
	}

	modules := activeModules()
	prev := modules[0]
	prevTypelinks := moduleTypelinks(modules[0])
	typehash := make(map[uint32][]*_type, len(prevTypelinks))
	for _, md := range modules[1:] {
		// Collect types from the previous module into typehash.
	collect:
		for _, tl := range prevTypelinks {
			t := tl
			if prev.typemap != nil {
				t = prev.typemap[tl]
			}
			// Add to typehash if not seen before.
			tlist := typehash[t.Hash]
			for _, tcur := range tlist {
				if tcur == t {
					continue collect
				}
			}
			typehash[t.Hash] = append(tlist, t)
		}

		mdTypelinks := moduleTypelinks(md)

		if md.typemap == nil {
			// If any of this module's typelinks match a type from a
			// prior module, prefer that prior type by adding an
			// entry to this module's typemap.
			tm := make(map[*_type]*_type, len(mdTypelinks))
			pinnedTypemaps = append(pinnedTypemaps, tm)
			md.typemap = tm
			for _, t := range mdTypelinks {
				set := t
				for _, candidate := range typehash[t.Hash] {
					seen := map[_typePair]struct{}{}
					if typesEqual(t, candidate, seen) {
						set = candidate
						break
					}
				}
				md.typemap[t] = set
			}
		}

		prev = md
		prevTypelinks = mdTypelinks
	}
}
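
// Illustrative scenario (not in the original source): with buildmode=shared,
// both module A and a later module B may carry a descriptor for
// map[string]int. typelinksinit maps B's copy to A's in B's typemap, so code
// that compares *_type values by pointer keeps working across module
// boundaries.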

// moduleToTypelinks maps from moduledata to typelinks.
// We build this lazily as needed, since most programs do not need it.
var (
	moduleToTypelinks     map[*moduledata][]*_type
	moduleToTypelinksLock mutex
)

// moduleTypelinks takes a moduledata and returns the type
// descriptors that the reflect package needs to know about.
// These are the typelinks. They are the types that the user
// can construct. This is used to ensure that we use a unique
// type descriptor for all types. The returned types are sorted
// by type string; the sorting is done by the linker.
// This slice is constructed as needed.
func moduleTypelinks(md *moduledata) []*_type {
	lock(&moduleToTypelinksLock)

	if typelinks, ok := moduleToTypelinks[md]; ok {
		unlock(&moduleToTypelinksLock)
		return typelinks
	}

	// Allocate a very rough estimate of the number of types.
	ret := make([]*_type, 0, md.typedesclen/(2*unsafe.Sizeof(_type{})))

	td := md.types

	// We have to increment by 1 to match the increment done by
	// createRelroSect in allocateDataSections in cmd/link/internal/data.go.
	td++

	etypedesc := md.types + md.typedesclen
	for td < etypedesc {
		// TODO: The fact that type descriptors are aligned to
		// 0x20 does not make sense.
		if GOARCH == "arm" {
			td = alignUp(td, 0x8)
		} else {
			td = alignUp(td, 0x20)
		}

		// This code must match the data structures built by
		// cmd/compile/internal/reflectdata/reflect.go:writeType.

		typ := (*_type)(unsafe.Pointer(td))

		ret = append(ret, typ)

		var typSize, add uintptr
		switch typ.Kind_ {
		case abi.Array:
			typSize = unsafe.Sizeof(abi.ArrayType{})
		case abi.Chan:
			typSize = unsafe.Sizeof(abi.ChanType{})
		case abi.Func:
			typSize = unsafe.Sizeof(abi.FuncType{})
			ft := (*abi.FuncType)(unsafe.Pointer(typ))
			add = uintptr(ft.NumIn()+ft.NumOut()) * goarch.PtrSize
		case abi.Interface:
			typSize = unsafe.Sizeof(abi.InterfaceType{})
			it := (*abi.InterfaceType)(unsafe.Pointer(typ))
			add = uintptr(len(it.Methods)) * unsafe.Sizeof(abi.Imethod{})
		case abi.Map:
			typSize = unsafe.Sizeof(abi.MapType{})
		case abi.Pointer:
			typSize = unsafe.Sizeof(abi.PtrType{})
		case abi.Slice:
			typSize = unsafe.Sizeof(abi.SliceType{})
		case abi.Struct:
			typSize = unsafe.Sizeof(abi.StructType{})
			st := (*abi.StructType)(unsafe.Pointer(typ))
			add = uintptr(len(st.Fields)) * unsafe.Sizeof(abi.StructField{})

		case abi.Bool,
			abi.Int, abi.Int8, abi.Int16, abi.Int32, abi.Int64,
			abi.Uint, abi.Uint8, abi.Uint16, abi.Uint32, abi.Uint64, abi.Uintptr,
			abi.Float32, abi.Float64,
			abi.Complex64, abi.Complex128,
			abi.String,
			abi.UnsafePointer:

			typSize = unsafe.Sizeof(_type{})

		default:
			println("type descriptor at", hex(td), "is kind", typ.Kind_)
			throw("invalid type descriptor")
		}

		td += typSize

		mcount := uintptr(0)
		if typ.TFlag&abi.TFlagUncommon != 0 {
			ut := (*abi.UncommonType)(unsafe.Pointer(td))
			mcount = uintptr(ut.Mcount)
			td += unsafe.Sizeof(abi.UncommonType{})
		}

		td += add

		if mcount > 0 {
			td += mcount * unsafe.Sizeof(abi.Method{})
		}
	}

	if moduleToTypelinks == nil {
		moduleToTypelinks = make(map[*moduledata][]*_type)
	}
	moduleToTypelinks[md] = ret

	unlock(&moduleToTypelinksLock)
	return ret
}
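
// Worked example of the size accounting above (illustrative): for a struct
// type with 3 fields and 2 methods on a 64-bit machine, td advances by
// Sizeof(abi.StructType{}), then by Sizeof(abi.UncommonType{}) because
// TFlagUncommon is set, then by add = 3*Sizeof(abi.StructField{}) for the
// field array, then by 2*Sizeof(abi.Method{}) for the method table, and is
// re-aligned at the top of the next iteration.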

type _typePair struct {
	t1 *_type
	t2 *_type
}

func toRType(t *abi.Type) rtype {
	return rtype{t}
}

// typesEqual reports whether two types are equal.
//
// Everywhere in the runtime and reflect packages, it is assumed that
// there is exactly one *_type per Go type, so that pointer equality
// can be used to test if types are equal. There is one place that
// breaks this assumption: buildmode=shared. In this case a type can
// appear as two different pieces of memory. This is hidden from the
// runtime and reflect package by the per-module typemap built in
// typelinksinit. It uses typesEqual to map types from later modules
// back into earlier ones.
//
// Only typelinksinit needs this function.
func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
	tp := _typePair{t, v}
	if _, ok := seen[tp]; ok {
		return true
	}

	// Mark these types as seen (and thus equivalent), which prevents an
	// infinite loop if the two types are identical but recursively defined
	// and loaded from different modules.
	seen[tp] = struct{}{}

	if t == v {
		return true
	}
	kind := t.Kind()
	if kind != v.Kind() {
		return false
	}
	rt, rv := toRType(t), toRType(v)
	if rt.string() != rv.string() {
		return false
	}
	ut := t.Uncommon()
	uv := v.Uncommon()
	if ut != nil || uv != nil {
		if ut == nil || uv == nil {
			return false
		}
		pkgpatht := rt.nameOff(ut.PkgPath).Name()
		pkgpathv := rv.nameOff(uv.PkgPath).Name()
		if pkgpatht != pkgpathv {
			return false
		}
	}
	if abi.Bool <= kind && kind <= abi.Complex128 {
		return true
	}
	switch kind {
	case abi.String, abi.UnsafePointer:
		return true
	case abi.Array:
		at := (*arraytype)(unsafe.Pointer(t))
		av := (*arraytype)(unsafe.Pointer(v))
		return typesEqual(at.Elem, av.Elem, seen) && at.Len == av.Len
	case abi.Chan:
		ct := (*chantype)(unsafe.Pointer(t))
		cv := (*chantype)(unsafe.Pointer(v))
		return ct.Dir == cv.Dir && typesEqual(ct.Elem, cv.Elem, seen)
	case abi.Func:
		ft := (*functype)(unsafe.Pointer(t))
		fv := (*functype)(unsafe.Pointer(v))
		if ft.OutCount != fv.OutCount || ft.InCount != fv.InCount {
			return false
		}
		tin, vin := ft.InSlice(), fv.InSlice()
		for i := 0; i < len(tin); i++ {
			if !typesEqual(tin[i], vin[i], seen) {
				return false
			}
		}
		tout, vout := ft.OutSlice(), fv.OutSlice()
		for i := 0; i < len(tout); i++ {
			if !typesEqual(tout[i], vout[i], seen) {
				return false
			}
		}
		return true
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t))
		iv := (*interfacetype)(unsafe.Pointer(v))
		if it.PkgPath.Name() != iv.PkgPath.Name() {
			return false
		}
		if len(it.Methods) != len(iv.Methods) {
			return false
		}
		for i := range it.Methods {
			tm := &it.Methods[i]
			vm := &iv.Methods[i]
			// Note the mhdr array can be relocated from
			// another module. See #17724.
			tname := resolveNameOff(unsafe.Pointer(tm), tm.Name)
			vname := resolveNameOff(unsafe.Pointer(vm), vm.Name)
			if tname.Name() != vname.Name() {
				return false
			}
			if pkgPath(tname) != pkgPath(vname) {
				return false
			}
			tityp := resolveTypeOff(unsafe.Pointer(tm), tm.Typ)
			vityp := resolveTypeOff(unsafe.Pointer(vm), vm.Typ)
			if !typesEqual(tityp, vityp, seen) {
				return false
			}
		}
		return true
	case abi.Map:
		mt := (*abi.MapType)(unsafe.Pointer(t))
		mv := (*abi.MapType)(unsafe.Pointer(v))
		return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
	case abi.Pointer:
		pt := (*ptrtype)(unsafe.Pointer(t))
		pv := (*ptrtype)(unsafe.Pointer(v))
		return typesEqual(pt.Elem, pv.Elem, seen)
	case abi.Slice:
		st := (*slicetype)(unsafe.Pointer(t))
		sv := (*slicetype)(unsafe.Pointer(v))
		return typesEqual(st.Elem, sv.Elem, seen)
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t))
		sv := (*structtype)(unsafe.Pointer(v))
		if len(st.Fields) != len(sv.Fields) {
			return false
		}
		if st.PkgPath.Name() != sv.PkgPath.Name() {
			return false
		}
		for i := range st.Fields {
			tf := &st.Fields[i]
			vf := &sv.Fields[i]
			if tf.Name.Name() != vf.Name.Name() {
				return false
			}
			if !typesEqual(tf.Typ, vf.Typ, seen) {
				return false
			}
			if tf.Name.Tag() != vf.Name.Tag() {
				return false
			}
			if tf.Offset != vf.Offset {
				return false
			}
			if tf.Name.IsEmbedded() != vf.Name.IsEmbedded() {
				return false
			}
		}
		return true
	default:
		println("runtime: impossible type kind", kind)
		throw("runtime: impossible type kind")
		return false
	}
}
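
// Illustrative example of why the seen map matters (not in the original
// source): a recursive type such as
//
//	type List struct{ Next *List }
//
// loaded from two modules yields two self-referential descriptor graphs.
// A naive comparison would recurse forever (List -> *List -> List -> ...);
// recording the pair in seen before descending treats the in-progress
// comparison as equal, so the traversal terminates.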