Source file src/runtime/type.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Runtime type representation.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"internal/runtime/atomic"
	"unsafe"
)

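// maps_typeString is made available to internal/runtime/maps via
// go:linkname, so map runtime errors can include a type's string form
// without that package importing runtime.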
//go:linkname maps_typeString internal/runtime/maps.typeString
func maps_typeString(typ *abi.Type) string {
	return toRType(typ).string()
}

type nameOff = abi.NameOff
type typeOff = abi.TypeOff
type textOff = abi.TextOff

type _type = abi.Type

// rtype is a wrapper that allows us to define additional methods.
type rtype struct {
	*abi.Type // embedding is okay here (unlike reflect) because none of this is public
}

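// string returns the string form of t. The compiler sometimes stores only
// the pointer form of a type's name and sets TFlagExtraStar; in that case
// the leading '*' is dropped. Illustrative example (not from the source):
// a type whose Str data reads "*main.T" with TFlagExtraStar set
// stringifies as "main.T".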
func (t rtype) string() string {
	s := t.nameOff(t.Str).Name()
	if t.TFlag&abi.TFlagExtraStar != 0 {
		return s[1:]
	}
	return s
}

func (t rtype) uncommon() *uncommontype {
	return t.Uncommon()
}

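// name returns t's name within its package, or "" if t is unnamed. The
// scan below walks backward to the last '.' that sits outside square
// brackets, since for generic types the bracketed type arguments may
// themselves contain dots. Illustrative example: "main.Pair[sub.A,sub.B]"
// yields "Pair[sub.A,sub.B]".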
func (t rtype) name() string {
	if t.TFlag&abi.TFlagNamed == 0 {
		return ""
	}
	s := t.string()
	i := len(s) - 1
	sqBrackets := 0
	for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
		switch s[i] {
		case ']':
			sqBrackets++
		case '[':
			sqBrackets--
		}
		i--
	}
	return s[i+1:]
}

// pkgpath returns the path of the package where t was defined, if
// available. This is not the same as the reflect package's PkgPath
// method, in that it returns the package path for struct and interface
// types, not just named types.
func (t rtype) pkgpath() string {
	if u := t.uncommon(); u != nil {
		return t.nameOff(u.PkgPath).Name()
	}
	switch t.Kind_ & abi.KindMask {
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t.Type))
		return st.PkgPath.Name()
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t.Type))
		return it.PkgPath.Name()
	}
	return ""
}

// getGCMask returns the pointer/nonpointer bitmask for type t.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func getGCMask(t *_type) *byte {
	if t.TFlag&abi.TFlagGCMaskOnDemand != 0 {
		// Split the rest into getGCMaskOnDemand so getGCMask itself is inlineable.
		return getGCMaskOnDemand(t)
	}
	return t.GCData
}

// inProgress is a byte whose address is a sentinel indicating that
// some thread is currently building the GC bitmask for a type.
var inProgress byte

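// getGCMaskOnDemand returns the bitmask for t, building it on first use.
// The slot that t.GCData points to moves through three states:
//
//	nil         - no mask has been built yet
//	&inProgress - another thread is building the mask right now
//	p           - the mask is built and stored at p
//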
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func getGCMaskOnDemand(t *_type) *byte {
	// For large types, GCData doesn't point directly to a bitmask.
	// Instead it points to a pointer to a bitmask, and the runtime
	// is responsible for (on first use) creating the bitmask and
	// storing a pointer to it in that slot.
	// TODO: we could use &t.GCData as the slot, but types are
	// in read-only memory currently.
	addr := unsafe.Pointer(t.GCData)

	if GOOS == "aix" {
		addr = add(addr, firstmoduledata.data-aixStaticDataBase)
	}

	for {
		p := (*byte)(atomic.Loadp(addr))
		switch p {
		default: // Already built.
			return p
		case &inProgress: // Someone else is currently building it.
			// Just wait until the builder is done.
			// We can't block here, so spinning while having
			// the OS thread yield is about the best we can do.
			osyield()
			continue
		case nil: // Not built yet.
			// Attempt to get exclusive access to build it.
			if !atomic.Casp1((*unsafe.Pointer)(addr), nil, unsafe.Pointer(&inProgress)) {
				continue
			}

			// Build gcmask for this type: one bit per pointer-sized
			// word of t's pointer-containing prefix, with the
			// allocation rounded up to a whole multiple of the
			// pointer size.
			bytes := goarch.PtrSize * divRoundUp(t.PtrBytes/goarch.PtrSize, 8*goarch.PtrSize)
			p = (*byte)(persistentalloc(bytes, goarch.PtrSize, &memstats.other_sys))
			systemstack(func() {
				buildGCMask(t, bitCursor{ptr: p, n: 0})
			})

			// Store the newly-built gcmask for future callers.
			atomic.StorepNoWB(addr, unsafe.Pointer(p))
			return p
		}
	}
}

// A bitCursor is a simple cursor to memory to which we
// can write a set of bits.
type bitCursor struct {
	ptr *byte   // base of region
	n   uintptr // cursor points to bit n of region
}

// write copies cnt bits to b, reading from data starting at its bit 0.
// Requires cnt > 0.
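//
// Worked example (illustrative, not from the source): with b.n == 3 and
// cnt == 6, the low 3 bits already in the first byte are preserved, the
// first 5 data bits land in bits 3..7 of that byte, and the 6th data bit
// lands in bit 0 of the next byte.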
func (b bitCursor) write(data *byte, cnt uintptr) {
	// Starting byte for writing.
	p := addb(b.ptr, b.n/8)

	// Note: if we're starting halfway through a byte, we load the
	// existing lower bits so we don't clobber them.
	n := b.n % 8                    // # of valid bits in buf
	buf := uintptr(*p) & (1<<n - 1) // buffered bits to start

	// Work 8 bits at a time.
	for cnt > 8 {
		// Read 8 more bits, now buf has 8-15 valid bits in it.
		buf |= uintptr(*data) << n
		n += 8
		data = addb(data, 1)
		cnt -= 8
		// Write 8 of the buffered bits out.
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	// Read remaining bits.
	buf |= (uintptr(*data) & (1<<cnt - 1)) << n
	n += cnt

	// Flush remaining bits.
	if n > 8 {
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	*p &^= 1<<n - 1
	*p |= byte(buf)
}

// offset returns a copy of b advanced by cnt bits.
func (b bitCursor) offset(cnt uintptr) bitCursor {
	return bitCursor{ptr: b.ptr, n: b.n + cnt}
}

// buildGCMask writes the ptr/nonptr bitmap for t to dst.
// t must contain at least one pointer.
func buildGCMask(t *_type, dst bitCursor) {
	// Note: we want to avoid a situation where buildGCMask gets into a
	// very deep recursion, because M stacks are fixed size and pretty small
	// (16KB). We do that by ensuring that any recursive
	// call operates on a type at most half the size of its parent.
	// Thus, the recursive chain can be at most 64 calls deep (on a
	// 64-bit machine).
	// Recursion is avoided by using a "tail call" (jumping to the
	// "top" label) for any recursive call with a large subtype.
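	//
	// Illustrative example (not from the source): for
	// struct{ big [1024]*int; small *int }, small is handled by a direct
	// recursive call, while big (larger than half the struct) is deferred
	// and becomes the jump back to top.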
top:
	if t.PtrBytes == 0 {
		throw("pointerless type")
	}
	if t.TFlag&abi.TFlagGCMaskOnDemand == 0 {
		// copy t.GCData to dst
		dst.write(t.GCData, t.PtrBytes/goarch.PtrSize)
		return
	}
	// The above case should handle all kinds except
	// possibly arrays and structs.
	switch t.Kind() {
	case abi.Array:
		a := t.ArrayType()
		if a.Len == 1 {
			// Avoid recursive call for element type that
			// isn't smaller than the parent type.
			t = a.Elem
			goto top
		}
		e := a.Elem
		for i := uintptr(0); i < a.Len; i++ {
			buildGCMask(e, dst)
			dst = dst.offset(e.Size_ / goarch.PtrSize)
		}
	case abi.Struct:
		s := t.StructType()
		var bigField abi.StructField
		for _, f := range s.Fields {
			ft := f.Typ
			if !ft.Pointers() {
				continue
			}
			if ft.Size_ > t.Size_/2 {
				// Avoid recursive call for field type that
				// is larger than half of the parent type.
				// There can be only one.
				bigField = f
				continue
			}
			buildGCMask(ft, dst.offset(f.Offset/goarch.PtrSize))
		}
		if bigField.Typ != nil {
			// Note: this case causes bits to be written out of order.
			t = bigField.Typ
			dst = dst.offset(bigField.Offset / goarch.PtrSize)
			goto top
		}
	default:
		throw("unexpected kind")
	}
}

// reflectOffs holds type offsets defined at run time by the reflect package.
//
// When a type is defined at run time, its *rtype data lives on the heap.
// There is a wide range of possible addresses the heap may use, which
// may not be representable as a 32-bit offset. Moreover the GC may
// one day start moving heap memory, in which case there is no stable
// offset that can be defined.
//
// To provide stable offsets, we pin *rtype objects in a global map
// and treat the offset as an identifier. We use negative offsets that
// do not overlap with any compile-time module offsets.
//
// Entries are created by reflect.addReflectOff.
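//
// The negative offsets handed out here are pure identifiers: the
// resolvers below fall back to this map whenever a base pointer is not
// inside any module's type data.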
var reflectOffs struct {
	lock mutex
	next int32
	m    map[int32]unsafe.Pointer
	minv map[unsafe.Pointer]int32
}

func reflectOffsLock() {
	lock(&reflectOffs.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&reflectOffs.lock))
	}
}

func reflectOffsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&reflectOffs.lock))
	}
	unlock(&reflectOffs.lock)
}

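// resolveNameOff resolves the name at offset off relative to the module
// containing ptrInModule: if the base pointer lies inside some module's
// type data, the name lives at md.types+off; otherwise off is treated as
// a runtime-assigned identifier and looked up in reflectOffs.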
func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
	if off == 0 {
		return name{}
	}
	base := uintptr(ptrInModule)
	for md := &firstmoduledata; md != nil; md = md.next {
		if base >= md.types && base < md.etypes {
			res := md.types + uintptr(off)
			if res > md.etypes {
				println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
				throw("runtime: name offset out of range")
			}
			return name{Bytes: (*byte)(unsafe.Pointer(res))}
		}
	}

	// No module found. See if it is a name defined at run time.
	reflectOffsLock()
	res, found := reflectOffs.m[int32(off)]
	reflectOffsUnlock()
	if !found {
		println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
		for next := &firstmoduledata; next != nil; next = next.next {
			println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
		}
		throw("runtime: name offset base pointer out of range")
	}
	return name{Bytes: (*byte)(res)}
}

func (t rtype) nameOff(off nameOff) name {
	return resolveNameOff(unsafe.Pointer(t.Type), off)
}

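// resolveTypeOff is the typeOff analog of resolveNameOff. It additionally
// consults md.typemap so that, under buildmode=shared, duplicate types in
// later modules resolve to a single canonical *_type.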
func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
	if off == 0 || off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return nil
	}
	base := uintptr(ptrInModule)
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: type offset base pointer out of range")
		}
		return (*_type)(res)
	}
	if t := md.typemap[off]; t != nil {
		return t
	}
	res := md.types + uintptr(off)
	if res > md.etypes {
		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
		throw("runtime: type offset out of range")
	}
	return (*_type)(unsafe.Pointer(res))
}

func (t rtype) typeOff(off typeOff) *_type {
	return resolveTypeOff(unsafe.Pointer(t.Type), off)
}

// textOff translates a method text offset into a PC, resolving through
// the module containing t or, failing that, through reflectOffs.
func (t rtype) textOff(off textOff) unsafe.Pointer {
	if off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return unsafe.Pointer(abi.FuncPCABIInternal(unreachableMethod))
	}
	base := uintptr(unsafe.Pointer(t.Type))
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: text offset base pointer out of range")
		}
		return res
	}
	res := md.textAddr(uint32(off))
	return unsafe.Pointer(res)
}

type uncommontype = abi.UncommonType

type interfacetype = abi.InterfaceType

type arraytype = abi.ArrayType

type chantype = abi.ChanType

type slicetype = abi.SliceType

type functype = abi.FuncType

type ptrtype = abi.PtrType

type name = abi.Name

type structtype = abi.StructType

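// pkgPath decodes an abi.Name by hand to extract its package path.
// Byte 0 holds flag bits (bit 1: a tag follows the name, bit 2: a
// pkgPath nameOff is present), followed by a varint name length and the
// name bytes, then optionally a varint tag length and the tag, and
// finally a 4-byte nameOff identifying the package path.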
func pkgPath(n name) string {
	if n.Bytes == nil || *n.Data(0)&(1<<2) == 0 {
		return ""
	}
	i, l := n.ReadVarint(1)
	off := 1 + i + l
	if *n.Data(0)&(1<<1) != 0 {
		i2, l2 := n.ReadVarint(off)
		off += i2 + l2
	}
	var nameOff nameOff
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.Data(off)))[:])
	pkgPathName := resolveNameOff(unsafe.Pointer(n.Bytes), nameOff)
	return pkgPathName.Name()
}

// typelinksinit scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers.
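//
// This only matters when there is more than one module (e.g.
// buildmode=shared or plugins); with a single module the function
// returns immediately.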
func typelinksinit() {
	if firstmoduledata.next == nil {
		return
	}
	typehash := make(map[uint32][]*_type, len(firstmoduledata.typelinks))

	modules := activeModules()
	prev := modules[0]
	for _, md := range modules[1:] {
		// Collect types from the previous module into typehash.
	collect:
		for _, tl := range prev.typelinks {
			var t *_type
			if prev.typemap == nil {
				t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
			} else {
				t = prev.typemap[typeOff(tl)]
			}
			// Add to typehash if not seen before.
			tlist := typehash[t.Hash]
			for _, tcur := range tlist {
				if tcur == t {
					continue collect
				}
			}
			typehash[t.Hash] = append(tlist, t)
		}

		if md.typemap == nil {
			// If any of this module's typelinks match a type from a
			// prior module, prefer that prior type by adding the offset
			// to this module's typemap.
			tm := make(map[typeOff]*_type, len(md.typelinks))
			pinnedTypemaps = append(pinnedTypemaps, tm)
			md.typemap = tm
			for _, tl := range md.typelinks {
				t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
				for _, candidate := range typehash[t.Hash] {
					seen := map[_typePair]struct{}{}
					if typesEqual(t, candidate, seen) {
						t = candidate
						break
					}
				}
				md.typemap[typeOff(tl)] = t
			}
		}

		prev = md
	}
}

// A _typePair is a key for the seen map used by typesEqual, recording a
// pair of types already assumed equal.
type _typePair struct {
	t1 *_type
	t2 *_type
}

// toRType wraps t so the additional rtype methods are available.
func toRType(t *abi.Type) rtype {
	return rtype{t}
}

// typesEqual reports whether two types are equal.
//
// Everywhere in the runtime and reflect packages, it is assumed that
// there is exactly one *_type per Go type, so that pointer equality
// can be used to test if types are equal. There is one place that
// breaks this assumption: buildmode=shared. In this case a type can
// appear as two different pieces of memory. This is hidden from the
// runtime and reflect package by the per-module typemap built in
// typelinksinit. It uses typesEqual to map types from later modules
// back into earlier ones.
//
// Only typelinksinit needs this function.
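//
// The seen map makes the check coinductive: a pair of types is marked
// equal before their components are compared, so mutually recursive
// types (illustrative example: type List struct{ Next *List }) loaded
// from different modules compare equal instead of recursing forever.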
func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
	tp := _typePair{t, v}
	if _, ok := seen[tp]; ok {
		return true
	}

	// Mark these types as seen (and thus equivalent), which prevents an
	// infinite loop if the two types are identical but recursively defined
	// and loaded from different modules.
	seen[tp] = struct{}{}

	if t == v {
		return true
	}
	kind := t.Kind_ & abi.KindMask
	if kind != v.Kind_&abi.KindMask {
		return false
	}
	rt, rv := toRType(t), toRType(v)
	if rt.string() != rv.string() {
		return false
	}
	ut := t.Uncommon()
	uv := v.Uncommon()
	if ut != nil || uv != nil {
		if ut == nil || uv == nil {
			return false
		}
		pkgpatht := rt.nameOff(ut.PkgPath).Name()
		pkgpathv := rv.nameOff(uv.PkgPath).Name()
		if pkgpatht != pkgpathv {
			return false
		}
	}
	if abi.Bool <= kind && kind <= abi.Complex128 {
		return true
	}
	switch kind {
	case abi.String, abi.UnsafePointer:
		return true
	case abi.Array:
		at := (*arraytype)(unsafe.Pointer(t))
		av := (*arraytype)(unsafe.Pointer(v))
		return typesEqual(at.Elem, av.Elem, seen) && at.Len == av.Len
	case abi.Chan:
		ct := (*chantype)(unsafe.Pointer(t))
		cv := (*chantype)(unsafe.Pointer(v))
		return ct.Dir == cv.Dir && typesEqual(ct.Elem, cv.Elem, seen)
	case abi.Func:
		ft := (*functype)(unsafe.Pointer(t))
		fv := (*functype)(unsafe.Pointer(v))
		if ft.OutCount != fv.OutCount || ft.InCount != fv.InCount {
			return false
		}
		tin, vin := ft.InSlice(), fv.InSlice()
		for i := 0; i < len(tin); i++ {
			if !typesEqual(tin[i], vin[i], seen) {
				return false
			}
		}
		tout, vout := ft.OutSlice(), fv.OutSlice()
		for i := 0; i < len(tout); i++ {
			if !typesEqual(tout[i], vout[i], seen) {
				return false
			}
		}
		return true
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t))
		iv := (*interfacetype)(unsafe.Pointer(v))
		if it.PkgPath.Name() != iv.PkgPath.Name() {
			return false
		}
		if len(it.Methods) != len(iv.Methods) {
			return false
		}
		for i := range it.Methods {
			tm := &it.Methods[i]
			vm := &iv.Methods[i]
			// Note the mhdr array can be relocated from
			// another module. See #17724.
			tname := resolveNameOff(unsafe.Pointer(tm), tm.Name)
			vname := resolveNameOff(unsafe.Pointer(vm), vm.Name)
			if tname.Name() != vname.Name() {
				return false
			}
			if pkgPath(tname) != pkgPath(vname) {
				return false
			}
			tityp := resolveTypeOff(unsafe.Pointer(tm), tm.Typ)
			vityp := resolveTypeOff(unsafe.Pointer(vm), vm.Typ)
			if !typesEqual(tityp, vityp, seen) {
				return false
			}
		}
		return true
	case abi.Map:
		if goexperiment.SwissMap {
			mt := (*abi.SwissMapType)(unsafe.Pointer(t))
			mv := (*abi.SwissMapType)(unsafe.Pointer(v))
			return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
		}
		mt := (*abi.OldMapType)(unsafe.Pointer(t))
		mv := (*abi.OldMapType)(unsafe.Pointer(v))
		return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
	case abi.Pointer:
		pt := (*ptrtype)(unsafe.Pointer(t))
		pv := (*ptrtype)(unsafe.Pointer(v))
		return typesEqual(pt.Elem, pv.Elem, seen)
	case abi.Slice:
		st := (*slicetype)(unsafe.Pointer(t))
		sv := (*slicetype)(unsafe.Pointer(v))
		return typesEqual(st.Elem, sv.Elem, seen)
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t))
		sv := (*structtype)(unsafe.Pointer(v))
		if len(st.Fields) != len(sv.Fields) {
			return false
		}
		if st.PkgPath.Name() != sv.PkgPath.Name() {
			return false
		}
		for i := range st.Fields {
			tf := &st.Fields[i]
			vf := &sv.Fields[i]
			if tf.Name.Name() != vf.Name.Name() {
				return false
			}
			if !typesEqual(tf.Typ, vf.Typ, seen) {
				return false
			}
			if tf.Name.Tag() != vf.Name.Tag() {
				return false
			}
			if tf.Offset != vf.Offset {
				return false
			}
			if tf.Name.IsEmbedded() != vf.Name.IsEmbedded() {
				return false
			}
		}
		return true
	default:
		println("runtime: impossible type kind", kind)
		throw("runtime: impossible type kind")
		return false
	}
}
