Source file src/cmd/compile/internal/walk/walk.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package walk
     6  
     7  import (
     8  	"fmt"
     9  	"internal/abi"
    10  
    11  	"cmd/compile/internal/base"
    12  	"cmd/compile/internal/ir"
    13  	"cmd/compile/internal/reflectdata"
    14  	"cmd/compile/internal/rttype"
    15  	"cmd/compile/internal/ssagen"
    16  	"cmd/compile/internal/typecheck"
    17  	"cmd/compile/internal/types"
    18  	"cmd/internal/src"
    19  )
    20  
// tmpstringbufsize is the size of the temporary string buffers.
// The constant is known to runtime.
const tmpstringbufsize = 32
    23  
    24  func Walk(fn *ir.Func) {
    25  	ir.CurFunc = fn
    26  
    27  	// Set and then clear a package-level cache of static values for this fn.
    28  	// (At some point, it might be worthwhile to have a walkState structure
    29  	// that gets passed everywhere where things like this can go.)
    30  	staticValues = findStaticValues(fn)
    31  	defer func() { staticValues = nil }()
    32  
    33  	errorsBefore := base.Errors()
    34  	order(fn)
    35  	if base.Errors() > errorsBefore {
    36  		return
    37  	}
    38  
    39  	if base.Flag.W != 0 {
    40  		s := fmt.Sprintf("\nbefore walk %v", ir.CurFunc.Sym())
    41  		ir.DumpList(s, ir.CurFunc.Body)
    42  	}
    43  
    44  	walkStmtList(ir.CurFunc.Body)
    45  	if base.Flag.W != 0 {
    46  		s := fmt.Sprintf("after walk %v", ir.CurFunc.Sym())
    47  		ir.DumpList(s, ir.CurFunc.Body)
    48  	}
    49  
    50  	// Eagerly compute sizes of all variables for SSA.
    51  	for _, n := range fn.Dcl {
    52  		types.CalcSize(n.Type())
    53  	}
    54  }
    55  
    56  // walkRecv walks an ORECV node.
    57  func walkRecv(n *ir.UnaryExpr) ir.Node {
    58  	if n.Typecheck() == 0 {
    59  		base.Fatalf("missing typecheck: %+v", n)
    60  	}
    61  	init := ir.TakeInit(n)
    62  
    63  	n.X = walkExpr(n.X, &init)
    64  	call := walkExpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, typecheck.NodNil()), &init)
    65  	return ir.InitExpr(init, call)
    66  }
    67  
    68  func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt {
    69  	if n.Op() != ir.OAS {
    70  		base.Fatalf("convas: not OAS %v", n.Op())
    71  	}
    72  	n.SetTypecheck(1)
    73  
    74  	if n.X == nil || n.Y == nil {
    75  		return n
    76  	}
    77  
    78  	lt := n.X.Type()
    79  	rt := n.Y.Type()
    80  	if lt == nil || rt == nil {
    81  		return n
    82  	}
    83  
    84  	if ir.IsBlank(n.X) {
    85  		n.Y = typecheck.DefaultLit(n.Y, nil)
    86  		return n
    87  	}
    88  
    89  	if !types.Identical(lt, rt) {
    90  		n.Y = typecheck.AssignConv(n.Y, lt, "assignment")
    91  		n.Y = walkExpr(n.Y, init)
    92  	}
    93  	types.CalcSize(n.Y.Type())
    94  
    95  	return n
    96  }
    97  
    98  func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr {
    99  	if init == nil {
   100  		base.Fatalf("mkcall with nil init: %v", fn)
   101  	}
   102  	if fn.Type() == nil || fn.Type().Kind() != types.TFUNC {
   103  		base.Fatalf("mkcall %v %v", fn, fn.Type())
   104  	}
   105  
   106  	n := fn.Type().NumParams()
   107  	if n != len(va) {
   108  		base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
   109  	}
   110  
   111  	call := typecheck.Call(base.Pos, fn, va, false).(*ir.CallExpr)
   112  	call.SetType(t)
   113  	return walkExpr(call, init).(*ir.CallExpr)
   114  }
   115  
   116  func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
   117  	return vmkcall(typecheck.LookupRuntime(name), t, init, args)
   118  }
   119  
   120  func mkcallstmt(name string, args ...ir.Node) ir.Node {
   121  	return mkcallstmt1(typecheck.LookupRuntime(name), args...)
   122  }
   123  
// mkcall1 is like mkcall but takes an already-looked-up function node
// instead of a runtime function name.
func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
	return vmkcall(fn, t, init, args)
}
   127  
   128  func mkcallstmt1(fn ir.Node, args ...ir.Node) ir.Node {
   129  	var init ir.Nodes
   130  	n := vmkcall(fn, nil, &init, args)
   131  	if len(init) == 0 {
   132  		return n
   133  	}
   134  	init.Append(n)
   135  	return ir.NewBlockStmt(n.Pos(), init)
   136  }
   137  
   138  func chanfn(name string, n int, t *types.Type) ir.Node {
   139  	if !t.IsChan() {
   140  		base.Fatalf("chanfn %v", t)
   141  	}
   142  	switch n {
   143  	case 1:
   144  		return typecheck.LookupRuntime(name, t.Elem())
   145  	case 2:
   146  		return typecheck.LookupRuntime(name, t.Elem(), t.Elem())
   147  	}
   148  	base.Fatalf("chanfn %d", n)
   149  	return nil
   150  }
   151  
   152  func mapfn(name string, t *types.Type, isfat bool) ir.Node {
   153  	if !t.IsMap() {
   154  		base.Fatalf("mapfn %v", t)
   155  	}
   156  	if mapfast(t) == mapslow || isfat {
   157  		return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Key(), t.Elem())
   158  	}
   159  	return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Elem())
   160  }
   161  
   162  func mapfndel(name string, t *types.Type) ir.Node {
   163  	if !t.IsMap() {
   164  		base.Fatalf("mapfn %v", t)
   165  	}
   166  	if mapfast(t) == mapslow {
   167  		return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Key())
   168  	}
   169  	return typecheck.LookupRuntime(name, t.Key(), t.Elem())
   170  }
   171  
// Codes returned by mapfast identifying which specialized ("fast")
// variant of the runtime map routines applies to a given map type.
const (
	mapslow = iota // generic routines; no fast variant applies
	mapfast32
	mapfast32ptr
	mapfast64
	mapfast64ptr
	mapfaststr
	nmapfast // number of codes; sizes the mapnames array
)
   181  
   182  type mapnames [nmapfast]string
   183  
   184  func mkmapnames(base string, ptr string) mapnames {
   185  	return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
   186  }
   187  
// Runtime map routine names, indexed by the code mapfast returns for a
// given map type.
var mapaccess1 = mkmapnames("mapaccess1", "")
var mapaccess2 = mkmapnames("mapaccess2", "")
var mapassign = mkmapnames("mapassign", "ptr")
var mapdelete = mkmapnames("mapdelete", "")
   192  
   193  func mapfast(t *types.Type) int {
   194  	if t.Elem().Size() > abi.MapMaxElemBytes {
   195  		return mapslow
   196  	}
   197  	switch reflectdata.AlgType(t.Key()) {
   198  	case types.AMEM32:
   199  		if !t.Key().HasPointers() {
   200  			return mapfast32
   201  		}
   202  		if types.PtrSize == 4 {
   203  			return mapfast32ptr
   204  		}
   205  		base.Fatalf("small pointer %v", t.Key())
   206  	case types.AMEM64:
   207  		if !t.Key().HasPointers() {
   208  			return mapfast64
   209  		}
   210  		if types.PtrSize == 8 {
   211  			return mapfast64ptr
   212  		}
   213  		// Two-word object, at least one of which is a pointer.
   214  		// Use the slow path.
   215  	case types.ASTRING:
   216  		return mapfaststr
   217  	}
   218  	return mapslow
   219  }
   220  
   221  func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) {
   222  	walkExprListSafe(n.Args, init)
   223  
   224  	// walkExprListSafe will leave OINDEX (s[n]) alone if both s
   225  	// and n are name or literal, but those may index the slice we're
   226  	// modifying here. Fix explicitly.
   227  	ls := n.Args
   228  	for i1, n1 := range ls {
   229  		ls[i1] = cheapExpr(n1, init)
   230  	}
   231  }
   232  
   233  // appendWalkStmt typechecks and walks stmt and then appends it to init.
   234  func appendWalkStmt(init *ir.Nodes, stmt ir.Node) {
   235  	op := stmt.Op()
   236  	n := typecheck.Stmt(stmt)
   237  	if op == ir.OAS || op == ir.OAS2 {
   238  		// If the assignment has side effects, walkExpr will append them
   239  		// directly to init for us, while walkStmt will wrap it in an OBLOCK.
   240  		// We need to append them directly.
   241  		// TODO(rsc): Clean this up.
   242  		n = walkExpr(n, init)
   243  	} else {
   244  		n = walkStmt(n)
   245  	}
   246  	init.Append(n)
   247  }
   248  
// maxOpenDefers is the max number of defers in a function using
// open-coded defers. We enforce this limit because the deferBits
// bitmask is currently a single byte (to minimize code size).
const maxOpenDefers = 8
   252  
   253  // backingArrayPtrLen extracts the pointer and length from a slice or string.
   254  // This constructs two nodes referring to n, so n must be a cheapExpr.
   255  func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
   256  	var init ir.Nodes
   257  	c := cheapExpr(n, &init)
   258  	if c != n || len(init) != 0 {
   259  		base.Fatalf("backingArrayPtrLen not cheap: %v", n)
   260  	}
   261  	ptr = ir.NewUnaryExpr(base.Pos, ir.OSPTR, n)
   262  	if n.Type().IsString() {
   263  		ptr.SetType(types.Types[types.TUINT8].PtrTo())
   264  	} else {
   265  		ptr.SetType(n.Type().Elem().PtrTo())
   266  	}
   267  	ptr.SetTypecheck(1)
   268  	length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n)
   269  	length.SetType(types.Types[types.TINT])
   270  	length.SetTypecheck(1)
   271  	return ptr, length
   272  }
   273  
// mayCall reports whether evaluating expression n may require
// function calls, which could clobber function call arguments/results
// currently on the stack.
func mayCall(n ir.Node) bool {
	// This is intended to avoid putting constants
	// into temporaries with the race detector (or other
	// instrumentation) which interferes with simple
	// "this is a constant" tests in ssagen.
	// Also, it will generally lead to better code.
	if n.Op() == ir.OLITERAL {
		return false
	}

	// When instrumenting, any expression might require function calls.
	if base.Flag.Cfg.Instrumenting {
		return true
	}

	// Float and complex are the types that may be lowered to runtime
	// calls on soft-float targets.
	isSoftFloat := func(typ *types.Type) bool {
		return types.IsFloat[typ.Kind()] || types.IsComplex[typ.Kind()]
	}

	return ir.Any(n, func(n ir.Node) bool {
		// walk should have already moved any Init blocks off of
		// expressions.
		if len(n.Init()) != 0 {
			base.FatalfAt(n.Pos(), "mayCall %+v", n)
		}

		switch n.Op() {
		default:
			// Any op not explicitly listed below is a bug: it must be
			// classified one way or the other before mayCall can be
			// trusted on it.
			base.FatalfAt(n.Pos(), "mayCall %+v", n)

		case ir.OCALLFUNC, ir.OCALLINTER,
			ir.OUNSAFEADD, ir.OUNSAFESLICE:
			// Direct calls, or ops lowered to runtime calls.
			return true

		case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
			ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODYNAMICDOTTYPE, ir.ODIV, ir.OMOD,
			ir.OSLICE2ARR, ir.OSLICE2ARRPTR:
			// These ops might panic, make sure they are done
			// before we start marshaling args for a call. See issue 16760.
			return true

		case ir.OANDAND, ir.OOROR:
			n := n.(*ir.LogicalExpr)
			// The RHS expression may have init statements that
			// should only execute conditionally, and so cannot be
			// pulled out to the top-level init list. We could try
			// to be more precise here.
			return len(n.Y.Init()) != 0

		// When using soft-float, these ops might be rewritten to function calls
		// so we ensure they are evaluated first.
		case ir.OADD, ir.OSUB, ir.OMUL, ir.ONEG:
			return ssagen.Arch.SoftFloat && isSoftFloat(n.Type())
		case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
			// Comparisons yield bool; check the operand type instead.
			n := n.(*ir.BinaryExpr)
			return ssagen.Arch.SoftFloat && isSoftFloat(n.X.Type())
		case ir.OCONV:
			// A conversion may be soft-float on either side.
			n := n.(*ir.ConvExpr)
			return ssagen.Arch.SoftFloat && (isSoftFloat(n.Type()) || isSoftFloat(n.X.Type()))

		case ir.OMIN, ir.OMAX:
			// string or float requires runtime call, see (*ssagen.state).minmax method.
			return n.Type().IsString() || n.Type().IsFloat()

		case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OLINKSYMOFFSET, ir.OMETHEXPR,
			ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OMAKEFACE,
			ir.OADDR, ir.OBITNOT, ir.ONOT, ir.OPLUS,
			ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL,
			ir.OCONVNOP, ir.ODOT,
			ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.OSPTR,
			ir.OBYTES2STRTMP, ir.OGETG, ir.OGETCALLERSP, ir.OSLICEHEADER, ir.OSTRINGHEADER:
			// ok: operations that don't require function calls.
			// Expand as needed.
		}

		return false
	})
}
   355  
   356  // itabType loads the _type field from a runtime.itab struct.
   357  func itabType(itab ir.Node) ir.Node {
   358  	if itabTypeField == nil {
   359  		// internal/abi.ITab's Type field
   360  		itabTypeField = runtimeField("Type", rttype.ITab.OffsetOf("Type"), types.NewPtr(types.Types[types.TUINT8]))
   361  	}
   362  	return boundedDotPtr(base.Pos, itab, itabTypeField)
   363  }
   364  
   365  var itabTypeField *types.Field
   366  
   367  // boundedDotPtr returns a selector expression representing ptr.field
   368  // and omits nil-pointer checks for ptr.
   369  func boundedDotPtr(pos src.XPos, ptr ir.Node, field *types.Field) *ir.SelectorExpr {
   370  	sel := ir.NewSelectorExpr(pos, ir.ODOTPTR, ptr, field.Sym)
   371  	sel.Selection = field
   372  	sel.SetType(field.Type)
   373  	sel.SetTypecheck(1)
   374  	sel.SetBounded(true) // guaranteed not to fault
   375  	return sel
   376  }
   377  
   378  func runtimeField(name string, offset int64, typ *types.Type) *types.Field {
   379  	f := types.NewField(src.NoXPos, ir.Pkgs.Runtime.Lookup(name), typ)
   380  	f.Offset = offset
   381  	return f
   382  }
   383  
   384  // ifaceData loads the data field from an interface.
   385  // The concrete type must be known to have type t.
   386  // It follows the pointer if !IsDirectIface(t).
   387  func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
   388  	if t.IsInterface() {
   389  		base.Fatalf("ifaceData interface: %v", t)
   390  	}
   391  	ptr := ir.NewUnaryExpr(pos, ir.OIDATA, n)
   392  	if types.IsDirectIface(t) {
   393  		ptr.SetType(t)
   394  		ptr.SetTypecheck(1)
   395  		return ptr
   396  	}
   397  	ptr.SetType(types.NewPtr(t))
   398  	ptr.SetTypecheck(1)
   399  	ind := ir.NewStarExpr(pos, ptr)
   400  	ind.SetType(t)
   401  	ind.SetTypecheck(1)
   402  	ind.SetBounded(true)
   403  	return ind
   404  }
   405  
   406  // staticValue returns the earliest expression it can find that always
   407  // evaluates to n, with similar semantics to [ir.StaticValue].
   408  //
   409  // It only returns results for the ir.CurFunc being processed in [Walk],
   410  // including its closures, and uses a cache to reduce duplicative work.
   411  // It can return n or nil if it does not find an earlier expression.
   412  //
   413  // The current use case is reducing OCONVIFACE allocations, and hence
   414  // staticValue is currently only useful when given an *ir.ConvExpr.X as n.
   415  func staticValue(n ir.Node) ir.Node {
   416  	if staticValues == nil {
   417  		base.Fatalf("staticValues is nil. staticValue called outside of walk.Walk?")
   418  	}
   419  	return staticValues[n]
   420  }
   421  
// staticValues is a cache of static values for use by staticValue.
// It is installed by Walk for the duration of one function's walk and
// cleared again when Walk returns.
var staticValues map[ir.Node]ir.Node
   424  
   425  // findStaticValues returns a map of static values for fn.
   426  func findStaticValues(fn *ir.Func) map[ir.Node]ir.Node {
   427  	// We can't use an ir.ReassignOracle or ir.StaticValue in the
   428  	// middle of walk because they don't currently handle
   429  	// transformed assignments (e.g., will complain about 'RHS == nil').
   430  	// So we instead build this map to use in walk.
   431  	ro := &ir.ReassignOracle{}
   432  	ro.Init(fn)
   433  	m := make(map[ir.Node]ir.Node)
   434  	ir.Visit(fn, func(n ir.Node) {
   435  		if n.Op() == ir.OCONVIFACE {
   436  			x := n.(*ir.ConvExpr).X
   437  			v := ro.StaticValue(x)
   438  			if v != nil && v != x {
   439  				m[x] = v
   440  			}
   441  		}
   442  	})
   443  	return m
   444  }
   445  

View as plain text