Source file src/cmd/compile/internal/ssagen/pgen.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssagen

import (
	"fmt"
	"internal/buildcfg"
	"os"
	"slices"
	"sort"
	"strings"
	"sync"

	"cmd/compile/internal/base"
	"cmd/compile/internal/inline"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/liveness"
	"cmd/compile/internal/objw"
	"cmd/compile/internal/pgoir"
	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
)

// cmpstackvarlt reports whether the stack variable a sorts before b.
func cmpstackvarlt(a, b *ir.Name, mls *liveness.MergeLocalsState) bool {
	// Sort non-autos before autos.
	if needAlloc(a) != needAlloc(b) {
		return needAlloc(b)
	}

	// If both are non-auto (e.g., parameters, results), then sort by
	// frame offset (defined by ABI).
	if !needAlloc(a) {
		return a.FrameOffset() < b.FrameOffset()
	}

	// From here on, a and b are both autos (i.e., local variables).

	// Sort followers after leaders, if mls != nil.
	if mls != nil {
		aFollow := mls.Subsumed(a)
		bFollow := mls.Subsumed(b)
		if aFollow != bFollow {
			return bFollow
		}
	}

	// Sort used before unused (so AllocFrame can truncate unused
	// variables).
	if a.Used() != b.Used() {
		return a.Used()
	}

	// Sort pointer-typed variables before non-pointer ones.
	// This keeps the stack's GC bitmap compact.
	ap := a.Type().HasPointers()
	bp := b.Type().HasPointers()
	if ap != bp {
		return ap
	}

	// Group variables that need zeroing, so we can efficiently zero
	// them all together.
	ap = a.Needzero()
	bp = b.Needzero()
	if ap != bp {
		return ap
	}

	// Sort variables in descending alignment order, so we can optimally
	// pack variables into the frame.
	if a.Type().Alignment() != b.Type().Alignment() {
		return a.Type().Alignment() > b.Type().Alignment()
	}

	// Sort normal variables before open-coded-defer slots, so that the
	// latter are grouped together and near the top of the frame (to
	// minimize varint encoding of their varp offset).
	if a.OpenDeferSlot() != b.OpenDeferSlot() {
		return a.OpenDeferSlot()
	}

	// If a and b are both open-coded defer slots, then order them by
	// index in descending order, so they'll be laid out in the frame in
	// ascending order.
	//
	// Their index was saved in FrameOffset in state.openDeferSave.
	if a.OpenDeferSlot() {
		return a.FrameOffset() > b.FrameOffset()
	}

	// Tie breaker for stable results.
	return a.Sym().Name < b.Sym().Name
}

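// An illustrative sketch (not part of the original source) of how this
// comparator is consumed: AllocFrame below sorts fn.Dcl with it via
// sort.SliceStable so that ties keep their original order. The dcl
// slice name here is hypothetical.
//
//	sort.SliceStable(dcl, func(i, j int) bool {
//		return cmpstackvarlt(dcl[i], dcl[j], nil) // nil: no merge-locals state
//	})
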
// needAlloc reports whether n is within the current frame, for which we need to
// allocate space. In particular, it excludes arguments and results, which are in
// the caller's frame.
func needAlloc(n *ir.Name) bool {
	if n.Op() != ir.ONAME {
		base.FatalfAt(n.Pos(), "%v has unexpected Op %v", n, n.Op())
	}

	switch n.Class {
	case ir.PAUTO:
		return true
	case ir.PPARAM:
		return false
	case ir.PPARAMOUT:
		return n.IsOutputParamInRegisters()

	default:
		base.FatalfAt(n.Pos(), "%v has unexpected Class %v", n, n.Class)
		return false
	}
}

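// For example (an illustrative sketch; the variable names below are
// hypothetical, not from the original source):
//
//	needAlloc(localVar)  // ir.PAUTO                  -> true
//	needAlloc(inParam)   // ir.PPARAM                 -> false
//	needAlloc(regResult) // ir.PPARAMOUT in registers -> true (needs a spill slot)
//	needAlloc(outResult) // ir.PPARAMOUT on the stack -> false
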
func (s *ssafn) AllocFrame(f *ssa.Func) {
	s.stksize = 0
	s.stkptrsize = 0
	s.stkalign = int64(types.RegSize)
	fn := s.curfn

	// Mark the PAUTOs unused.
	for _, ln := range fn.Dcl {
		if ln.OpenDeferSlot() {
			// Open-coded defer slots have indices that were assigned
			// upfront during SSA construction, but the defer statement can
			// later get removed during deadcode elimination (#61895). To
			// keep their relative offsets correct, treat them all as used.
			continue
		}

		if needAlloc(ln) {
			ln.SetUsed(false)
		}
	}

	for _, l := range f.RegAlloc {
		if ls, ok := l.(ssa.LocalSlot); ok {
			ls.N.SetUsed(true)
		}
	}

	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if n, ok := v.Aux.(*ir.Name); ok {
				switch n.Class {
				case ir.PPARAMOUT:
					if n.IsOutputParamInRegisters() && v.Op == ssa.OpVarDef {
						// Ignore VarDef; look for "real" uses.
						// TODO: maybe do this for PAUTO as well?
						continue
					}
					fallthrough
				case ir.PPARAM, ir.PAUTO:
					n.SetUsed(true)
				}
			}
		}
	}

	var mls *liveness.MergeLocalsState
	var leaders map[*ir.Name]int64
	if base.Debug.MergeLocals != 0 {
		mls = liveness.MergeLocals(fn, f)
		if base.Debug.MergeLocalsTrace > 0 && mls != nil {
			savedNP, savedP := mls.EstSavings()
			fmt.Fprintf(os.Stderr, "%s: %d bytes of stack space saved via stack slot merging (%d nonpointer %d pointer)\n", ir.FuncName(fn), savedNP+savedP, savedNP, savedP)
			if base.Debug.MergeLocalsTrace > 1 {
				fmt.Fprintf(os.Stderr, "=-= merge locals state for %v:\n%v",
					fn, mls)
			}
		}
		leaders = make(map[*ir.Name]int64)
	}

	// Use sort.SliceStable instead of sort.Slice so stack layout (and thus
	// compiler output) is less sensitive to frontend changes that
	// introduce or remove unused variables.
	sort.SliceStable(fn.Dcl, func(i, j int) bool {
		return cmpstackvarlt(fn.Dcl[i], fn.Dcl[j], mls)
	})

	if mls != nil {
		// Rewrite fn.Dcl to reposition followers (subsumed vars) to
		// be immediately following the leader var in their partition.
		followers := []*ir.Name{}
		newdcl := make([]*ir.Name, 0, len(fn.Dcl))
		for i := 0; i < len(fn.Dcl); i++ {
			n := fn.Dcl[i]
			if mls.Subsumed(n) {
				continue
			}
			newdcl = append(newdcl, n)
			if mls.IsLeader(n) {
				followers = mls.Followers(n, followers)
				// Position followers immediately after their leader.
				newdcl = append(newdcl, followers...)
			}
		}
		fn.Dcl = newdcl
	}

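	// An illustrative sketch of the repositioning above (hypothetical
	// names, not from the original source): with one partition where a
	// leads and b, c are subsumed,
	//
	//	before: [x, a, y, b, z, c]
	//	after:  [x, a, b, c, y, z]
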
	if base.Debug.MergeLocalsTrace > 1 && mls != nil {
		fmt.Fprintf(os.Stderr, "=-= sorted DCL for %v:\n", fn)
		for i, v := range fn.Dcl {
			if !ssa.IsMergeCandidate(v) {
				continue
			}
			fmt.Fprintf(os.Stderr, " %d: %q isleader=%v subsumed=%v used=%v sz=%d align=%d t=%s\n", i, v.Sym().Name, mls.IsLeader(v), mls.Subsumed(v), v.Used(), v.Type().Size(), v.Type().Alignment(), v.Type().String())
		}
	}

	// Reassign stack offsets of the locals that are used.
	lastHasPtr := false
	for i, n := range fn.Dcl {
		if n.Op() != ir.ONAME || n.Class != ir.PAUTO && !(n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters()) {
			// I.e., stack-assign if AUTO, or if PARAMOUT in registers
			// (which have no predefined spill locations).
			continue
		}
		if mls != nil && mls.Subsumed(n) {
			continue
		}
		if !n.Used() {
			fn.DebugInfo.(*ssa.FuncDebug).OptDcl = fn.Dcl[i:]
			fn.Dcl = fn.Dcl[:i]
			break
		}
		types.CalcSize(n.Type())
		w := n.Type().Size()
		if w >= types.MaxWidth || w < 0 {
			base.Fatalf("bad width")
		}
		if w == 0 && lastHasPtr {
			// Pad between a pointer-containing object and a zero-sized object.
			// This prevents a pointer to the zero-sized object from being interpreted
			// as a pointer to the pointer-containing object (and causing it
			// to be scanned when it shouldn't be). See issue 24993.
			w = 1
		}
		s.stksize += w
		s.stksize = types.RoundUp(s.stksize, n.Type().Alignment())
		if n.Type().Alignment() > int64(types.RegSize) {
			s.stkalign = n.Type().Alignment()
		}
		if n.Type().HasPointers() {
			s.stkptrsize = s.stksize
			lastHasPtr = true
		} else {
			lastHasPtr = false
		}
		n.SetFrameOffset(-s.stksize)
		if mls != nil && mls.IsLeader(n) {
			leaders[n] = -s.stksize
		}
	}

	if mls != nil {
		// Update offsets of followers (subsumed vars) to be the
		// same as the leader var in their partition.
		for i := 0; i < len(fn.Dcl); i++ {
			n := fn.Dcl[i]
			if !mls.Subsumed(n) {
				continue
			}
			leader := mls.Leader(n)
			off, ok := leaders[leader]
			if !ok {
				panic("internal error missing leader")
			}
			// Set the stack offset of this subsumed (follower) var
			// to be the same as its leader's.
			n.SetFrameOffset(off)
		}

		if base.Debug.MergeLocalsTrace > 1 {
			fmt.Fprintf(os.Stderr, "=-= stack layout for %v:\n", fn)
			for i, v := range fn.Dcl {
				if v.Op() != ir.ONAME || (v.Class != ir.PAUTO && !(v.Class == ir.PPARAMOUT && v.IsOutputParamInRegisters())) {
					continue
				}
				fmt.Fprintf(os.Stderr, " %d: %q frameoff %d isleader=%v subsumed=%v sz=%d align=%d t=%s\n", i, v.Sym().Name, v.FrameOffset(), mls.IsLeader(v), mls.Subsumed(v), v.Type().Size(), v.Type().Alignment(), v.Type().String())
			}
		}
	}

	s.stksize = types.RoundUp(s.stksize, s.stkalign)
	s.stkptrsize = types.RoundUp(s.stkptrsize, s.stkalign)
}

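// A worked sketch of the layout arithmetic above (hypothetical sizes
// and types, not from the original source), assuming types.RegSize == 8:
// laying out a used 16-byte pointer-containing auto and then a 1-byte
// pointer-free auto gives
//
//	stksize = RoundUp(0+16, 8) = 16    // FrameOffset -16, stkptrsize = 16
//	stksize = RoundUp(16+1, 1) = 17    // FrameOffset -17, no pointers
//	stksize = RoundUp(17, 8) = 24      // final round-up to stkalign
//	stkptrsize = RoundUp(16, 8) = 16
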
const maxStackSize = 1 << 30

// Compile builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func Compile(fn *ir.Func, worker int, profile *pgoir.Profile) {
	f := buildssa(fn, worker, inline.IsPgoHotFunc(fn, profile) || inline.HasPgoHotInline(fn))
	// Note: check arg size to fix issue 25507.
	if f.Frontend().(*ssafn).stksize >= maxStackSize || f.OwnAux.ArgWidth() >= maxStackSize {
		largeStackFramesMu.Lock()
		largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: f.OwnAux.ArgWidth(), pos: fn.Pos()})
		largeStackFramesMu.Unlock()
		return
	}
	pp := objw.NewProgs(fn, worker)
	defer pp.Free()
	genssa(f, pp)
	// Check frame size again.
	// The check above included only the space needed for local variables.
	// After genssa, the space needed includes local variables and the callee arg region.
	// We must do this check prior to calling pp.Flush.
	// If there are any oversized stack frames,
	// the assembler may emit inscrutable complaints about invalid instructions.
	if pp.Text.To.Offset >= maxStackSize {
		largeStackFramesMu.Lock()
		locals := f.Frontend().(*ssafn).stksize
		largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: f.OwnAux.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
		largeStackFramesMu.Unlock()
		return
	}

	pp.Flush() // assemble, fill in boilerplate, etc.

	// If we're compiling the package init function, search for any
	// relocations that target global map init outline functions and
	// turn them into weak relocs.
	if fn.IsPackageInit() && base.Debug.WrapGlobalMapCtl != 1 {
		weakenGlobalMapInitRelocs(fn)
	}

	// fieldtrack must be called after pp.Flush. See issue 20014.
	fieldtrack(pp.Text.From.Sym, fn.FieldTrack)
}
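
// Illustrative accounting for the two checks above (hypothetical
// numbers, not from the original source): maxStackSize is 1 GiB, and
// after genssa the total frame size (pp.Text.To.Offset) covers locals
// plus the callee argument region, so the second check reports
//
//	callee := pp.Text.To.Offset - locals // the portion beyond locals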

// globalMapInitLsyms records the LSym of each map.init.NNN outlined
// map initializer function created by the compiler.
var globalMapInitLsyms map[*obj.LSym]struct{}

// RegisterMapInitLsym records "s" in the set of outlined map initializer
// functions.
func RegisterMapInitLsym(s *obj.LSym) {
	if globalMapInitLsyms == nil {
		globalMapInitLsyms = make(map[*obj.LSym]struct{})
	}
	globalMapInitLsyms[s] = struct{}{}
}

// weakenGlobalMapInitRelocs walks through all of the relocations on a
// given package init function "fn" and looks for relocs that target
// outlined global map initializer functions; if it finds any such
// relocs, it flags them as R_WEAK.
func weakenGlobalMapInitRelocs(fn *ir.Func) {
	if globalMapInitLsyms == nil {
		return
	}
	for i := range fn.LSym.R {
		tgt := fn.LSym.R[i].Sym
		if tgt == nil {
			continue
		}
		if _, ok := globalMapInitLsyms[tgt]; !ok {
			continue
		}
		if base.Debug.WrapGlobalMapDbg > 1 {
			fmt.Fprintf(os.Stderr, "=-= weakify fn %v reloc %d %+v\n", fn, i,
				fn.LSym.R[i])
		}
		// Set the R_WEAK bit; leave the rest of the reloc type intact.
		fn.LSym.R[i].Type |= objabi.R_WEAK
	}
}

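// The weakening above is a flag OR rather than a type replacement.
// A minimal sketch (the reloc value is hypothetical, not from the
// original source):
//
//	r := obj.Reloc{Type: objabi.R_CALL}
//	r.Type |= objabi.R_WEAK // now a weak call; the base reloc kind is preserved
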
// StackOffset returns the stack location of a LocalSlot relative to the
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
func StackOffset(slot ssa.LocalSlot) int32 {
	n := slot.N
	var off int64
	switch n.Class {
	case ir.PPARAM, ir.PPARAMOUT:
		if !n.IsOutputParamInRegisters() {
			off = n.FrameOffset() + base.Ctxt.Arch.FixedFrameSize
			break
		}
		fallthrough // PPARAMOUT in registers allocates like an AUTO
	case ir.PAUTO:
		off = n.FrameOffset()
		if base.Ctxt.Arch.FixedFrameSize == 0 {
			off -= int64(types.PtrSize)
		}
		if buildcfg.FramePointerEnabled {
			off -= int64(types.PtrSize)
		}
	}
	return int32(off + slot.Off)
}

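// Illustrative arithmetic for the AUTO case above (hypothetical values,
// not from the original source): on a target with FixedFrameSize == 0,
// the frame pointer enabled, and 8-byte pointers, an auto with
// FrameOffset() == -16 and slot.Off == 8 yields
//
//	off = -16 - 8 - 8     // both adjustments apply: -32
//	int32(off + slot.Off) // -24
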
// fieldtrack adds R_USEFIELD relocations to fnsym to record any
// struct fields that it used.
func fieldtrack(fnsym *obj.LSym, tracked map[*obj.LSym]struct{}) {
	if fnsym == nil {
		return
	}
	if !buildcfg.Experiment.FieldTrack || len(tracked) == 0 {
		return
	}

	trackSyms := make([]*obj.LSym, 0, len(tracked))
	for sym := range tracked {
		trackSyms = append(trackSyms, sym)
	}
	slices.SortFunc(trackSyms, func(a, b *obj.LSym) int { return strings.Compare(a.Name, b.Name) })
	for _, sym := range trackSyms {
		fnsym.AddRel(base.Ctxt, obj.Reloc{Type: objabi.R_USEFIELD, Sym: sym})
	}
}

// largeStack is info about a function whose stack frame is too large (rare).
type largeStack struct {
	locals int64
	args   int64
	callee int64
	pos    src.XPos
}

var (
	largeStackFramesMu sync.Mutex // protects largeStackFrames
	largeStackFrames   []largeStack
)

func CheckLargeStacks() {
	// Check whether any of the functions we have compiled have gigantic stack frames.
	sort.Slice(largeStackFrames, func(i, j int) bool {
		return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
	})
	for _, large := range largeStackFrames {
		if large.callee != 0 {
			base.ErrorfAt(large.pos, 0, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
		} else {
			base.ErrorfAt(large.pos, 0, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
		}
	}
}