Source file src/cmd/compile/internal/riscv64/ssa.go

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package riscv64
     6  
     7  import (
     8  	"cmd/compile/internal/base"
     9  	"cmd/compile/internal/ir"
    10  	"cmd/compile/internal/logopt"
    11  	"cmd/compile/internal/objw"
    12  	"cmd/compile/internal/ssa"
    13  	"cmd/compile/internal/ssagen"
    14  	"cmd/compile/internal/types"
    15  	"cmd/internal/obj"
    16  	"cmd/internal/obj/riscv"
    17  	"internal/abi"
    18  )
    19  
// ssaRegToReg maps ssa register numbers to obj register numbers.
//
// The index into this slice is the SSA register number. Note that X1 (LR)
// is deliberately absent, so all entries after X0 are shifted down by one
// relative to the hardware register numbering.
var ssaRegToReg = []int16{
	riscv.REG_X0,
	// X1 (LR): unused
	riscv.REG_X2,
	riscv.REG_X3,
	riscv.REG_X4,
	riscv.REG_X5,
	riscv.REG_X6,
	riscv.REG_X7,
	riscv.REG_X8,
	riscv.REG_X9,
	riscv.REG_X10,
	riscv.REG_X11,
	riscv.REG_X12,
	riscv.REG_X13,
	riscv.REG_X14,
	riscv.REG_X15,
	riscv.REG_X16,
	riscv.REG_X17,
	riscv.REG_X18,
	riscv.REG_X19,
	riscv.REG_X20,
	riscv.REG_X21,
	riscv.REG_X22,
	riscv.REG_X23,
	riscv.REG_X24,
	riscv.REG_X25,
	riscv.REG_X26,
	riscv.REG_X27,
	riscv.REG_X28,
	riscv.REG_X29,
	riscv.REG_X30,
	riscv.REG_X31,
	riscv.REG_F0,
	riscv.REG_F1,
	riscv.REG_F2,
	riscv.REG_F3,
	riscv.REG_F4,
	riscv.REG_F5,
	riscv.REG_F6,
	riscv.REG_F7,
	riscv.REG_F8,
	riscv.REG_F9,
	riscv.REG_F10,
	riscv.REG_F11,
	riscv.REG_F12,
	riscv.REG_F13,
	riscv.REG_F14,
	riscv.REG_F15,
	riscv.REG_F16,
	riscv.REG_F17,
	riscv.REG_F18,
	riscv.REG_F19,
	riscv.REG_F20,
	riscv.REG_F21,
	riscv.REG_F22,
	riscv.REG_F23,
	riscv.REG_F24,
	riscv.REG_F25,
	riscv.REG_F26,
	riscv.REG_F27,
	riscv.REG_F28,
	riscv.REG_F29,
	riscv.REG_F30,
	riscv.REG_F31,
	0, // SB isn't a real register.  We fill an Addr.Reg field with 0 in this case.
}
    88  
    89  func loadByType(t *types.Type) obj.As {
    90  	width := t.Size()
    91  
    92  	if t.IsFloat() {
    93  		switch width {
    94  		case 4:
    95  			return riscv.AMOVF
    96  		case 8:
    97  			return riscv.AMOVD
    98  		default:
    99  			base.Fatalf("unknown float width for load %d in type %v", width, t)
   100  			return 0
   101  		}
   102  	}
   103  
   104  	switch width {
   105  	case 1:
   106  		if t.IsSigned() {
   107  			return riscv.AMOVB
   108  		} else {
   109  			return riscv.AMOVBU
   110  		}
   111  	case 2:
   112  		if t.IsSigned() {
   113  			return riscv.AMOVH
   114  		} else {
   115  			return riscv.AMOVHU
   116  		}
   117  	case 4:
   118  		if t.IsSigned() {
   119  			return riscv.AMOVW
   120  		} else {
   121  			return riscv.AMOVWU
   122  		}
   123  	case 8:
   124  		return riscv.AMOV
   125  	default:
   126  		base.Fatalf("unknown width for load %d in type %v", width, t)
   127  		return 0
   128  	}
   129  }
   130  
   131  // storeByType returns the store instruction of the given type.
   132  func storeByType(t *types.Type) obj.As {
   133  	width := t.Size()
   134  
   135  	if t.IsFloat() {
   136  		switch width {
   137  		case 4:
   138  			return riscv.AMOVF
   139  		case 8:
   140  			return riscv.AMOVD
   141  		default:
   142  			base.Fatalf("unknown float width for store %d in type %v", width, t)
   143  			return 0
   144  		}
   145  	}
   146  
   147  	switch width {
   148  	case 1:
   149  		return riscv.AMOVB
   150  	case 2:
   151  		return riscv.AMOVH
   152  	case 4:
   153  		return riscv.AMOVW
   154  	case 8:
   155  		return riscv.AMOV
   156  	default:
   157  		base.Fatalf("unknown width for store %d in type %v", width, t)
   158  		return 0
   159  	}
   160  }
   161  
   162  // largestMove returns the largest move instruction possible and its size,
   163  // given the alignment of the total size of the move.
   164  //
   165  // e.g., a 16-byte move may use MOV, but an 11-byte move must use MOVB.
   166  //
   167  // Note that the moves may not be on naturally aligned addresses depending on
   168  // the source and destination.
   169  //
   170  // This matches the calculation in ssa.moveSize.
   171  func largestMove(alignment int64) (obj.As, int64) {
   172  	switch {
   173  	case alignment%8 == 0:
   174  		return riscv.AMOV, 8
   175  	case alignment%4 == 0:
   176  		return riscv.AMOVW, 4
   177  	case alignment%2 == 0:
   178  		return riscv.AMOVH, 2
   179  	default:
   180  		return riscv.AMOVB, 1
   181  	}
   182  }
   183  
   184  var fracMovOps = []obj.As{riscv.AMOVB, riscv.AMOVH, riscv.AMOVW, riscv.AMOV}
   185  
// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
// RISC-V has no flags register, so there is nothing to mark and this is a
// no-op.
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {}
   189  
   190  func ssaGenValue(s *ssagen.State, v *ssa.Value) {
   191  	s.SetPos(v.Pos)
   192  
   193  	switch v.Op {
   194  	case ssa.OpInitMem:
   195  		// memory arg needs no code
   196  	case ssa.OpArg:
   197  		// input args need no code
   198  	case ssa.OpPhi:
   199  		ssagen.CheckLoweredPhi(v)
   200  	case ssa.OpCopy, ssa.OpRISCV64MOVDreg:
   201  		if v.Type.IsMemory() {
   202  			return
   203  		}
   204  		rs := v.Args[0].Reg()
   205  		rd := v.Reg()
   206  		if rs == rd {
   207  			return
   208  		}
   209  		as := riscv.AMOV
   210  		if v.Type.IsFloat() {
   211  			as = riscv.AMOVD
   212  		}
   213  		p := s.Prog(as)
   214  		p.From.Type = obj.TYPE_REG
   215  		p.From.Reg = rs
   216  		p.To.Type = obj.TYPE_REG
   217  		p.To.Reg = rd
   218  	case ssa.OpRISCV64MOVDnop:
   219  		// nothing to do
   220  	case ssa.OpLoadReg:
   221  		if v.Type.IsFlags() {
   222  			v.Fatalf("load flags not implemented: %v", v.LongString())
   223  			return
   224  		}
   225  		p := s.Prog(loadByType(v.Type))
   226  		ssagen.AddrAuto(&p.From, v.Args[0])
   227  		p.To.Type = obj.TYPE_REG
   228  		p.To.Reg = v.Reg()
   229  	case ssa.OpStoreReg:
   230  		if v.Type.IsFlags() {
   231  			v.Fatalf("store flags not implemented: %v", v.LongString())
   232  			return
   233  		}
   234  		p := s.Prog(storeByType(v.Type))
   235  		p.From.Type = obj.TYPE_REG
   236  		p.From.Reg = v.Args[0].Reg()
   237  		ssagen.AddrAuto(&p.To, v)
   238  	case ssa.OpArgIntReg, ssa.OpArgFloatReg:
   239  		// The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
   240  		// The loop only runs once.
   241  		for _, a := range v.Block.Func.RegArgs {
   242  			// Pass the spill/unspill information along to the assembler, offset by size of
   243  			// the saved LR slot.
   244  			addr := ssagen.SpillSlotAddr(a, riscv.REG_SP, base.Ctxt.Arch.FixedFrameSize)
   245  			s.FuncInfo().AddSpill(
   246  				obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
   247  		}
   248  		v.Block.Func.RegArgs = nil
   249  
   250  		ssagen.CheckArgReg(v)
   251  	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
   252  		// nothing to do
   253  	case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
   254  		ssa.OpRISCV64MOVBUreg, ssa.OpRISCV64MOVHUreg, ssa.OpRISCV64MOVWUreg:
   255  		a := v.Args[0]
   256  		for a.Op == ssa.OpCopy || a.Op == ssa.OpRISCV64MOVDreg {
   257  			a = a.Args[0]
   258  		}
   259  		as := v.Op.Asm()
   260  		rs := v.Args[0].Reg()
   261  		rd := v.Reg()
   262  		if a.Op == ssa.OpLoadReg {
   263  			t := a.Type
   264  			switch {
   265  			case v.Op == ssa.OpRISCV64MOVBreg && t.Size() == 1 && t.IsSigned(),
   266  				v.Op == ssa.OpRISCV64MOVHreg && t.Size() == 2 && t.IsSigned(),
   267  				v.Op == ssa.OpRISCV64MOVWreg && t.Size() == 4 && t.IsSigned(),
   268  				v.Op == ssa.OpRISCV64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
   269  				v.Op == ssa.OpRISCV64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
   270  				v.Op == ssa.OpRISCV64MOVWUreg && t.Size() == 4 && !t.IsSigned():
   271  				// arg is a proper-typed load and already sign/zero-extended
   272  				if rs == rd {
   273  					return
   274  				}
   275  				as = riscv.AMOV
   276  			default:
   277  			}
   278  		}
   279  		p := s.Prog(as)
   280  		p.From.Type = obj.TYPE_REG
   281  		p.From.Reg = rs
   282  		p.To.Type = obj.TYPE_REG
   283  		p.To.Reg = rd
   284  	case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XNOR, ssa.OpRISCV64XOR,
   285  		ssa.OpRISCV64OR, ssa.OpRISCV64ORN, ssa.OpRISCV64AND, ssa.OpRISCV64ANDN,
   286  		ssa.OpRISCV64SLL, ssa.OpRISCV64SLLW, ssa.OpRISCV64SRA, ssa.OpRISCV64SRAW, ssa.OpRISCV64SRL, ssa.OpRISCV64SRLW,
   287  		ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
   288  		ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW,
   289  		ssa.OpRISCV64DIVUW, ssa.OpRISCV64REM, ssa.OpRISCV64REMU, ssa.OpRISCV64REMW,
   290  		ssa.OpRISCV64REMUW,
   291  		ssa.OpRISCV64ROL, ssa.OpRISCV64ROLW, ssa.OpRISCV64ROR, ssa.OpRISCV64RORW,
   292  		ssa.OpRISCV64FADDS, ssa.OpRISCV64FSUBS, ssa.OpRISCV64FMULS, ssa.OpRISCV64FDIVS,
   293  		ssa.OpRISCV64FEQS, ssa.OpRISCV64FNES, ssa.OpRISCV64FLTS, ssa.OpRISCV64FLES,
   294  		ssa.OpRISCV64FADDD, ssa.OpRISCV64FSUBD, ssa.OpRISCV64FMULD, ssa.OpRISCV64FDIVD,
   295  		ssa.OpRISCV64FEQD, ssa.OpRISCV64FNED, ssa.OpRISCV64FLTD, ssa.OpRISCV64FLED, ssa.OpRISCV64FSGNJD,
   296  		ssa.OpRISCV64MIN, ssa.OpRISCV64MAX, ssa.OpRISCV64MINU, ssa.OpRISCV64MAXU,
   297  		ssa.OpRISCV64SH1ADD, ssa.OpRISCV64SH2ADD, ssa.OpRISCV64SH3ADD,
   298  		ssa.OpRISCV64CZEROEQZ, ssa.OpRISCV64CZERONEZ:
   299  		r := v.Reg()
   300  		r1 := v.Args[0].Reg()
   301  		r2 := v.Args[1].Reg()
   302  		p := s.Prog(v.Op.Asm())
   303  		p.From.Type = obj.TYPE_REG
   304  		p.From.Reg = r2
   305  		p.Reg = r1
   306  		p.To.Type = obj.TYPE_REG
   307  		p.To.Reg = r
   308  
   309  	case ssa.OpRISCV64LoweredFMAXD, ssa.OpRISCV64LoweredFMIND, ssa.OpRISCV64LoweredFMAXS, ssa.OpRISCV64LoweredFMINS:
   310  		// Most of FMIN/FMAX result match Go's required behaviour, unless one of the
   311  		// inputs is a NaN. As such, we need to explicitly test for NaN
   312  		// before using FMIN/FMAX.
   313  
   314  		// FADD Rarg0, Rarg1, Rout // FADD is used to propagate a NaN to the result in these cases.
   315  		// FEQ  Rarg0, Rarg0, Rtmp
   316  		// BEQZ Rtmp, end
   317  		// FEQ  Rarg1, Rarg1, Rtmp
   318  		// BEQZ Rtmp, end
   319  		// F(MIN | MAX)
   320  
   321  		r0 := v.Args[0].Reg()
   322  		r1 := v.Args[1].Reg()
   323  		out := v.Reg()
   324  		add, feq := riscv.AFADDD, riscv.AFEQD
   325  		if v.Op == ssa.OpRISCV64LoweredFMAXS || v.Op == ssa.OpRISCV64LoweredFMINS {
   326  			add = riscv.AFADDS
   327  			feq = riscv.AFEQS
   328  		}
   329  
   330  		p1 := s.Prog(add)
   331  		p1.From.Type = obj.TYPE_REG
   332  		p1.From.Reg = r0
   333  		p1.Reg = r1
   334  		p1.To.Type = obj.TYPE_REG
   335  		p1.To.Reg = out
   336  
   337  		p2 := s.Prog(feq)
   338  		p2.From.Type = obj.TYPE_REG
   339  		p2.From.Reg = r0
   340  		p2.Reg = r0
   341  		p2.To.Type = obj.TYPE_REG
   342  		p2.To.Reg = riscv.REG_TMP
   343  
   344  		p3 := s.Prog(riscv.ABEQ)
   345  		p3.From.Type = obj.TYPE_REG
   346  		p3.From.Reg = riscv.REG_ZERO
   347  		p3.Reg = riscv.REG_TMP
   348  		p3.To.Type = obj.TYPE_BRANCH
   349  
   350  		p4 := s.Prog(feq)
   351  		p4.From.Type = obj.TYPE_REG
   352  		p4.From.Reg = r1
   353  		p4.Reg = r1
   354  		p4.To.Type = obj.TYPE_REG
   355  		p4.To.Reg = riscv.REG_TMP
   356  
   357  		p5 := s.Prog(riscv.ABEQ)
   358  		p5.From.Type = obj.TYPE_REG
   359  		p5.From.Reg = riscv.REG_ZERO
   360  		p5.Reg = riscv.REG_TMP
   361  		p5.To.Type = obj.TYPE_BRANCH
   362  
   363  		p6 := s.Prog(v.Op.Asm())
   364  		p6.From.Type = obj.TYPE_REG
   365  		p6.From.Reg = r1
   366  		p6.Reg = r0
   367  		p6.To.Type = obj.TYPE_REG
   368  		p6.To.Reg = out
   369  
   370  		nop := s.Prog(obj.ANOP)
   371  		p3.To.SetTarget(nop)
   372  		p5.To.SetTarget(nop)
   373  
   374  	case ssa.OpRISCV64LoweredMuluhilo:
   375  		r0 := v.Args[0].Reg()
   376  		r1 := v.Args[1].Reg()
   377  		p := s.Prog(riscv.AMULHU)
   378  		p.From.Type = obj.TYPE_REG
   379  		p.From.Reg = r1
   380  		p.Reg = r0
   381  		p.To.Type = obj.TYPE_REG
   382  		p.To.Reg = v.Reg0()
   383  		p1 := s.Prog(riscv.AMUL)
   384  		p1.From.Type = obj.TYPE_REG
   385  		p1.From.Reg = r1
   386  		p1.Reg = r0
   387  		p1.To.Type = obj.TYPE_REG
   388  		p1.To.Reg = v.Reg1()
   389  	case ssa.OpRISCV64LoweredMuluover:
   390  		r0 := v.Args[0].Reg()
   391  		r1 := v.Args[1].Reg()
   392  		p := s.Prog(riscv.AMULHU)
   393  		p.From.Type = obj.TYPE_REG
   394  		p.From.Reg = r1
   395  		p.Reg = r0
   396  		p.To.Type = obj.TYPE_REG
   397  		p.To.Reg = v.Reg1()
   398  		p1 := s.Prog(riscv.AMUL)
   399  		p1.From.Type = obj.TYPE_REG
   400  		p1.From.Reg = r1
   401  		p1.Reg = r0
   402  		p1.To.Type = obj.TYPE_REG
   403  		p1.To.Reg = v.Reg0()
   404  		p2 := s.Prog(riscv.ASNEZ)
   405  		p2.From.Type = obj.TYPE_REG
   406  		p2.From.Reg = v.Reg1()
   407  		p2.To.Type = obj.TYPE_REG
   408  		p2.To.Reg = v.Reg1()
   409  	case ssa.OpRISCV64FMADDD, ssa.OpRISCV64FMSUBD, ssa.OpRISCV64FNMADDD, ssa.OpRISCV64FNMSUBD,
   410  		ssa.OpRISCV64FMADDS, ssa.OpRISCV64FMSUBS, ssa.OpRISCV64FNMADDS, ssa.OpRISCV64FNMSUBS:
   411  		r := v.Reg()
   412  		r1 := v.Args[0].Reg()
   413  		r2 := v.Args[1].Reg()
   414  		r3 := v.Args[2].Reg()
   415  		p := s.Prog(v.Op.Asm())
   416  		p.From.Type = obj.TYPE_REG
   417  		p.From.Reg = r2
   418  		p.Reg = r1
   419  		p.AddRestSource(obj.Addr{Type: obj.TYPE_REG, Reg: r3})
   420  		p.To.Type = obj.TYPE_REG
   421  		p.To.Reg = r
   422  	case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FABSD, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD,
   423  		ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVXS, ssa.OpRISCV64FMVDX, ssa.OpRISCV64FMVXD,
   424  		ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS,
   425  		ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, ssa.OpRISCV64FCVTSD,
   426  		ssa.OpRISCV64FCLASSS, ssa.OpRISCV64FCLASSD,
   427  		ssa.OpRISCV64NOT, ssa.OpRISCV64NEG, ssa.OpRISCV64NEGW, ssa.OpRISCV64CLZ, ssa.OpRISCV64CLZW, ssa.OpRISCV64CTZ, ssa.OpRISCV64CTZW,
   428  		ssa.OpRISCV64REV8, ssa.OpRISCV64CPOP, ssa.OpRISCV64CPOPW:
   429  		p := s.Prog(v.Op.Asm())
   430  		p.From.Type = obj.TYPE_REG
   431  		p.From.Reg = v.Args[0].Reg()
   432  		p.To.Type = obj.TYPE_REG
   433  		p.To.Reg = v.Reg()
   434  	case ssa.OpRISCV64ADDI, ssa.OpRISCV64ADDIW, ssa.OpRISCV64XORI, ssa.OpRISCV64ORI, ssa.OpRISCV64ANDI,
   435  		ssa.OpRISCV64SLLI, ssa.OpRISCV64SLLIW, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRAIW,
   436  		ssa.OpRISCV64SRLI, ssa.OpRISCV64SRLIW, ssa.OpRISCV64SLTI, ssa.OpRISCV64SLTIU,
   437  		ssa.OpRISCV64RORI, ssa.OpRISCV64RORIW:
   438  		p := s.Prog(v.Op.Asm())
   439  		p.From.Type = obj.TYPE_CONST
   440  		p.From.Offset = v.AuxInt
   441  		p.Reg = v.Args[0].Reg()
   442  		p.To.Type = obj.TYPE_REG
   443  		p.To.Reg = v.Reg()
   444  	case ssa.OpRISCV64MOVDconst:
   445  		p := s.Prog(v.Op.Asm())
   446  		p.From.Type = obj.TYPE_CONST
   447  		p.From.Offset = v.AuxInt
   448  		p.To.Type = obj.TYPE_REG
   449  		p.To.Reg = v.Reg()
   450  	case ssa.OpRISCV64FMOVDconst, ssa.OpRISCV64FMOVFconst:
   451  		p := s.Prog(v.Op.Asm())
   452  		p.From.Type = obj.TYPE_FCONST
   453  		p.From.Val = v.AuxFloat()
   454  		p.From.Name = obj.NAME_NONE
   455  		p.From.Reg = obj.REG_NONE
   456  		p.To.Type = obj.TYPE_REG
   457  		p.To.Reg = v.Reg()
   458  	case ssa.OpRISCV64MOVaddr:
   459  		p := s.Prog(v.Op.Asm())
   460  		p.From.Type = obj.TYPE_ADDR
   461  		p.To.Type = obj.TYPE_REG
   462  		p.To.Reg = v.Reg()
   463  
   464  		var wantreg string
   465  		// MOVW $sym+off(base), R
   466  		switch v.Aux.(type) {
   467  		default:
   468  			v.Fatalf("aux is of unknown type %T", v.Aux)
   469  		case *obj.LSym:
   470  			wantreg = "SB"
   471  			ssagen.AddAux(&p.From, v)
   472  		case *ir.Name:
   473  			wantreg = "SP"
   474  			ssagen.AddAux(&p.From, v)
   475  		case nil:
   476  			// No sym, just MOVW $off(SP), R
   477  			wantreg = "SP"
   478  			p.From.Reg = riscv.REG_SP
   479  			p.From.Offset = v.AuxInt
   480  		}
   481  		if reg := v.Args[0].RegName(); reg != wantreg {
   482  			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
   483  		}
   484  	case ssa.OpRISCV64MOVBload, ssa.OpRISCV64MOVHload, ssa.OpRISCV64MOVWload, ssa.OpRISCV64MOVDload,
   485  		ssa.OpRISCV64MOVBUload, ssa.OpRISCV64MOVHUload, ssa.OpRISCV64MOVWUload,
   486  		ssa.OpRISCV64FMOVWload, ssa.OpRISCV64FMOVDload:
   487  		p := s.Prog(v.Op.Asm())
   488  		p.From.Type = obj.TYPE_MEM
   489  		p.From.Reg = v.Args[0].Reg()
   490  		ssagen.AddAux(&p.From, v)
   491  		p.To.Type = obj.TYPE_REG
   492  		p.To.Reg = v.Reg()
   493  	case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore,
   494  		ssa.OpRISCV64FMOVWstore, ssa.OpRISCV64FMOVDstore:
   495  		p := s.Prog(v.Op.Asm())
   496  		p.From.Type = obj.TYPE_REG
   497  		p.From.Reg = v.Args[1].Reg()
   498  		p.To.Type = obj.TYPE_MEM
   499  		p.To.Reg = v.Args[0].Reg()
   500  		ssagen.AddAux(&p.To, v)
   501  	case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
   502  		p := s.Prog(v.Op.Asm())
   503  		p.From.Type = obj.TYPE_REG
   504  		p.From.Reg = riscv.REG_ZERO
   505  		p.To.Type = obj.TYPE_MEM
   506  		p.To.Reg = v.Args[0].Reg()
   507  		ssagen.AddAux(&p.To, v)
   508  	case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
   509  		p := s.Prog(v.Op.Asm())
   510  		p.From.Type = obj.TYPE_REG
   511  		p.From.Reg = v.Args[0].Reg()
   512  		p.To.Type = obj.TYPE_REG
   513  		p.To.Reg = v.Reg()
   514  	case ssa.OpRISCV64CALLstatic, ssa.OpRISCV64CALLclosure, ssa.OpRISCV64CALLinter:
   515  		s.Call(v)
   516  	case ssa.OpRISCV64CALLtail, ssa.OpRISCV64CALLtailinter:
   517  		s.TailCall(v)
   518  	case ssa.OpRISCV64LoweredWB:
   519  		p := s.Prog(obj.ACALL)
   520  		p.To.Type = obj.TYPE_MEM
   521  		p.To.Name = obj.NAME_EXTERN
   522  		// AuxInt encodes how many buffer entries we need.
   523  		p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
   524  
   525  	case ssa.OpRISCV64LoweredPanicBoundsRR, ssa.OpRISCV64LoweredPanicBoundsRC, ssa.OpRISCV64LoweredPanicBoundsCR, ssa.OpRISCV64LoweredPanicBoundsCC:
   526  		// Compute the constant we put in the PCData entry for this call.
   527  		code, signed := ssa.BoundsKind(v.AuxInt).Code()
   528  		xIsReg := false
   529  		yIsReg := false
   530  		xVal := 0
   531  		yVal := 0
   532  		switch v.Op {
   533  		case ssa.OpRISCV64LoweredPanicBoundsRR:
   534  			xIsReg = true
   535  			xVal = int(v.Args[0].Reg() - riscv.REG_X5)
   536  			yIsReg = true
   537  			yVal = int(v.Args[1].Reg() - riscv.REG_X5)
   538  		case ssa.OpRISCV64LoweredPanicBoundsRC:
   539  			xIsReg = true
   540  			xVal = int(v.Args[0].Reg() - riscv.REG_X5)
   541  			c := v.Aux.(ssa.PanicBoundsC).C
   542  			if c >= 0 && c <= abi.BoundsMaxConst {
   543  				yVal = int(c)
   544  			} else {
   545  				// Move constant to a register
   546  				yIsReg = true
   547  				if yVal == xVal {
   548  					yVal = 1
   549  				}
   550  				p := s.Prog(riscv.AMOV)
   551  				p.From.Type = obj.TYPE_CONST
   552  				p.From.Offset = c
   553  				p.To.Type = obj.TYPE_REG
   554  				p.To.Reg = riscv.REG_X5 + int16(yVal)
   555  			}
   556  		case ssa.OpRISCV64LoweredPanicBoundsCR:
   557  			yIsReg = true
   558  			yVal = int(v.Args[0].Reg() - riscv.REG_X5)
   559  			c := v.Aux.(ssa.PanicBoundsC).C
   560  			if c >= 0 && c <= abi.BoundsMaxConst {
   561  				xVal = int(c)
   562  			} else {
   563  				// Move constant to a register
   564  				if xVal == yVal {
   565  					xVal = 1
   566  				}
   567  				p := s.Prog(riscv.AMOV)
   568  				p.From.Type = obj.TYPE_CONST
   569  				p.From.Offset = c
   570  				p.To.Type = obj.TYPE_REG
   571  				p.To.Reg = riscv.REG_X5 + int16(xVal)
   572  			}
   573  		case ssa.OpRISCV64LoweredPanicBoundsCC:
   574  			c := v.Aux.(ssa.PanicBoundsCC).Cx
   575  			if c >= 0 && c <= abi.BoundsMaxConst {
   576  				xVal = int(c)
   577  			} else {
   578  				// Move constant to a register
   579  				xIsReg = true
   580  				p := s.Prog(riscv.AMOV)
   581  				p.From.Type = obj.TYPE_CONST
   582  				p.From.Offset = c
   583  				p.To.Type = obj.TYPE_REG
   584  				p.To.Reg = riscv.REG_X5 + int16(xVal)
   585  			}
   586  			c = v.Aux.(ssa.PanicBoundsCC).Cy
   587  			if c >= 0 && c <= abi.BoundsMaxConst {
   588  				yVal = int(c)
   589  			} else {
   590  				// Move constant to a register
   591  				yIsReg = true
   592  				yVal = 1
   593  				p := s.Prog(riscv.AMOV)
   594  				p.From.Type = obj.TYPE_CONST
   595  				p.From.Offset = c
   596  				p.To.Type = obj.TYPE_REG
   597  				p.To.Reg = riscv.REG_X5 + int16(yVal)
   598  			}
   599  		}
   600  		c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
   601  
   602  		p := s.Prog(obj.APCDATA)
   603  		p.From.SetConst(abi.PCDATA_PanicBounds)
   604  		p.To.SetConst(int64(c))
   605  		p = s.Prog(obj.ACALL)
   606  		p.To.Type = obj.TYPE_MEM
   607  		p.To.Name = obj.NAME_EXTERN
   608  		p.To.Sym = ir.Syms.PanicBounds
   609  
   610  	case ssa.OpRISCV64LoweredAtomicLoad8:
   611  		s.Prog(riscv.AFENCE)
   612  		p := s.Prog(riscv.AMOVBU)
   613  		p.From.Type = obj.TYPE_MEM
   614  		p.From.Reg = v.Args[0].Reg()
   615  		p.To.Type = obj.TYPE_REG
   616  		p.To.Reg = v.Reg0()
   617  		s.Prog(riscv.AFENCE)
   618  
   619  	case ssa.OpRISCV64LoweredAtomicLoad32, ssa.OpRISCV64LoweredAtomicLoad64:
   620  		as := riscv.ALRW
   621  		if v.Op == ssa.OpRISCV64LoweredAtomicLoad64 {
   622  			as = riscv.ALRD
   623  		}
   624  		p := s.Prog(as)
   625  		p.From.Type = obj.TYPE_MEM
   626  		p.From.Reg = v.Args[0].Reg()
   627  		p.To.Type = obj.TYPE_REG
   628  		p.To.Reg = v.Reg0()
   629  
   630  	case ssa.OpRISCV64LoweredAtomicStore8:
   631  		s.Prog(riscv.AFENCE)
   632  		p := s.Prog(riscv.AMOVB)
   633  		p.From.Type = obj.TYPE_REG
   634  		p.From.Reg = v.Args[1].Reg()
   635  		p.To.Type = obj.TYPE_MEM
   636  		p.To.Reg = v.Args[0].Reg()
   637  		s.Prog(riscv.AFENCE)
   638  
   639  	case ssa.OpRISCV64LoweredAtomicStore32, ssa.OpRISCV64LoweredAtomicStore64:
   640  		as := riscv.AAMOSWAPW
   641  		if v.Op == ssa.OpRISCV64LoweredAtomicStore64 {
   642  			as = riscv.AAMOSWAPD
   643  		}
   644  		p := s.Prog(as)
   645  		p.From.Type = obj.TYPE_REG
   646  		p.From.Reg = v.Args[1].Reg()
   647  		p.To.Type = obj.TYPE_MEM
   648  		p.To.Reg = v.Args[0].Reg()
   649  		p.RegTo2 = riscv.REG_ZERO
   650  
   651  	case ssa.OpRISCV64LoweredAtomicAdd32, ssa.OpRISCV64LoweredAtomicAdd64:
   652  		as := riscv.AAMOADDW
   653  		if v.Op == ssa.OpRISCV64LoweredAtomicAdd64 {
   654  			as = riscv.AAMOADDD
   655  		}
   656  		p := s.Prog(as)
   657  		p.From.Type = obj.TYPE_REG
   658  		p.From.Reg = v.Args[1].Reg()
   659  		p.To.Type = obj.TYPE_MEM
   660  		p.To.Reg = v.Args[0].Reg()
   661  		p.RegTo2 = riscv.REG_TMP
   662  
   663  		p2 := s.Prog(riscv.AADD)
   664  		p2.From.Type = obj.TYPE_REG
   665  		p2.From.Reg = riscv.REG_TMP
   666  		p2.Reg = v.Args[1].Reg()
   667  		p2.To.Type = obj.TYPE_REG
   668  		p2.To.Reg = v.Reg0()
   669  
   670  	case ssa.OpRISCV64LoweredAtomicExchange32, ssa.OpRISCV64LoweredAtomicExchange64:
   671  		as := riscv.AAMOSWAPW
   672  		if v.Op == ssa.OpRISCV64LoweredAtomicExchange64 {
   673  			as = riscv.AAMOSWAPD
   674  		}
   675  		p := s.Prog(as)
   676  		p.From.Type = obj.TYPE_REG
   677  		p.From.Reg = v.Args[1].Reg()
   678  		p.To.Type = obj.TYPE_MEM
   679  		p.To.Reg = v.Args[0].Reg()
   680  		p.RegTo2 = v.Reg0()
   681  
   682  	case ssa.OpRISCV64LoweredAtomicCas32, ssa.OpRISCV64LoweredAtomicCas64:
   683  		// MOV  ZERO, Rout
   684  		// LR	(Rarg0), Rtmp
   685  		// BNE	Rtmp, Rarg1, 3(PC)
   686  		// SC	Rarg2, (Rarg0), Rtmp
   687  		// BNE	Rtmp, ZERO, -3(PC)
   688  		// MOV	$1, Rout
   689  
   690  		lr := riscv.ALRW
   691  		sc := riscv.ASCW
   692  		if v.Op == ssa.OpRISCV64LoweredAtomicCas64 {
   693  			lr = riscv.ALRD
   694  			sc = riscv.ASCD
   695  		}
   696  
   697  		r0 := v.Args[0].Reg()
   698  		r1 := v.Args[1].Reg()
   699  		r2 := v.Args[2].Reg()
   700  		out := v.Reg0()
   701  
   702  		p := s.Prog(riscv.AMOV)
   703  		p.From.Type = obj.TYPE_REG
   704  		p.From.Reg = riscv.REG_ZERO
   705  		p.To.Type = obj.TYPE_REG
   706  		p.To.Reg = out
   707  
   708  		p1 := s.Prog(lr)
   709  		p1.From.Type = obj.TYPE_MEM
   710  		p1.From.Reg = r0
   711  		p1.To.Type = obj.TYPE_REG
   712  		p1.To.Reg = riscv.REG_TMP
   713  
   714  		p2 := s.Prog(riscv.ABNE)
   715  		p2.From.Type = obj.TYPE_REG
   716  		p2.From.Reg = r1
   717  		p2.Reg = riscv.REG_TMP
   718  		p2.To.Type = obj.TYPE_BRANCH
   719  
   720  		p3 := s.Prog(sc)
   721  		p3.From.Type = obj.TYPE_REG
   722  		p3.From.Reg = r2
   723  		p3.To.Type = obj.TYPE_MEM
   724  		p3.To.Reg = r0
   725  		p3.RegTo2 = riscv.REG_TMP
   726  
   727  		p4 := s.Prog(riscv.ABNE)
   728  		p4.From.Type = obj.TYPE_REG
   729  		p4.From.Reg = riscv.REG_TMP
   730  		p4.Reg = riscv.REG_ZERO
   731  		p4.To.Type = obj.TYPE_BRANCH
   732  		p4.To.SetTarget(p1)
   733  
   734  		p5 := s.Prog(riscv.AMOV)
   735  		p5.From.Type = obj.TYPE_CONST
   736  		p5.From.Offset = 1
   737  		p5.To.Type = obj.TYPE_REG
   738  		p5.To.Reg = out
   739  
   740  		p6 := s.Prog(obj.ANOP)
   741  		p2.To.SetTarget(p6)
   742  
   743  	case ssa.OpRISCV64LoweredAtomicAnd32, ssa.OpRISCV64LoweredAtomicOr32:
   744  		p := s.Prog(v.Op.Asm())
   745  		p.From.Type = obj.TYPE_REG
   746  		p.From.Reg = v.Args[1].Reg()
   747  		p.To.Type = obj.TYPE_MEM
   748  		p.To.Reg = v.Args[0].Reg()
   749  		p.RegTo2 = riscv.REG_ZERO
   750  
   751  	case ssa.OpRISCV64LoweredZero:
   752  		ptr := v.Args[0].Reg()
   753  		sc := v.AuxValAndOff()
   754  		n := sc.Val64()
   755  
   756  		mov, sz := largestMove(sc.Off64())
   757  
   758  		// mov	ZERO, (offset)(Rarg0)
   759  		var off int64
   760  		for n >= sz {
   761  			zeroOp(s, mov, ptr, off)
   762  			off += sz
   763  			n -= sz
   764  		}
   765  
   766  		for i := len(fracMovOps) - 1; i >= 0; i-- {
   767  			tsz := int64(1 << i)
   768  			if n < tsz {
   769  				continue
   770  			}
   771  			zeroOp(s, fracMovOps[i], ptr, off)
   772  			off += tsz
   773  			n -= tsz
   774  		}
   775  
   776  	case ssa.OpRISCV64LoweredZeroLoop:
   777  		ptr := v.Args[0].Reg()
   778  		sc := v.AuxValAndOff()
   779  		n := sc.Val64()
   780  		mov, sz := largestMove(sc.Off64())
   781  		chunk := 8 * sz
   782  
   783  		if n <= 3*chunk {
   784  			v.Fatalf("ZeroLoop too small:%d, expect:%d", n, 3*chunk)
   785  		}
   786  
   787  		tmp := v.RegTmp()
   788  
   789  		p := s.Prog(riscv.AADD)
   790  		p.From.Type = obj.TYPE_CONST
   791  		p.From.Offset = n - n%chunk
   792  		p.Reg = ptr
   793  		p.To.Type = obj.TYPE_REG
   794  		p.To.Reg = tmp
   795  
   796  		for i := int64(0); i < 8; i++ {
   797  			zeroOp(s, mov, ptr, sz*i)
   798  		}
   799  
   800  		p2 := s.Prog(riscv.AADD)
   801  		p2.From.Type = obj.TYPE_CONST
   802  		p2.From.Offset = chunk
   803  		p2.To.Type = obj.TYPE_REG
   804  		p2.To.Reg = ptr
   805  
   806  		p3 := s.Prog(riscv.ABNE)
   807  		p3.From.Reg = tmp
   808  		p3.From.Type = obj.TYPE_REG
   809  		p3.Reg = ptr
   810  		p3.To.Type = obj.TYPE_BRANCH
   811  		p3.To.SetTarget(p.Link)
   812  
   813  		n %= chunk
   814  
   815  		// mov	ZERO, (offset)(Rarg0)
   816  		var off int64
   817  		for n >= sz {
   818  			zeroOp(s, mov, ptr, off)
   819  			off += sz
   820  			n -= sz
   821  		}
   822  
   823  		for i := len(fracMovOps) - 1; i >= 0; i-- {
   824  			tsz := int64(1 << i)
   825  			if n < tsz {
   826  				continue
   827  			}
   828  			zeroOp(s, fracMovOps[i], ptr, off)
   829  			off += tsz
   830  			n -= tsz
   831  		}
   832  
   833  	case ssa.OpRISCV64LoweredMove:
   834  		dst := v.Args[0].Reg()
   835  		src := v.Args[1].Reg()
   836  		if dst == src {
   837  			break
   838  		}
   839  
   840  		sa := v.AuxValAndOff()
   841  		n := sa.Val64()
   842  		mov, sz := largestMove(sa.Off64())
   843  
   844  		var off int64
   845  		tmp := int16(riscv.REG_X5)
   846  		for n >= sz {
   847  			moveOp(s, mov, dst, src, tmp, off)
   848  			off += sz
   849  			n -= sz
   850  		}
   851  
   852  		for i := len(fracMovOps) - 1; i >= 0; i-- {
   853  			tsz := int64(1 << i)
   854  			if n < tsz {
   855  				continue
   856  			}
   857  			moveOp(s, fracMovOps[i], dst, src, tmp, off)
   858  			off += tsz
   859  			n -= tsz
   860  		}
   861  
   862  	case ssa.OpRISCV64LoweredMoveLoop:
   863  		dst := v.Args[0].Reg()
   864  		src := v.Args[1].Reg()
   865  		if dst == src {
   866  			break
   867  		}
   868  
   869  		sc := v.AuxValAndOff()
   870  		n := sc.Val64()
   871  		mov, sz := largestMove(sc.Off64())
   872  		chunk := 8 * sz
   873  
   874  		if n <= 3*chunk {
   875  			v.Fatalf("MoveLoop too small:%d, expect:%d", n, 3*chunk)
   876  		}
   877  		tmp := int16(riscv.REG_X5)
   878  
   879  		p := s.Prog(riscv.AADD)
   880  		p.From.Type = obj.TYPE_CONST
   881  		p.From.Offset = n - n%chunk
   882  		p.Reg = src
   883  		p.To.Type = obj.TYPE_REG
   884  		p.To.Reg = riscv.REG_X6
   885  
   886  		for i := int64(0); i < 8; i++ {
   887  			moveOp(s, mov, dst, src, tmp, sz*i)
   888  		}
   889  
   890  		p1 := s.Prog(riscv.AADD)
   891  		p1.From.Type = obj.TYPE_CONST
   892  		p1.From.Offset = chunk
   893  		p1.To.Type = obj.TYPE_REG
   894  		p1.To.Reg = src
   895  
   896  		p2 := s.Prog(riscv.AADD)
   897  		p2.From.Type = obj.TYPE_CONST
   898  		p2.From.Offset = chunk
   899  		p2.To.Type = obj.TYPE_REG
   900  		p2.To.Reg = dst
   901  
   902  		p3 := s.Prog(riscv.ABNE)
   903  		p3.From.Reg = riscv.REG_X6
   904  		p3.From.Type = obj.TYPE_REG
   905  		p3.Reg = src
   906  		p3.To.Type = obj.TYPE_BRANCH
   907  		p3.To.SetTarget(p.Link)
   908  
   909  		n %= chunk
   910  
   911  		var off int64
   912  		for n >= sz {
   913  			moveOp(s, mov, dst, src, tmp, off)
   914  			off += sz
   915  			n -= sz
   916  		}
   917  
   918  		for i := len(fracMovOps) - 1; i >= 0; i-- {
   919  			tsz := int64(1 << i)
   920  			if n < tsz {
   921  				continue
   922  			}
   923  			moveOp(s, fracMovOps[i], dst, src, tmp, off)
   924  			off += tsz
   925  			n -= tsz
   926  		}
   927  
   928  	case ssa.OpRISCV64LoweredNilCheck:
   929  		// Issue a load which will fault if arg is nil.
   930  		p := s.Prog(riscv.AMOVB)
   931  		p.From.Type = obj.TYPE_MEM
   932  		p.From.Reg = v.Args[0].Reg()
   933  		ssagen.AddAux(&p.From, v)
   934  		p.To.Type = obj.TYPE_REG
   935  		p.To.Reg = riscv.REG_ZERO
   936  		if logopt.Enabled() {
   937  			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
   938  		}
   939  		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
   940  			base.WarnfAt(v.Pos, "generated nil check")
   941  		}
   942  
   943  	case ssa.OpRISCV64LoweredGetClosurePtr:
   944  		// Closure pointer is S10 (riscv.REG_CTXT).
   945  		ssagen.CheckLoweredGetClosurePtr(v)
   946  
   947  	case ssa.OpRISCV64LoweredGetCallerSP:
   948  		// caller's SP is FixedFrameSize below the address of the first arg
   949  		p := s.Prog(riscv.AMOV)
   950  		p.From.Type = obj.TYPE_ADDR
   951  		p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
   952  		p.From.Name = obj.NAME_PARAM
   953  		p.To.Type = obj.TYPE_REG
   954  		p.To.Reg = v.Reg()
   955  
   956  	case ssa.OpRISCV64LoweredGetCallerPC:
   957  		p := s.Prog(obj.AGETCALLERPC)
   958  		p.To.Type = obj.TYPE_REG
   959  		p.To.Reg = v.Reg()
   960  
   961  	case ssa.OpRISCV64LoweredPubBarrier:
   962  		// FENCE
   963  		s.Prog(v.Op.Asm())
   964  
   965  	case ssa.OpRISCV64LoweredRound32F, ssa.OpRISCV64LoweredRound64F:
   966  		// input is already rounded
   967  
   968  	case ssa.OpClobber, ssa.OpClobberReg:
   969  		// TODO: implement for clobberdead experiment. Nop is ok for now.
   970  
   971  	default:
   972  		v.Fatalf("Unhandled op %v", v.Op)
   973  	}
   974  }
   975  
// blockBranch maps an SSA block kind to the RISC-V branch instruction
// that implements it.
var blockBranch = [...]obj.As{
	ssa.BlockRISCV64BEQ:  riscv.ABEQ,
	ssa.BlockRISCV64BEQZ: riscv.ABEQZ,
	ssa.BlockRISCV64BGE:  riscv.ABGE,
	ssa.BlockRISCV64BGEU: riscv.ABGEU,
	ssa.BlockRISCV64BGEZ: riscv.ABGEZ,
	ssa.BlockRISCV64BGTZ: riscv.ABGTZ,
	ssa.BlockRISCV64BLEZ: riscv.ABLEZ,
	ssa.BlockRISCV64BLT:  riscv.ABLT,
	ssa.BlockRISCV64BLTU: riscv.ABLTU,
	ssa.BlockRISCV64BLTZ: riscv.ABLTZ,
	ssa.BlockRISCV64BNE:  riscv.ABNE,
	ssa.BlockRISCV64BNEZ: riscv.ABNEZ,
}
   990  
   991  func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
   992  	s.SetPos(b.Pos)
   993  
   994  	switch b.Kind {
   995  	case ssa.BlockPlain, ssa.BlockDefer:
   996  		if b.Succs[0].Block() != next {
   997  			p := s.Prog(obj.AJMP)
   998  			p.To.Type = obj.TYPE_BRANCH
   999  			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
  1000  		}
  1001  	case ssa.BlockExit, ssa.BlockRetJmp:
  1002  	case ssa.BlockRet:
  1003  		s.Prog(obj.ARET)
  1004  	case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BNEZ,
  1005  		ssa.BlockRISCV64BLT, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BGEZ,
  1006  		ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
  1007  
  1008  		as := blockBranch[b.Kind]
  1009  		invAs := riscv.InvertBranch(as)
  1010  
  1011  		var p *obj.Prog
  1012  		switch next {
  1013  		case b.Succs[0].Block():
  1014  			p = s.Br(invAs, b.Succs[1].Block())
  1015  		case b.Succs[1].Block():
  1016  			p = s.Br(as, b.Succs[0].Block())
  1017  		default:
  1018  			if b.Likely != ssa.BranchUnlikely {
  1019  				p = s.Br(as, b.Succs[0].Block())
  1020  				s.Br(obj.AJMP, b.Succs[1].Block())
  1021  			} else {
  1022  				p = s.Br(invAs, b.Succs[1].Block())
  1023  				s.Br(obj.AJMP, b.Succs[0].Block())
  1024  			}
  1025  		}
  1026  
  1027  		p.From.Type = obj.TYPE_REG
  1028  		switch b.Kind {
  1029  		case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BLT, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
  1030  			if b.NumControls() != 2 {
  1031  				b.Fatalf("Unexpected number of controls (%d != 2): %s", b.NumControls(), b.LongString())
  1032  			}
  1033  			p.From.Reg = b.Controls[0].Reg()
  1034  			p.Reg = b.Controls[1].Reg()
  1035  
  1036  		case ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNEZ, ssa.BlockRISCV64BGEZ, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ:
  1037  			if b.NumControls() != 1 {
  1038  				b.Fatalf("Unexpected number of controls (%d != 1): %s", b.NumControls(), b.LongString())
  1039  			}
  1040  			p.From.Reg = b.Controls[0].Reg()
  1041  		}
  1042  
  1043  	default:
  1044  		b.Fatalf("Unhandled block: %s", b.LongString())
  1045  	}
  1046  }
  1047  
  1048  func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
  1049  	p := s.Prog(loadByType(t))
  1050  	p.From.Type = obj.TYPE_MEM
  1051  	p.From.Name = obj.NAME_AUTO
  1052  	p.From.Sym = n.Linksym()
  1053  	p.From.Offset = n.FrameOffset() + off
  1054  	p.To.Type = obj.TYPE_REG
  1055  	p.To.Reg = reg
  1056  	return p
  1057  }
  1058  
// spillArgReg appends (after p) a store of argument register reg into the
// stack slot for parameter n (plus offset off within it), using a store
// instruction sized for type t, and returns the new instruction. The
// instruction is marked not-a-statement so it does not claim a source
// line in debug info.
func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
	p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
	p.To.Name = obj.NAME_PARAM
	p.To.Sym = n.Linksym()
	p.Pos = p.Pos.WithNotStmt()
	return p
}
  1066  
  1067  func zeroOp(s *ssagen.State, mov obj.As, reg int16, off int64) {
  1068  	p := s.Prog(mov)
  1069  	p.From.Type = obj.TYPE_REG
  1070  	p.From.Reg = riscv.REG_ZERO
  1071  	p.To.Type = obj.TYPE_MEM
  1072  	p.To.Reg = reg
  1073  	p.To.Offset = off
  1074  	return
  1075  }
  1076  
  1077  func moveOp(s *ssagen.State, mov obj.As, dst int16, src int16, tmp int16, off int64) {
  1078  	p := s.Prog(mov)
  1079  	p.From.Type = obj.TYPE_MEM
  1080  	p.From.Reg = src
  1081  	p.From.Offset = off
  1082  	p.To.Type = obj.TYPE_REG
  1083  	p.To.Reg = tmp
  1084  
  1085  	p1 := s.Prog(mov)
  1086  	p1.From.Type = obj.TYPE_REG
  1087  	p1.From.Reg = tmp
  1088  	p1.To.Type = obj.TYPE_MEM
  1089  	p1.To.Reg = dst
  1090  	p1.To.Offset = off
  1091  
  1092  	return
  1093  }
  1094  

View as plain text