Source file src/cmd/compile/internal/riscv64/ssa.go

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package riscv64
     6  
     7  import (
     8  	"cmd/compile/internal/base"
     9  	"cmd/compile/internal/ir"
    10  	"cmd/compile/internal/logopt"
    11  	"cmd/compile/internal/objw"
    12  	"cmd/compile/internal/ssa"
    13  	"cmd/compile/internal/ssagen"
    14  	"cmd/compile/internal/types"
    15  	"cmd/internal/obj"
    16  	"cmd/internal/obj/riscv"
    17  	"internal/abi"
    18  )
    19  
// ssaRegToReg maps ssa register numbers to obj register numbers.
// Note that X1 (LR) has no entry, so every integer register after X0
// appears at an SSA index one lower than its architectural number.
// The final 0 entry stands in for SB, which is not a real register.
var ssaRegToReg = []int16{
	riscv.REG_X0,
	// X1 (LR): unused
	riscv.REG_X2,
	riscv.REG_X3,
	riscv.REG_X4,
	riscv.REG_X5,
	riscv.REG_X6,
	riscv.REG_X7,
	riscv.REG_X8,
	riscv.REG_X9,
	riscv.REG_X10,
	riscv.REG_X11,
	riscv.REG_X12,
	riscv.REG_X13,
	riscv.REG_X14,
	riscv.REG_X15,
	riscv.REG_X16,
	riscv.REG_X17,
	riscv.REG_X18,
	riscv.REG_X19,
	riscv.REG_X20,
	riscv.REG_X21,
	riscv.REG_X22,
	riscv.REG_X23,
	riscv.REG_X24,
	riscv.REG_X25,
	riscv.REG_X26,
	riscv.REG_X27,
	riscv.REG_X28,
	riscv.REG_X29,
	riscv.REG_X30,
	riscv.REG_X31,
	riscv.REG_F0,
	riscv.REG_F1,
	riscv.REG_F2,
	riscv.REG_F3,
	riscv.REG_F4,
	riscv.REG_F5,
	riscv.REG_F6,
	riscv.REG_F7,
	riscv.REG_F8,
	riscv.REG_F9,
	riscv.REG_F10,
	riscv.REG_F11,
	riscv.REG_F12,
	riscv.REG_F13,
	riscv.REG_F14,
	riscv.REG_F15,
	riscv.REG_F16,
	riscv.REG_F17,
	riscv.REG_F18,
	riscv.REG_F19,
	riscv.REG_F20,
	riscv.REG_F21,
	riscv.REG_F22,
	riscv.REG_F23,
	riscv.REG_F24,
	riscv.REG_F25,
	riscv.REG_F26,
	riscv.REG_F27,
	riscv.REG_F28,
	riscv.REG_F29,
	riscv.REG_F30,
	riscv.REG_F31,
	0, // SB isn't a real register.  We fill an Addr.Reg field with 0 in this case.
}
    88  
    89  func loadByType(t *types.Type) obj.As {
    90  	width := t.Size()
    91  
    92  	if t.IsFloat() {
    93  		switch width {
    94  		case 4:
    95  			return riscv.AMOVF
    96  		case 8:
    97  			return riscv.AMOVD
    98  		default:
    99  			base.Fatalf("unknown float width for load %d in type %v", width, t)
   100  			return 0
   101  		}
   102  	}
   103  
   104  	switch width {
   105  	case 1:
   106  		if t.IsSigned() {
   107  			return riscv.AMOVB
   108  		} else {
   109  			return riscv.AMOVBU
   110  		}
   111  	case 2:
   112  		if t.IsSigned() {
   113  			return riscv.AMOVH
   114  		} else {
   115  			return riscv.AMOVHU
   116  		}
   117  	case 4:
   118  		if t.IsSigned() {
   119  			return riscv.AMOVW
   120  		} else {
   121  			return riscv.AMOVWU
   122  		}
   123  	case 8:
   124  		return riscv.AMOV
   125  	default:
   126  		base.Fatalf("unknown width for load %d in type %v", width, t)
   127  		return 0
   128  	}
   129  }
   130  
   131  // storeByType returns the store instruction of the given type.
   132  func storeByType(t *types.Type) obj.As {
   133  	width := t.Size()
   134  
   135  	if t.IsFloat() {
   136  		switch width {
   137  		case 4:
   138  			return riscv.AMOVF
   139  		case 8:
   140  			return riscv.AMOVD
   141  		default:
   142  			base.Fatalf("unknown float width for store %d in type %v", width, t)
   143  			return 0
   144  		}
   145  	}
   146  
   147  	switch width {
   148  	case 1:
   149  		return riscv.AMOVB
   150  	case 2:
   151  		return riscv.AMOVH
   152  	case 4:
   153  		return riscv.AMOVW
   154  	case 8:
   155  		return riscv.AMOV
   156  	default:
   157  		base.Fatalf("unknown width for store %d in type %v", width, t)
   158  		return 0
   159  	}
   160  }
   161  
   162  // largestMove returns the largest move instruction possible and its size,
   163  // given the alignment of the total size of the move.
   164  //
   165  // e.g., a 16-byte move may use MOV, but an 11-byte move must use MOVB.
   166  //
   167  // Note that the moves may not be on naturally aligned addresses depending on
   168  // the source and destination.
   169  //
   170  // This matches the calculation in ssa.moveSize.
   171  func largestMove(alignment int64) (obj.As, int64) {
   172  	switch {
   173  	case alignment%8 == 0:
   174  		return riscv.AMOV, 8
   175  	case alignment%4 == 0:
   176  		return riscv.AMOVW, 4
   177  	case alignment%2 == 0:
   178  		return riscv.AMOVH, 2
   179  	default:
   180  		return riscv.AMOVB, 1
   181  	}
   182  }
   183  
   184  var fracMovOps = []obj.As{riscv.AMOVB, riscv.AMOVH, riscv.AMOVW, riscv.AMOV}
   185  
// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
// RISC-V has no flags, so this is a no-op.
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {}
   189  
   190  func ssaGenValue(s *ssagen.State, v *ssa.Value) {
   191  	s.SetPos(v.Pos)
   192  
   193  	switch v.Op {
   194  	case ssa.OpInitMem:
   195  		// memory arg needs no code
   196  	case ssa.OpArg:
   197  		// input args need no code
   198  	case ssa.OpPhi:
   199  		ssagen.CheckLoweredPhi(v)
   200  	case ssa.OpCopy, ssa.OpRISCV64MOVDreg:
   201  		if v.Type.IsMemory() {
   202  			return
   203  		}
   204  		rs := v.Args[0].Reg()
   205  		rd := v.Reg()
   206  		if rs == rd {
   207  			return
   208  		}
   209  		as := riscv.AMOV
   210  		if v.Type.IsFloat() {
   211  			as = riscv.AMOVD
   212  		}
   213  		p := s.Prog(as)
   214  		p.From.Type = obj.TYPE_REG
   215  		p.From.Reg = rs
   216  		p.To.Type = obj.TYPE_REG
   217  		p.To.Reg = rd
   218  	case ssa.OpRISCV64MOVDnop:
   219  		// nothing to do
   220  	case ssa.OpLoadReg:
   221  		if v.Type.IsFlags() {
   222  			v.Fatalf("load flags not implemented: %v", v.LongString())
   223  			return
   224  		}
   225  		p := s.Prog(loadByType(v.Type))
   226  		ssagen.AddrAuto(&p.From, v.Args[0])
   227  		p.To.Type = obj.TYPE_REG
   228  		p.To.Reg = v.Reg()
   229  	case ssa.OpStoreReg:
   230  		if v.Type.IsFlags() {
   231  			v.Fatalf("store flags not implemented: %v", v.LongString())
   232  			return
   233  		}
   234  		p := s.Prog(storeByType(v.Type))
   235  		p.From.Type = obj.TYPE_REG
   236  		p.From.Reg = v.Args[0].Reg()
   237  		ssagen.AddrAuto(&p.To, v)
   238  	case ssa.OpArgIntReg, ssa.OpArgFloatReg:
   239  		// The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
   240  		// The loop only runs once.
   241  		for _, a := range v.Block.Func.RegArgs {
   242  			// Pass the spill/unspill information along to the assembler, offset by size of
   243  			// the saved LR slot.
   244  			addr := ssagen.SpillSlotAddr(a, riscv.REG_SP, base.Ctxt.Arch.FixedFrameSize)
   245  			s.FuncInfo().AddSpill(
   246  				obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
   247  		}
   248  		v.Block.Func.RegArgs = nil
   249  
   250  		ssagen.CheckArgReg(v)
   251  	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
   252  		// nothing to do
   253  	case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
   254  		ssa.OpRISCV64MOVBUreg, ssa.OpRISCV64MOVHUreg, ssa.OpRISCV64MOVWUreg:
   255  		a := v.Args[0]
   256  		for a.Op == ssa.OpCopy || a.Op == ssa.OpRISCV64MOVDreg {
   257  			a = a.Args[0]
   258  		}
   259  		as := v.Op.Asm()
   260  		rs := v.Args[0].Reg()
   261  		rd := v.Reg()
   262  		if a.Op == ssa.OpLoadReg {
   263  			t := a.Type
   264  			switch {
   265  			case v.Op == ssa.OpRISCV64MOVBreg && t.Size() == 1 && t.IsSigned(),
   266  				v.Op == ssa.OpRISCV64MOVHreg && t.Size() == 2 && t.IsSigned(),
   267  				v.Op == ssa.OpRISCV64MOVWreg && t.Size() == 4 && t.IsSigned(),
   268  				v.Op == ssa.OpRISCV64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
   269  				v.Op == ssa.OpRISCV64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
   270  				v.Op == ssa.OpRISCV64MOVWUreg && t.Size() == 4 && !t.IsSigned():
   271  				// arg is a proper-typed load and already sign/zero-extended
   272  				if rs == rd {
   273  					return
   274  				}
   275  				as = riscv.AMOV
   276  			default:
   277  			}
   278  		}
   279  		p := s.Prog(as)
   280  		p.From.Type = obj.TYPE_REG
   281  		p.From.Reg = rs
   282  		p.To.Type = obj.TYPE_REG
   283  		p.To.Reg = rd
   284  	case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XNOR, ssa.OpRISCV64XOR,
   285  		ssa.OpRISCV64OR, ssa.OpRISCV64ORN, ssa.OpRISCV64AND, ssa.OpRISCV64ANDN,
   286  		ssa.OpRISCV64SLL, ssa.OpRISCV64SLLW, ssa.OpRISCV64SRA, ssa.OpRISCV64SRAW, ssa.OpRISCV64SRL, ssa.OpRISCV64SRLW,
   287  		ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
   288  		ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW,
   289  		ssa.OpRISCV64DIVUW, ssa.OpRISCV64REM, ssa.OpRISCV64REMU, ssa.OpRISCV64REMW,
   290  		ssa.OpRISCV64REMUW,
   291  		ssa.OpRISCV64ROL, ssa.OpRISCV64ROLW, ssa.OpRISCV64ROR, ssa.OpRISCV64RORW,
   292  		ssa.OpRISCV64FADDS, ssa.OpRISCV64FSUBS, ssa.OpRISCV64FMULS, ssa.OpRISCV64FDIVS,
   293  		ssa.OpRISCV64FEQS, ssa.OpRISCV64FNES, ssa.OpRISCV64FLTS, ssa.OpRISCV64FLES,
   294  		ssa.OpRISCV64FADDD, ssa.OpRISCV64FSUBD, ssa.OpRISCV64FMULD, ssa.OpRISCV64FDIVD,
   295  		ssa.OpRISCV64FEQD, ssa.OpRISCV64FNED, ssa.OpRISCV64FLTD, ssa.OpRISCV64FLED, ssa.OpRISCV64FSGNJD,
   296  		ssa.OpRISCV64MIN, ssa.OpRISCV64MAX, ssa.OpRISCV64MINU, ssa.OpRISCV64MAXU,
   297  		ssa.OpRISCV64SH1ADD, ssa.OpRISCV64SH2ADD, ssa.OpRISCV64SH3ADD:
   298  		r := v.Reg()
   299  		r1 := v.Args[0].Reg()
   300  		r2 := v.Args[1].Reg()
   301  		p := s.Prog(v.Op.Asm())
   302  		p.From.Type = obj.TYPE_REG
   303  		p.From.Reg = r2
   304  		p.Reg = r1
   305  		p.To.Type = obj.TYPE_REG
   306  		p.To.Reg = r
   307  
   308  	case ssa.OpRISCV64LoweredFMAXD, ssa.OpRISCV64LoweredFMIND, ssa.OpRISCV64LoweredFMAXS, ssa.OpRISCV64LoweredFMINS:
   309  		// Most of FMIN/FMAX result match Go's required behaviour, unless one of the
   310  		// inputs is a NaN. As such, we need to explicitly test for NaN
   311  		// before using FMIN/FMAX.
   312  
   313  		// FADD Rarg0, Rarg1, Rout // FADD is used to propagate a NaN to the result in these cases.
   314  		// FEQ  Rarg0, Rarg0, Rtmp
   315  		// BEQZ Rtmp, end
   316  		// FEQ  Rarg1, Rarg1, Rtmp
   317  		// BEQZ Rtmp, end
   318  		// F(MIN | MAX)
   319  
   320  		r0 := v.Args[0].Reg()
   321  		r1 := v.Args[1].Reg()
   322  		out := v.Reg()
   323  		add, feq := riscv.AFADDD, riscv.AFEQD
   324  		if v.Op == ssa.OpRISCV64LoweredFMAXS || v.Op == ssa.OpRISCV64LoweredFMINS {
   325  			add = riscv.AFADDS
   326  			feq = riscv.AFEQS
   327  		}
   328  
   329  		p1 := s.Prog(add)
   330  		p1.From.Type = obj.TYPE_REG
   331  		p1.From.Reg = r0
   332  		p1.Reg = r1
   333  		p1.To.Type = obj.TYPE_REG
   334  		p1.To.Reg = out
   335  
   336  		p2 := s.Prog(feq)
   337  		p2.From.Type = obj.TYPE_REG
   338  		p2.From.Reg = r0
   339  		p2.Reg = r0
   340  		p2.To.Type = obj.TYPE_REG
   341  		p2.To.Reg = riscv.REG_TMP
   342  
   343  		p3 := s.Prog(riscv.ABEQ)
   344  		p3.From.Type = obj.TYPE_REG
   345  		p3.From.Reg = riscv.REG_ZERO
   346  		p3.Reg = riscv.REG_TMP
   347  		p3.To.Type = obj.TYPE_BRANCH
   348  
   349  		p4 := s.Prog(feq)
   350  		p4.From.Type = obj.TYPE_REG
   351  		p4.From.Reg = r1
   352  		p4.Reg = r1
   353  		p4.To.Type = obj.TYPE_REG
   354  		p4.To.Reg = riscv.REG_TMP
   355  
   356  		p5 := s.Prog(riscv.ABEQ)
   357  		p5.From.Type = obj.TYPE_REG
   358  		p5.From.Reg = riscv.REG_ZERO
   359  		p5.Reg = riscv.REG_TMP
   360  		p5.To.Type = obj.TYPE_BRANCH
   361  
   362  		p6 := s.Prog(v.Op.Asm())
   363  		p6.From.Type = obj.TYPE_REG
   364  		p6.From.Reg = r1
   365  		p6.Reg = r0
   366  		p6.To.Type = obj.TYPE_REG
   367  		p6.To.Reg = out
   368  
   369  		nop := s.Prog(obj.ANOP)
   370  		p3.To.SetTarget(nop)
   371  		p5.To.SetTarget(nop)
   372  
   373  	case ssa.OpRISCV64LoweredMuluhilo:
   374  		r0 := v.Args[0].Reg()
   375  		r1 := v.Args[1].Reg()
   376  		p := s.Prog(riscv.AMULHU)
   377  		p.From.Type = obj.TYPE_REG
   378  		p.From.Reg = r1
   379  		p.Reg = r0
   380  		p.To.Type = obj.TYPE_REG
   381  		p.To.Reg = v.Reg0()
   382  		p1 := s.Prog(riscv.AMUL)
   383  		p1.From.Type = obj.TYPE_REG
   384  		p1.From.Reg = r1
   385  		p1.Reg = r0
   386  		p1.To.Type = obj.TYPE_REG
   387  		p1.To.Reg = v.Reg1()
   388  	case ssa.OpRISCV64LoweredMuluover:
   389  		r0 := v.Args[0].Reg()
   390  		r1 := v.Args[1].Reg()
   391  		p := s.Prog(riscv.AMULHU)
   392  		p.From.Type = obj.TYPE_REG
   393  		p.From.Reg = r1
   394  		p.Reg = r0
   395  		p.To.Type = obj.TYPE_REG
   396  		p.To.Reg = v.Reg1()
   397  		p1 := s.Prog(riscv.AMUL)
   398  		p1.From.Type = obj.TYPE_REG
   399  		p1.From.Reg = r1
   400  		p1.Reg = r0
   401  		p1.To.Type = obj.TYPE_REG
   402  		p1.To.Reg = v.Reg0()
   403  		p2 := s.Prog(riscv.ASNEZ)
   404  		p2.From.Type = obj.TYPE_REG
   405  		p2.From.Reg = v.Reg1()
   406  		p2.To.Type = obj.TYPE_REG
   407  		p2.To.Reg = v.Reg1()
   408  	case ssa.OpRISCV64FMADDD, ssa.OpRISCV64FMSUBD, ssa.OpRISCV64FNMADDD, ssa.OpRISCV64FNMSUBD,
   409  		ssa.OpRISCV64FMADDS, ssa.OpRISCV64FMSUBS, ssa.OpRISCV64FNMADDS, ssa.OpRISCV64FNMSUBS:
   410  		r := v.Reg()
   411  		r1 := v.Args[0].Reg()
   412  		r2 := v.Args[1].Reg()
   413  		r3 := v.Args[2].Reg()
   414  		p := s.Prog(v.Op.Asm())
   415  		p.From.Type = obj.TYPE_REG
   416  		p.From.Reg = r2
   417  		p.Reg = r1
   418  		p.AddRestSource(obj.Addr{Type: obj.TYPE_REG, Reg: r3})
   419  		p.To.Type = obj.TYPE_REG
   420  		p.To.Reg = r
   421  	case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FABSD, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD,
   422  		ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVXS, ssa.OpRISCV64FMVDX, ssa.OpRISCV64FMVXD,
   423  		ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS,
   424  		ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, ssa.OpRISCV64FCVTSD,
   425  		ssa.OpRISCV64FCLASSS, ssa.OpRISCV64FCLASSD,
   426  		ssa.OpRISCV64NOT, ssa.OpRISCV64NEG, ssa.OpRISCV64NEGW, ssa.OpRISCV64CLZ, ssa.OpRISCV64CLZW, ssa.OpRISCV64CTZ, ssa.OpRISCV64CTZW,
   427  		ssa.OpRISCV64REV8, ssa.OpRISCV64CPOP, ssa.OpRISCV64CPOPW:
   428  		p := s.Prog(v.Op.Asm())
   429  		p.From.Type = obj.TYPE_REG
   430  		p.From.Reg = v.Args[0].Reg()
   431  		p.To.Type = obj.TYPE_REG
   432  		p.To.Reg = v.Reg()
   433  	case ssa.OpRISCV64ADDI, ssa.OpRISCV64ADDIW, ssa.OpRISCV64XORI, ssa.OpRISCV64ORI, ssa.OpRISCV64ANDI,
   434  		ssa.OpRISCV64SLLI, ssa.OpRISCV64SLLIW, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRAIW,
   435  		ssa.OpRISCV64SRLI, ssa.OpRISCV64SRLIW, ssa.OpRISCV64SLTI, ssa.OpRISCV64SLTIU,
   436  		ssa.OpRISCV64RORI, ssa.OpRISCV64RORIW:
   437  		p := s.Prog(v.Op.Asm())
   438  		p.From.Type = obj.TYPE_CONST
   439  		p.From.Offset = v.AuxInt
   440  		p.Reg = v.Args[0].Reg()
   441  		p.To.Type = obj.TYPE_REG
   442  		p.To.Reg = v.Reg()
   443  	case ssa.OpRISCV64MOVDconst:
   444  		p := s.Prog(v.Op.Asm())
   445  		p.From.Type = obj.TYPE_CONST
   446  		p.From.Offset = v.AuxInt
   447  		p.To.Type = obj.TYPE_REG
   448  		p.To.Reg = v.Reg()
   449  	case ssa.OpRISCV64MOVaddr:
   450  		p := s.Prog(v.Op.Asm())
   451  		p.From.Type = obj.TYPE_ADDR
   452  		p.To.Type = obj.TYPE_REG
   453  		p.To.Reg = v.Reg()
   454  
   455  		var wantreg string
   456  		// MOVW $sym+off(base), R
   457  		switch v.Aux.(type) {
   458  		default:
   459  			v.Fatalf("aux is of unknown type %T", v.Aux)
   460  		case *obj.LSym:
   461  			wantreg = "SB"
   462  			ssagen.AddAux(&p.From, v)
   463  		case *ir.Name:
   464  			wantreg = "SP"
   465  			ssagen.AddAux(&p.From, v)
   466  		case nil:
   467  			// No sym, just MOVW $off(SP), R
   468  			wantreg = "SP"
   469  			p.From.Reg = riscv.REG_SP
   470  			p.From.Offset = v.AuxInt
   471  		}
   472  		if reg := v.Args[0].RegName(); reg != wantreg {
   473  			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
   474  		}
   475  	case ssa.OpRISCV64MOVBload, ssa.OpRISCV64MOVHload, ssa.OpRISCV64MOVWload, ssa.OpRISCV64MOVDload,
   476  		ssa.OpRISCV64MOVBUload, ssa.OpRISCV64MOVHUload, ssa.OpRISCV64MOVWUload,
   477  		ssa.OpRISCV64FMOVWload, ssa.OpRISCV64FMOVDload:
   478  		p := s.Prog(v.Op.Asm())
   479  		p.From.Type = obj.TYPE_MEM
   480  		p.From.Reg = v.Args[0].Reg()
   481  		ssagen.AddAux(&p.From, v)
   482  		p.To.Type = obj.TYPE_REG
   483  		p.To.Reg = v.Reg()
   484  	case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore,
   485  		ssa.OpRISCV64FMOVWstore, ssa.OpRISCV64FMOVDstore:
   486  		p := s.Prog(v.Op.Asm())
   487  		p.From.Type = obj.TYPE_REG
   488  		p.From.Reg = v.Args[1].Reg()
   489  		p.To.Type = obj.TYPE_MEM
   490  		p.To.Reg = v.Args[0].Reg()
   491  		ssagen.AddAux(&p.To, v)
   492  	case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
   493  		p := s.Prog(v.Op.Asm())
   494  		p.From.Type = obj.TYPE_REG
   495  		p.From.Reg = riscv.REG_ZERO
   496  		p.To.Type = obj.TYPE_MEM
   497  		p.To.Reg = v.Args[0].Reg()
   498  		ssagen.AddAux(&p.To, v)
   499  	case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
   500  		p := s.Prog(v.Op.Asm())
   501  		p.From.Type = obj.TYPE_REG
   502  		p.From.Reg = v.Args[0].Reg()
   503  		p.To.Type = obj.TYPE_REG
   504  		p.To.Reg = v.Reg()
   505  	case ssa.OpRISCV64CALLstatic, ssa.OpRISCV64CALLclosure, ssa.OpRISCV64CALLinter:
   506  		s.Call(v)
   507  	case ssa.OpRISCV64CALLtail:
   508  		s.TailCall(v)
   509  	case ssa.OpRISCV64LoweredWB:
   510  		p := s.Prog(obj.ACALL)
   511  		p.To.Type = obj.TYPE_MEM
   512  		p.To.Name = obj.NAME_EXTERN
   513  		// AuxInt encodes how many buffer entries we need.
   514  		p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
   515  
   516  	case ssa.OpRISCV64LoweredPanicBoundsRR, ssa.OpRISCV64LoweredPanicBoundsRC, ssa.OpRISCV64LoweredPanicBoundsCR, ssa.OpRISCV64LoweredPanicBoundsCC:
   517  		// Compute the constant we put in the PCData entry for this call.
   518  		code, signed := ssa.BoundsKind(v.AuxInt).Code()
   519  		xIsReg := false
   520  		yIsReg := false
   521  		xVal := 0
   522  		yVal := 0
   523  		switch v.Op {
   524  		case ssa.OpRISCV64LoweredPanicBoundsRR:
   525  			xIsReg = true
   526  			xVal = int(v.Args[0].Reg() - riscv.REG_X5)
   527  			yIsReg = true
   528  			yVal = int(v.Args[1].Reg() - riscv.REG_X5)
   529  		case ssa.OpRISCV64LoweredPanicBoundsRC:
   530  			xIsReg = true
   531  			xVal = int(v.Args[0].Reg() - riscv.REG_X5)
   532  			c := v.Aux.(ssa.PanicBoundsC).C
   533  			if c >= 0 && c <= abi.BoundsMaxConst {
   534  				yVal = int(c)
   535  			} else {
   536  				// Move constant to a register
   537  				yIsReg = true
   538  				if yVal == xVal {
   539  					yVal = 1
   540  				}
   541  				p := s.Prog(riscv.AMOV)
   542  				p.From.Type = obj.TYPE_CONST
   543  				p.From.Offset = c
   544  				p.To.Type = obj.TYPE_REG
   545  				p.To.Reg = riscv.REG_X5 + int16(yVal)
   546  			}
   547  		case ssa.OpRISCV64LoweredPanicBoundsCR:
   548  			yIsReg = true
   549  			yVal = int(v.Args[0].Reg() - riscv.REG_X5)
   550  			c := v.Aux.(ssa.PanicBoundsC).C
   551  			if c >= 0 && c <= abi.BoundsMaxConst {
   552  				xVal = int(c)
   553  			} else {
   554  				// Move constant to a register
   555  				if xVal == yVal {
   556  					xVal = 1
   557  				}
   558  				p := s.Prog(riscv.AMOV)
   559  				p.From.Type = obj.TYPE_CONST
   560  				p.From.Offset = c
   561  				p.To.Type = obj.TYPE_REG
   562  				p.To.Reg = riscv.REG_X5 + int16(xVal)
   563  			}
   564  		case ssa.OpRISCV64LoweredPanicBoundsCC:
   565  			c := v.Aux.(ssa.PanicBoundsCC).Cx
   566  			if c >= 0 && c <= abi.BoundsMaxConst {
   567  				xVal = int(c)
   568  			} else {
   569  				// Move constant to a register
   570  				xIsReg = true
   571  				p := s.Prog(riscv.AMOV)
   572  				p.From.Type = obj.TYPE_CONST
   573  				p.From.Offset = c
   574  				p.To.Type = obj.TYPE_REG
   575  				p.To.Reg = riscv.REG_X5 + int16(xVal)
   576  			}
   577  			c = v.Aux.(ssa.PanicBoundsCC).Cy
   578  			if c >= 0 && c <= abi.BoundsMaxConst {
   579  				yVal = int(c)
   580  			} else {
   581  				// Move constant to a register
   582  				yIsReg = true
   583  				yVal = 1
   584  				p := s.Prog(riscv.AMOV)
   585  				p.From.Type = obj.TYPE_CONST
   586  				p.From.Offset = c
   587  				p.To.Type = obj.TYPE_REG
   588  				p.To.Reg = riscv.REG_X5 + int16(yVal)
   589  			}
   590  		}
   591  		c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
   592  
   593  		p := s.Prog(obj.APCDATA)
   594  		p.From.SetConst(abi.PCDATA_PanicBounds)
   595  		p.To.SetConst(int64(c))
   596  		p = s.Prog(obj.ACALL)
   597  		p.To.Type = obj.TYPE_MEM
   598  		p.To.Name = obj.NAME_EXTERN
   599  		p.To.Sym = ir.Syms.PanicBounds
   600  
   601  	case ssa.OpRISCV64LoweredAtomicLoad8:
   602  		s.Prog(riscv.AFENCE)
   603  		p := s.Prog(riscv.AMOVBU)
   604  		p.From.Type = obj.TYPE_MEM
   605  		p.From.Reg = v.Args[0].Reg()
   606  		p.To.Type = obj.TYPE_REG
   607  		p.To.Reg = v.Reg0()
   608  		s.Prog(riscv.AFENCE)
   609  
   610  	case ssa.OpRISCV64LoweredAtomicLoad32, ssa.OpRISCV64LoweredAtomicLoad64:
   611  		as := riscv.ALRW
   612  		if v.Op == ssa.OpRISCV64LoweredAtomicLoad64 {
   613  			as = riscv.ALRD
   614  		}
   615  		p := s.Prog(as)
   616  		p.From.Type = obj.TYPE_MEM
   617  		p.From.Reg = v.Args[0].Reg()
   618  		p.To.Type = obj.TYPE_REG
   619  		p.To.Reg = v.Reg0()
   620  
   621  	case ssa.OpRISCV64LoweredAtomicStore8:
   622  		s.Prog(riscv.AFENCE)
   623  		p := s.Prog(riscv.AMOVB)
   624  		p.From.Type = obj.TYPE_REG
   625  		p.From.Reg = v.Args[1].Reg()
   626  		p.To.Type = obj.TYPE_MEM
   627  		p.To.Reg = v.Args[0].Reg()
   628  		s.Prog(riscv.AFENCE)
   629  
   630  	case ssa.OpRISCV64LoweredAtomicStore32, ssa.OpRISCV64LoweredAtomicStore64:
   631  		as := riscv.AAMOSWAPW
   632  		if v.Op == ssa.OpRISCV64LoweredAtomicStore64 {
   633  			as = riscv.AAMOSWAPD
   634  		}
   635  		p := s.Prog(as)
   636  		p.From.Type = obj.TYPE_REG
   637  		p.From.Reg = v.Args[1].Reg()
   638  		p.To.Type = obj.TYPE_MEM
   639  		p.To.Reg = v.Args[0].Reg()
   640  		p.RegTo2 = riscv.REG_ZERO
   641  
   642  	case ssa.OpRISCV64LoweredAtomicAdd32, ssa.OpRISCV64LoweredAtomicAdd64:
   643  		as := riscv.AAMOADDW
   644  		if v.Op == ssa.OpRISCV64LoweredAtomicAdd64 {
   645  			as = riscv.AAMOADDD
   646  		}
   647  		p := s.Prog(as)
   648  		p.From.Type = obj.TYPE_REG
   649  		p.From.Reg = v.Args[1].Reg()
   650  		p.To.Type = obj.TYPE_MEM
   651  		p.To.Reg = v.Args[0].Reg()
   652  		p.RegTo2 = riscv.REG_TMP
   653  
   654  		p2 := s.Prog(riscv.AADD)
   655  		p2.From.Type = obj.TYPE_REG
   656  		p2.From.Reg = riscv.REG_TMP
   657  		p2.Reg = v.Args[1].Reg()
   658  		p2.To.Type = obj.TYPE_REG
   659  		p2.To.Reg = v.Reg0()
   660  
   661  	case ssa.OpRISCV64LoweredAtomicExchange32, ssa.OpRISCV64LoweredAtomicExchange64:
   662  		as := riscv.AAMOSWAPW
   663  		if v.Op == ssa.OpRISCV64LoweredAtomicExchange64 {
   664  			as = riscv.AAMOSWAPD
   665  		}
   666  		p := s.Prog(as)
   667  		p.From.Type = obj.TYPE_REG
   668  		p.From.Reg = v.Args[1].Reg()
   669  		p.To.Type = obj.TYPE_MEM
   670  		p.To.Reg = v.Args[0].Reg()
   671  		p.RegTo2 = v.Reg0()
   672  
   673  	case ssa.OpRISCV64LoweredAtomicCas32, ssa.OpRISCV64LoweredAtomicCas64:
   674  		// MOV  ZERO, Rout
   675  		// LR	(Rarg0), Rtmp
   676  		// BNE	Rtmp, Rarg1, 3(PC)
   677  		// SC	Rarg2, (Rarg0), Rtmp
   678  		// BNE	Rtmp, ZERO, -3(PC)
   679  		// MOV	$1, Rout
   680  
   681  		lr := riscv.ALRW
   682  		sc := riscv.ASCW
   683  		if v.Op == ssa.OpRISCV64LoweredAtomicCas64 {
   684  			lr = riscv.ALRD
   685  			sc = riscv.ASCD
   686  		}
   687  
   688  		r0 := v.Args[0].Reg()
   689  		r1 := v.Args[1].Reg()
   690  		r2 := v.Args[2].Reg()
   691  		out := v.Reg0()
   692  
   693  		p := s.Prog(riscv.AMOV)
   694  		p.From.Type = obj.TYPE_REG
   695  		p.From.Reg = riscv.REG_ZERO
   696  		p.To.Type = obj.TYPE_REG
   697  		p.To.Reg = out
   698  
   699  		p1 := s.Prog(lr)
   700  		p1.From.Type = obj.TYPE_MEM
   701  		p1.From.Reg = r0
   702  		p1.To.Type = obj.TYPE_REG
   703  		p1.To.Reg = riscv.REG_TMP
   704  
   705  		p2 := s.Prog(riscv.ABNE)
   706  		p2.From.Type = obj.TYPE_REG
   707  		p2.From.Reg = r1
   708  		p2.Reg = riscv.REG_TMP
   709  		p2.To.Type = obj.TYPE_BRANCH
   710  
   711  		p3 := s.Prog(sc)
   712  		p3.From.Type = obj.TYPE_REG
   713  		p3.From.Reg = r2
   714  		p3.To.Type = obj.TYPE_MEM
   715  		p3.To.Reg = r0
   716  		p3.RegTo2 = riscv.REG_TMP
   717  
   718  		p4 := s.Prog(riscv.ABNE)
   719  		p4.From.Type = obj.TYPE_REG
   720  		p4.From.Reg = riscv.REG_TMP
   721  		p4.Reg = riscv.REG_ZERO
   722  		p4.To.Type = obj.TYPE_BRANCH
   723  		p4.To.SetTarget(p1)
   724  
   725  		p5 := s.Prog(riscv.AMOV)
   726  		p5.From.Type = obj.TYPE_CONST
   727  		p5.From.Offset = 1
   728  		p5.To.Type = obj.TYPE_REG
   729  		p5.To.Reg = out
   730  
   731  		p6 := s.Prog(obj.ANOP)
   732  		p2.To.SetTarget(p6)
   733  
   734  	case ssa.OpRISCV64LoweredAtomicAnd32, ssa.OpRISCV64LoweredAtomicOr32:
   735  		p := s.Prog(v.Op.Asm())
   736  		p.From.Type = obj.TYPE_REG
   737  		p.From.Reg = v.Args[1].Reg()
   738  		p.To.Type = obj.TYPE_MEM
   739  		p.To.Reg = v.Args[0].Reg()
   740  		p.RegTo2 = riscv.REG_ZERO
   741  
   742  	case ssa.OpRISCV64LoweredZero:
   743  		ptr := v.Args[0].Reg()
   744  		sc := v.AuxValAndOff()
   745  		n := sc.Val64()
   746  
   747  		mov, sz := largestMove(sc.Off64())
   748  
   749  		// mov	ZERO, (offset)(Rarg0)
   750  		var off int64
   751  		for n >= sz {
   752  			zeroOp(s, mov, ptr, off)
   753  			off += sz
   754  			n -= sz
   755  		}
   756  
   757  		for i := len(fracMovOps) - 1; i >= 0; i-- {
   758  			tsz := int64(1 << i)
   759  			if n < tsz {
   760  				continue
   761  			}
   762  			zeroOp(s, fracMovOps[i], ptr, off)
   763  			off += tsz
   764  			n -= tsz
   765  		}
   766  
   767  	case ssa.OpRISCV64LoweredZeroLoop:
   768  		ptr := v.Args[0].Reg()
   769  		sc := v.AuxValAndOff()
   770  		n := sc.Val64()
   771  		mov, sz := largestMove(sc.Off64())
   772  		chunk := 8 * sz
   773  
   774  		if n <= 3*chunk {
   775  			v.Fatalf("ZeroLoop too small:%d, expect:%d", n, 3*chunk)
   776  		}
   777  
   778  		tmp := v.RegTmp()
   779  
   780  		p := s.Prog(riscv.AADD)
   781  		p.From.Type = obj.TYPE_CONST
   782  		p.From.Offset = n - n%chunk
   783  		p.Reg = ptr
   784  		p.To.Type = obj.TYPE_REG
   785  		p.To.Reg = tmp
   786  
   787  		for i := int64(0); i < 8; i++ {
   788  			zeroOp(s, mov, ptr, sz*i)
   789  		}
   790  
   791  		p2 := s.Prog(riscv.AADD)
   792  		p2.From.Type = obj.TYPE_CONST
   793  		p2.From.Offset = chunk
   794  		p2.To.Type = obj.TYPE_REG
   795  		p2.To.Reg = ptr
   796  
   797  		p3 := s.Prog(riscv.ABNE)
   798  		p3.From.Reg = tmp
   799  		p3.From.Type = obj.TYPE_REG
   800  		p3.Reg = ptr
   801  		p3.To.Type = obj.TYPE_BRANCH
   802  		p3.To.SetTarget(p.Link)
   803  
   804  		n %= chunk
   805  
   806  		// mov	ZERO, (offset)(Rarg0)
   807  		var off int64
   808  		for n >= sz {
   809  			zeroOp(s, mov, ptr, off)
   810  			off += sz
   811  			n -= sz
   812  		}
   813  
   814  		for i := len(fracMovOps) - 1; i >= 0; i-- {
   815  			tsz := int64(1 << i)
   816  			if n < tsz {
   817  				continue
   818  			}
   819  			zeroOp(s, fracMovOps[i], ptr, off)
   820  			off += tsz
   821  			n -= tsz
   822  		}
   823  
   824  	case ssa.OpRISCV64LoweredMove:
   825  		dst := v.Args[0].Reg()
   826  		src := v.Args[1].Reg()
   827  		if dst == src {
   828  			break
   829  		}
   830  
   831  		sa := v.AuxValAndOff()
   832  		n := sa.Val64()
   833  		mov, sz := largestMove(sa.Off64())
   834  
   835  		var off int64
   836  		tmp := int16(riscv.REG_X5)
   837  		for n >= sz {
   838  			moveOp(s, mov, dst, src, tmp, off)
   839  			off += sz
   840  			n -= sz
   841  		}
   842  
   843  		for i := len(fracMovOps) - 1; i >= 0; i-- {
   844  			tsz := int64(1 << i)
   845  			if n < tsz {
   846  				continue
   847  			}
   848  			moveOp(s, fracMovOps[i], dst, src, tmp, off)
   849  			off += tsz
   850  			n -= tsz
   851  		}
   852  
   853  	case ssa.OpRISCV64LoweredMoveLoop:
   854  		dst := v.Args[0].Reg()
   855  		src := v.Args[1].Reg()
   856  		if dst == src {
   857  			break
   858  		}
   859  
   860  		sc := v.AuxValAndOff()
   861  		n := sc.Val64()
   862  		mov, sz := largestMove(sc.Off64())
   863  		chunk := 8 * sz
   864  
   865  		if n <= 3*chunk {
   866  			v.Fatalf("MoveLoop too small:%d, expect:%d", n, 3*chunk)
   867  		}
   868  		tmp := int16(riscv.REG_X5)
   869  
   870  		p := s.Prog(riscv.AADD)
   871  		p.From.Type = obj.TYPE_CONST
   872  		p.From.Offset = n - n%chunk
   873  		p.Reg = src
   874  		p.To.Type = obj.TYPE_REG
   875  		p.To.Reg = riscv.REG_X6
   876  
   877  		for i := int64(0); i < 8; i++ {
   878  			moveOp(s, mov, dst, src, tmp, sz*i)
   879  		}
   880  
   881  		p1 := s.Prog(riscv.AADD)
   882  		p1.From.Type = obj.TYPE_CONST
   883  		p1.From.Offset = chunk
   884  		p1.To.Type = obj.TYPE_REG
   885  		p1.To.Reg = src
   886  
   887  		p2 := s.Prog(riscv.AADD)
   888  		p2.From.Type = obj.TYPE_CONST
   889  		p2.From.Offset = chunk
   890  		p2.To.Type = obj.TYPE_REG
   891  		p2.To.Reg = dst
   892  
   893  		p3 := s.Prog(riscv.ABNE)
   894  		p3.From.Reg = riscv.REG_X6
   895  		p3.From.Type = obj.TYPE_REG
   896  		p3.Reg = src
   897  		p3.To.Type = obj.TYPE_BRANCH
   898  		p3.To.SetTarget(p.Link)
   899  
   900  		n %= chunk
   901  
   902  		var off int64
   903  		for n >= sz {
   904  			moveOp(s, mov, dst, src, tmp, off)
   905  			off += sz
   906  			n -= sz
   907  		}
   908  
   909  		for i := len(fracMovOps) - 1; i >= 0; i-- {
   910  			tsz := int64(1 << i)
   911  			if n < tsz {
   912  				continue
   913  			}
   914  			moveOp(s, fracMovOps[i], dst, src, tmp, off)
   915  			off += tsz
   916  			n -= tsz
   917  		}
   918  
   919  	case ssa.OpRISCV64LoweredNilCheck:
   920  		// Issue a load which will fault if arg is nil.
   921  		p := s.Prog(riscv.AMOVB)
   922  		p.From.Type = obj.TYPE_MEM
   923  		p.From.Reg = v.Args[0].Reg()
   924  		ssagen.AddAux(&p.From, v)
   925  		p.To.Type = obj.TYPE_REG
   926  		p.To.Reg = riscv.REG_ZERO
   927  		if logopt.Enabled() {
   928  			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
   929  		}
   930  		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
   931  			base.WarnfAt(v.Pos, "generated nil check")
   932  		}
   933  
   934  	case ssa.OpRISCV64LoweredGetClosurePtr:
   935  		// Closure pointer is S10 (riscv.REG_CTXT).
   936  		ssagen.CheckLoweredGetClosurePtr(v)
   937  
   938  	case ssa.OpRISCV64LoweredGetCallerSP:
   939  		// caller's SP is FixedFrameSize below the address of the first arg
   940  		p := s.Prog(riscv.AMOV)
   941  		p.From.Type = obj.TYPE_ADDR
   942  		p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
   943  		p.From.Name = obj.NAME_PARAM
   944  		p.To.Type = obj.TYPE_REG
   945  		p.To.Reg = v.Reg()
   946  
   947  	case ssa.OpRISCV64LoweredGetCallerPC:
   948  		p := s.Prog(obj.AGETCALLERPC)
   949  		p.To.Type = obj.TYPE_REG
   950  		p.To.Reg = v.Reg()
   951  
   952  	case ssa.OpRISCV64LoweredPubBarrier:
   953  		// FENCE
   954  		s.Prog(v.Op.Asm())
   955  
   956  	case ssa.OpRISCV64LoweredRound32F, ssa.OpRISCV64LoweredRound64F:
   957  		// input is already rounded
   958  
   959  	case ssa.OpClobber, ssa.OpClobberReg:
   960  		// TODO: implement for clobberdead experiment. Nop is ok for now.
   961  
   962  	default:
   963  		v.Fatalf("Unhandled op %v", v.Op)
   964  	}
   965  }
   966  
// blockBranch maps a conditional-branch SSA block kind to the
// corresponding RISC-V branch instruction. Only the block kinds
// handled by ssaGenBlock's conditional-branch case have entries.
var blockBranch = [...]obj.As{
	ssa.BlockRISCV64BEQ:  riscv.ABEQ,
	ssa.BlockRISCV64BEQZ: riscv.ABEQZ,
	ssa.BlockRISCV64BGE:  riscv.ABGE,
	ssa.BlockRISCV64BGEU: riscv.ABGEU,
	ssa.BlockRISCV64BGEZ: riscv.ABGEZ,
	ssa.BlockRISCV64BGTZ: riscv.ABGTZ,
	ssa.BlockRISCV64BLEZ: riscv.ABLEZ,
	ssa.BlockRISCV64BLT:  riscv.ABLT,
	ssa.BlockRISCV64BLTU: riscv.ABLTU,
	ssa.BlockRISCV64BLTZ: riscv.ABLTZ,
	ssa.BlockRISCV64BNE:  riscv.ABNE,
	ssa.BlockRISCV64BNEZ: riscv.ABNEZ,
}
   981  
   982  func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
   983  	s.SetPos(b.Pos)
   984  
   985  	switch b.Kind {
   986  	case ssa.BlockPlain, ssa.BlockDefer:
   987  		if b.Succs[0].Block() != next {
   988  			p := s.Prog(obj.AJMP)
   989  			p.To.Type = obj.TYPE_BRANCH
   990  			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
   991  		}
   992  	case ssa.BlockExit, ssa.BlockRetJmp:
   993  	case ssa.BlockRet:
   994  		s.Prog(obj.ARET)
   995  	case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BNEZ,
   996  		ssa.BlockRISCV64BLT, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BGEZ,
   997  		ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
   998  
   999  		as := blockBranch[b.Kind]
  1000  		invAs := riscv.InvertBranch(as)
  1001  
  1002  		var p *obj.Prog
  1003  		switch next {
  1004  		case b.Succs[0].Block():
  1005  			p = s.Br(invAs, b.Succs[1].Block())
  1006  		case b.Succs[1].Block():
  1007  			p = s.Br(as, b.Succs[0].Block())
  1008  		default:
  1009  			if b.Likely != ssa.BranchUnlikely {
  1010  				p = s.Br(as, b.Succs[0].Block())
  1011  				s.Br(obj.AJMP, b.Succs[1].Block())
  1012  			} else {
  1013  				p = s.Br(invAs, b.Succs[1].Block())
  1014  				s.Br(obj.AJMP, b.Succs[0].Block())
  1015  			}
  1016  		}
  1017  
  1018  		p.From.Type = obj.TYPE_REG
  1019  		switch b.Kind {
  1020  		case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BLT, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
  1021  			if b.NumControls() != 2 {
  1022  				b.Fatalf("Unexpected number of controls (%d != 2): %s", b.NumControls(), b.LongString())
  1023  			}
  1024  			p.From.Reg = b.Controls[0].Reg()
  1025  			p.Reg = b.Controls[1].Reg()
  1026  
  1027  		case ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNEZ, ssa.BlockRISCV64BGEZ, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ:
  1028  			if b.NumControls() != 1 {
  1029  				b.Fatalf("Unexpected number of controls (%d != 1): %s", b.NumControls(), b.LongString())
  1030  			}
  1031  			p.From.Reg = b.Controls[0].Reg()
  1032  		}
  1033  
  1034  	default:
  1035  		b.Fatalf("Unhandled block: %s", b.LongString())
  1036  	}
  1037  }
  1038  
  1039  func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
  1040  	p := s.Prog(loadByType(t))
  1041  	p.From.Type = obj.TYPE_MEM
  1042  	p.From.Name = obj.NAME_AUTO
  1043  	p.From.Sym = n.Linksym()
  1044  	p.From.Offset = n.FrameOffset() + off
  1045  	p.To.Type = obj.TYPE_REG
  1046  	p.To.Reg = reg
  1047  	return p
  1048  }
  1049  
  1050  func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
  1051  	p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
  1052  	p.To.Name = obj.NAME_PARAM
  1053  	p.To.Sym = n.Linksym()
  1054  	p.Pos = p.Pos.WithNotStmt()
  1055  	return p
  1056  }
  1057  
  1058  func zeroOp(s *ssagen.State, mov obj.As, reg int16, off int64) {
  1059  	p := s.Prog(mov)
  1060  	p.From.Type = obj.TYPE_REG
  1061  	p.From.Reg = riscv.REG_ZERO
  1062  	p.To.Type = obj.TYPE_MEM
  1063  	p.To.Reg = reg
  1064  	p.To.Offset = off
  1065  	return
  1066  }
  1067  
  1068  func moveOp(s *ssagen.State, mov obj.As, dst int16, src int16, tmp int16, off int64) {
  1069  	p := s.Prog(mov)
  1070  	p.From.Type = obj.TYPE_MEM
  1071  	p.From.Reg = src
  1072  	p.From.Offset = off
  1073  	p.To.Type = obj.TYPE_REG
  1074  	p.To.Reg = tmp
  1075  
  1076  	p1 := s.Prog(mov)
  1077  	p1.From.Type = obj.TYPE_REG
  1078  	p1.From.Reg = tmp
  1079  	p1.To.Type = obj.TYPE_MEM
  1080  	p1.To.Reg = dst
  1081  	p1.To.Offset = off
  1082  
  1083  	return
  1084  }
  1085  

View as plain text