Source file src/cmd/compile/internal/loong64/ssa.go

     1  // Copyright 2022 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package loong64
     6  
     7  import (
     8  	"math"
     9  
    10  	"cmd/compile/internal/base"
    11  	"cmd/compile/internal/ir"
    12  	"cmd/compile/internal/logopt"
    13  	"cmd/compile/internal/objw"
    14  	"cmd/compile/internal/ssa"
    15  	"cmd/compile/internal/ssagen"
    16  	"cmd/compile/internal/types"
    17  	"cmd/internal/obj"
    18  	"cmd/internal/obj/loong64"
    19  )
    20  
    21  // isFPreg reports whether r is an FP register.
    22  func isFPreg(r int16) bool {
    23  	return loong64.REG_F0 <= r && r <= loong64.REG_F31
    24  }
    25  
    26  // loadByType returns the load instruction of the given type.
    27  func loadByType(t *types.Type, r int16) obj.As {
    28  	if isFPreg(r) {
    29  		if t.Size() == 4 {
    30  			return loong64.AMOVF
    31  		} else {
    32  			return loong64.AMOVD
    33  		}
    34  	} else {
    35  		switch t.Size() {
    36  		case 1:
    37  			if t.IsSigned() {
    38  				return loong64.AMOVB
    39  			} else {
    40  				return loong64.AMOVBU
    41  			}
    42  		case 2:
    43  			if t.IsSigned() {
    44  				return loong64.AMOVH
    45  			} else {
    46  				return loong64.AMOVHU
    47  			}
    48  		case 4:
    49  			if t.IsSigned() {
    50  				return loong64.AMOVW
    51  			} else {
    52  				return loong64.AMOVWU
    53  			}
    54  		case 8:
    55  			return loong64.AMOVV
    56  		}
    57  	}
    58  	panic("bad load type")
    59  }
    60  
    61  // storeByType returns the store instruction of the given type.
    62  func storeByType(t *types.Type, r int16) obj.As {
    63  	if isFPreg(r) {
    64  		if t.Size() == 4 {
    65  			return loong64.AMOVF
    66  		} else {
    67  			return loong64.AMOVD
    68  		}
    69  	} else {
    70  		switch t.Size() {
    71  		case 1:
    72  			return loong64.AMOVB
    73  		case 2:
    74  			return loong64.AMOVH
    75  		case 4:
    76  			return loong64.AMOVW
    77  		case 8:
    78  			return loong64.AMOVV
    79  		}
    80  	}
    81  	panic("bad store type")
    82  }
    83  
    84  // largestMove returns the largest move instruction possible and its size,
    85  // given the alignment of the total size of the move.
    86  //
    87  // e.g., a 16-byte move may use MOVV, but an 11-byte move must use MOVB.
    88  //
    89  // Note that the moves may not be on naturally aligned addresses depending on
    90  // the source and destination.
    91  //
    92  // This matches the calculation in ssa.moveSize.
    93  func largestMove(alignment int64) (obj.As, int64) {
    94  	switch {
    95  	case alignment%8 == 0:
    96  		return loong64.AMOVV, 8
    97  	case alignment%4 == 0:
    98  		return loong64.AMOVW, 4
    99  	case alignment%2 == 0:
   100  		return loong64.AMOVH, 2
   101  	default:
   102  		return loong64.AMOVB, 1
   103  	}
   104  }
   105  
   106  func ssaGenValue(s *ssagen.State, v *ssa.Value) {
   107  	switch v.Op {
   108  	case ssa.OpCopy, ssa.OpLOONG64MOVVreg:
   109  		if v.Type.IsMemory() {
   110  			return
   111  		}
   112  		x := v.Args[0].Reg()
   113  		y := v.Reg()
   114  		if x == y {
   115  			return
   116  		}
   117  		as := loong64.AMOVV
   118  		if isFPreg(x) && isFPreg(y) {
   119  			as = loong64.AMOVD
   120  		}
   121  		p := s.Prog(as)
   122  		p.From.Type = obj.TYPE_REG
   123  		p.From.Reg = x
   124  		p.To.Type = obj.TYPE_REG
   125  		p.To.Reg = y
   126  	case ssa.OpLOONG64MOVVnop,
   127  		ssa.OpLOONG64LoweredRound32F,
   128  		ssa.OpLOONG64LoweredRound64F:
   129  		// nothing to do
   130  	case ssa.OpLoadReg:
   131  		if v.Type.IsFlags() {
   132  			v.Fatalf("load flags not implemented: %v", v.LongString())
   133  			return
   134  		}
   135  		r := v.Reg()
   136  		p := s.Prog(loadByType(v.Type, r))
   137  		ssagen.AddrAuto(&p.From, v.Args[0])
   138  		p.To.Type = obj.TYPE_REG
   139  		p.To.Reg = r
   140  	case ssa.OpStoreReg:
   141  		if v.Type.IsFlags() {
   142  			v.Fatalf("store flags not implemented: %v", v.LongString())
   143  			return
   144  		}
   145  		r := v.Args[0].Reg()
   146  		p := s.Prog(storeByType(v.Type, r))
   147  		p.From.Type = obj.TYPE_REG
   148  		p.From.Reg = r
   149  		ssagen.AddrAuto(&p.To, v)
   150  	case ssa.OpArgIntReg, ssa.OpArgFloatReg:
   151  		// The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
   152  		// The loop only runs once.
   153  		for _, a := range v.Block.Func.RegArgs {
   154  			// Pass the spill/unspill information along to the assembler, offset by size of
   155  			// the saved LR slot.
   156  			addr := ssagen.SpillSlotAddr(a, loong64.REGSP, base.Ctxt.Arch.FixedFrameSize)
   157  			s.FuncInfo().AddSpill(
   158  				obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type, a.Reg), Spill: storeByType(a.Type, a.Reg)})
   159  		}
   160  		v.Block.Func.RegArgs = nil
   161  		ssagen.CheckArgReg(v)
   162  	case ssa.OpLOONG64ADDV,
   163  		ssa.OpLOONG64SUBV,
   164  		ssa.OpLOONG64AND,
   165  		ssa.OpLOONG64OR,
   166  		ssa.OpLOONG64XOR,
   167  		ssa.OpLOONG64NOR,
   168  		ssa.OpLOONG64ANDN,
   169  		ssa.OpLOONG64ORN,
   170  		ssa.OpLOONG64SLL,
   171  		ssa.OpLOONG64SLLV,
   172  		ssa.OpLOONG64SRL,
   173  		ssa.OpLOONG64SRLV,
   174  		ssa.OpLOONG64SRA,
   175  		ssa.OpLOONG64SRAV,
   176  		ssa.OpLOONG64ROTR,
   177  		ssa.OpLOONG64ROTRV,
   178  		ssa.OpLOONG64ADDF,
   179  		ssa.OpLOONG64ADDD,
   180  		ssa.OpLOONG64SUBF,
   181  		ssa.OpLOONG64SUBD,
   182  		ssa.OpLOONG64MULF,
   183  		ssa.OpLOONG64MULD,
   184  		ssa.OpLOONG64DIVF,
   185  		ssa.OpLOONG64DIVD,
   186  		ssa.OpLOONG64MULV, ssa.OpLOONG64MULHV, ssa.OpLOONG64MULHVU,
   187  		ssa.OpLOONG64DIVV, ssa.OpLOONG64REMV, ssa.OpLOONG64DIVVU, ssa.OpLOONG64REMVU,
   188  		ssa.OpLOONG64FCOPYSGD:
   189  		p := s.Prog(v.Op.Asm())
   190  		p.From.Type = obj.TYPE_REG
   191  		p.From.Reg = v.Args[1].Reg()
   192  		p.Reg = v.Args[0].Reg()
   193  		p.To.Type = obj.TYPE_REG
   194  		p.To.Reg = v.Reg()
   195  
   196  	case ssa.OpLOONG64BSTRPICKV,
   197  		ssa.OpLOONG64BSTRPICKW:
   198  		p := s.Prog(v.Op.Asm())
   199  		p.From.Type = obj.TYPE_CONST
   200  		if v.Op == ssa.OpLOONG64BSTRPICKW {
   201  			p.From.Offset = v.AuxInt >> 5
   202  			p.AddRestSourceConst(v.AuxInt & 0x1f)
   203  		} else {
   204  			p.From.Offset = v.AuxInt >> 6
   205  			p.AddRestSourceConst(v.AuxInt & 0x3f)
   206  		}
   207  		p.Reg = v.Args[0].Reg()
   208  		p.To.Type = obj.TYPE_REG
   209  		p.To.Reg = v.Reg()
   210  
   211  	case ssa.OpLOONG64FMINF,
   212  		ssa.OpLOONG64FMIND,
   213  		ssa.OpLOONG64FMAXF,
   214  		ssa.OpLOONG64FMAXD:
   215  		// ADDD Rarg0, Rarg1, Rout
   216  		// CMPEQD Rarg0, Rarg0, FCC0
   217  		// bceqz FCC0, end
   218  		// CMPEQD Rarg1, Rarg1, FCC0
   219  		// bceqz FCC0, end
   220  		// F(MIN|MAX)(F|D)
   221  
   222  		r0 := v.Args[0].Reg()
   223  		r1 := v.Args[1].Reg()
   224  		out := v.Reg()
   225  		add, fcmp := loong64.AADDD, loong64.ACMPEQD
   226  		if v.Op == ssa.OpLOONG64FMINF || v.Op == ssa.OpLOONG64FMAXF {
   227  			add = loong64.AADDF
   228  			fcmp = loong64.ACMPEQF
   229  		}
   230  		p1 := s.Prog(add)
   231  		p1.From.Type = obj.TYPE_REG
   232  		p1.From.Reg = r0
   233  		p1.Reg = r1
   234  		p1.To.Type = obj.TYPE_REG
   235  		p1.To.Reg = out
   236  
   237  		p2 := s.Prog(fcmp)
   238  		p2.From.Type = obj.TYPE_REG
   239  		p2.From.Reg = r0
   240  		p2.Reg = r0
   241  		p2.To.Type = obj.TYPE_REG
   242  		p2.To.Reg = loong64.REG_FCC0
   243  
   244  		p3 := s.Prog(loong64.ABFPF)
   245  		p3.To.Type = obj.TYPE_BRANCH
   246  
   247  		p4 := s.Prog(fcmp)
   248  		p4.From.Type = obj.TYPE_REG
   249  		p4.From.Reg = r1
   250  		p4.Reg = r1
   251  		p4.To.Type = obj.TYPE_REG
   252  		p4.To.Reg = loong64.REG_FCC0
   253  
   254  		p5 := s.Prog(loong64.ABFPF)
   255  		p5.To.Type = obj.TYPE_BRANCH
   256  
   257  		p6 := s.Prog(v.Op.Asm())
   258  		p6.From.Type = obj.TYPE_REG
   259  		p6.From.Reg = r1
   260  		p6.Reg = r0
   261  		p6.To.Type = obj.TYPE_REG
   262  		p6.To.Reg = out
   263  
   264  		nop := s.Prog(obj.ANOP)
   265  		p3.To.SetTarget(nop)
   266  		p5.To.SetTarget(nop)
   267  
   268  	case ssa.OpLOONG64SGT,
   269  		ssa.OpLOONG64SGTU:
   270  		p := s.Prog(v.Op.Asm())
   271  		p.From.Type = obj.TYPE_REG
   272  		p.From.Reg = v.Args[0].Reg()
   273  		p.Reg = v.Args[1].Reg()
   274  		p.To.Type = obj.TYPE_REG
   275  		p.To.Reg = v.Reg()
   276  	case ssa.OpLOONG64ADDVconst,
   277  		ssa.OpLOONG64SUBVconst,
   278  		ssa.OpLOONG64ANDconst,
   279  		ssa.OpLOONG64ORconst,
   280  		ssa.OpLOONG64XORconst,
   281  		ssa.OpLOONG64SLLconst,
   282  		ssa.OpLOONG64SLLVconst,
   283  		ssa.OpLOONG64SRLconst,
   284  		ssa.OpLOONG64SRLVconst,
   285  		ssa.OpLOONG64SRAconst,
   286  		ssa.OpLOONG64SRAVconst,
   287  		ssa.OpLOONG64ROTRconst,
   288  		ssa.OpLOONG64ROTRVconst,
   289  		ssa.OpLOONG64SGTconst,
   290  		ssa.OpLOONG64SGTUconst:
   291  		p := s.Prog(v.Op.Asm())
   292  		p.From.Type = obj.TYPE_CONST
   293  		p.From.Offset = v.AuxInt
   294  		p.Reg = v.Args[0].Reg()
   295  		p.To.Type = obj.TYPE_REG
   296  		p.To.Reg = v.Reg()
   297  
   298  	case ssa.OpLOONG64NORconst:
   299  		// MOVV $const, Rtmp
   300  		// NOR  Rtmp, Rarg0, Rout
   301  		p := s.Prog(loong64.AMOVV)
   302  		p.From.Type = obj.TYPE_CONST
   303  		p.From.Offset = v.AuxInt
   304  		p.To.Type = obj.TYPE_REG
   305  		p.To.Reg = loong64.REGTMP
   306  
   307  		p2 := s.Prog(v.Op.Asm())
   308  		p2.From.Type = obj.TYPE_REG
   309  		p2.From.Reg = loong64.REGTMP
   310  		p2.Reg = v.Args[0].Reg()
   311  		p2.To.Type = obj.TYPE_REG
   312  		p2.To.Reg = v.Reg()
   313  
   314  	case ssa.OpLOONG64MOVVconst:
   315  		r := v.Reg()
   316  		p := s.Prog(v.Op.Asm())
   317  		p.From.Type = obj.TYPE_CONST
   318  		p.From.Offset = v.AuxInt
   319  		p.To.Type = obj.TYPE_REG
   320  		p.To.Reg = r
   321  		if isFPreg(r) {
   322  			// cannot move into FP or special registers, use TMP as intermediate
   323  			p.To.Reg = loong64.REGTMP
   324  			p = s.Prog(loong64.AMOVV)
   325  			p.From.Type = obj.TYPE_REG
   326  			p.From.Reg = loong64.REGTMP
   327  			p.To.Type = obj.TYPE_REG
   328  			p.To.Reg = r
   329  		}
   330  	case ssa.OpLOONG64MOVFconst,
   331  		ssa.OpLOONG64MOVDconst:
   332  		p := s.Prog(v.Op.Asm())
   333  		p.From.Type = obj.TYPE_FCONST
   334  		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
   335  		p.To.Type = obj.TYPE_REG
   336  		p.To.Reg = v.Reg()
   337  	case ssa.OpLOONG64CMPEQF,
   338  		ssa.OpLOONG64CMPEQD,
   339  		ssa.OpLOONG64CMPGEF,
   340  		ssa.OpLOONG64CMPGED,
   341  		ssa.OpLOONG64CMPGTF,
   342  		ssa.OpLOONG64CMPGTD:
   343  		p := s.Prog(v.Op.Asm())
   344  		p.From.Type = obj.TYPE_REG
   345  		p.From.Reg = v.Args[0].Reg()
   346  		p.Reg = v.Args[1].Reg()
   347  		p.To.Type = obj.TYPE_REG
   348  		p.To.Reg = loong64.REG_FCC0
   349  
   350  	case ssa.OpLOONG64FMADDF,
   351  		ssa.OpLOONG64FMADDD,
   352  		ssa.OpLOONG64FMSUBF,
   353  		ssa.OpLOONG64FMSUBD,
   354  		ssa.OpLOONG64FNMADDF,
   355  		ssa.OpLOONG64FNMADDD,
   356  		ssa.OpLOONG64FNMSUBF,
   357  		ssa.OpLOONG64FNMSUBD:
   358  		p := s.Prog(v.Op.Asm())
   359  		// r=(FMA x y z) -> FMADDD z, y, x, r
   360  		// the SSA operand order is for taking advantage of
   361  		// commutativity (that only applies for the first two operands)
   362  		r := v.Reg()
   363  		x := v.Args[0].Reg()
   364  		y := v.Args[1].Reg()
   365  		z := v.Args[2].Reg()
   366  		p.From.Type = obj.TYPE_REG
   367  		p.From.Reg = z
   368  		p.Reg = y
   369  		p.AddRestSourceReg(x)
   370  		p.To.Type = obj.TYPE_REG
   371  		p.To.Reg = r
   372  
   373  	case ssa.OpLOONG64MOVVaddr:
   374  		p := s.Prog(loong64.AMOVV)
   375  		p.From.Type = obj.TYPE_ADDR
   376  		p.From.Reg = v.Args[0].Reg()
   377  		var wantreg string
   378  		// MOVV $sym+off(base), R
   379  		// the assembler expands it as the following:
   380  		// - base is SP: add constant offset to SP (R3)
   381  		// when constant is large, tmp register (R30) may be used
   382  		// - base is SB: load external address with relocation
   383  		switch v.Aux.(type) {
   384  		default:
   385  			v.Fatalf("aux is of unknown type %T", v.Aux)
   386  		case *obj.LSym:
   387  			wantreg = "SB"
   388  			ssagen.AddAux(&p.From, v)
   389  		case *ir.Name:
   390  			wantreg = "SP"
   391  			ssagen.AddAux(&p.From, v)
   392  		case nil:
   393  			// No sym, just MOVV $off(SP), R
   394  			wantreg = "SP"
   395  			p.From.Offset = v.AuxInt
   396  		}
   397  		if reg := v.Args[0].RegName(); reg != wantreg {
   398  			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
   399  		}
   400  		p.To.Type = obj.TYPE_REG
   401  		p.To.Reg = v.Reg()
   402  
   403  	case ssa.OpLOONG64MOVBloadidx,
   404  		ssa.OpLOONG64MOVBUloadidx,
   405  		ssa.OpLOONG64MOVHloadidx,
   406  		ssa.OpLOONG64MOVHUloadidx,
   407  		ssa.OpLOONG64MOVWloadidx,
   408  		ssa.OpLOONG64MOVWUloadidx,
   409  		ssa.OpLOONG64MOVVloadidx,
   410  		ssa.OpLOONG64MOVFloadidx,
   411  		ssa.OpLOONG64MOVDloadidx:
   412  		p := s.Prog(v.Op.Asm())
   413  		p.From.Type = obj.TYPE_MEM
   414  		p.From.Name = obj.NAME_NONE
   415  		p.From.Reg = v.Args[0].Reg()
   416  		p.From.Index = v.Args[1].Reg()
   417  		p.To.Type = obj.TYPE_REG
   418  		p.To.Reg = v.Reg()
   419  
   420  	case ssa.OpLOONG64MOVBstoreidx,
   421  		ssa.OpLOONG64MOVHstoreidx,
   422  		ssa.OpLOONG64MOVWstoreidx,
   423  		ssa.OpLOONG64MOVVstoreidx,
   424  		ssa.OpLOONG64MOVFstoreidx,
   425  		ssa.OpLOONG64MOVDstoreidx:
   426  		p := s.Prog(v.Op.Asm())
   427  		p.From.Type = obj.TYPE_REG
   428  		p.From.Reg = v.Args[2].Reg()
   429  		p.To.Type = obj.TYPE_MEM
   430  		p.To.Name = obj.NAME_NONE
   431  		p.To.Reg = v.Args[0].Reg()
   432  		p.To.Index = v.Args[1].Reg()
   433  
   434  	case ssa.OpLOONG64MOVBstorezeroidx,
   435  		ssa.OpLOONG64MOVHstorezeroidx,
   436  		ssa.OpLOONG64MOVWstorezeroidx,
   437  		ssa.OpLOONG64MOVVstorezeroidx:
   438  		p := s.Prog(v.Op.Asm())
   439  		p.From.Type = obj.TYPE_REG
   440  		p.From.Reg = loong64.REGZERO
   441  		p.To.Type = obj.TYPE_MEM
   442  		p.To.Name = obj.NAME_NONE
   443  		p.To.Reg = v.Args[0].Reg()
   444  		p.To.Index = v.Args[1].Reg()
   445  
   446  	case ssa.OpLOONG64MOVBload,
   447  		ssa.OpLOONG64MOVBUload,
   448  		ssa.OpLOONG64MOVHload,
   449  		ssa.OpLOONG64MOVHUload,
   450  		ssa.OpLOONG64MOVWload,
   451  		ssa.OpLOONG64MOVWUload,
   452  		ssa.OpLOONG64MOVVload,
   453  		ssa.OpLOONG64MOVFload,
   454  		ssa.OpLOONG64MOVDload:
   455  		p := s.Prog(v.Op.Asm())
   456  		p.From.Type = obj.TYPE_MEM
   457  		p.From.Reg = v.Args[0].Reg()
   458  		ssagen.AddAux(&p.From, v)
   459  		p.To.Type = obj.TYPE_REG
   460  		p.To.Reg = v.Reg()
   461  	case ssa.OpLOONG64MOVBstore,
   462  		ssa.OpLOONG64MOVHstore,
   463  		ssa.OpLOONG64MOVWstore,
   464  		ssa.OpLOONG64MOVVstore,
   465  		ssa.OpLOONG64MOVFstore,
   466  		ssa.OpLOONG64MOVDstore:
   467  		p := s.Prog(v.Op.Asm())
   468  		p.From.Type = obj.TYPE_REG
   469  		p.From.Reg = v.Args[1].Reg()
   470  		p.To.Type = obj.TYPE_MEM
   471  		p.To.Reg = v.Args[0].Reg()
   472  		ssagen.AddAux(&p.To, v)
   473  	case ssa.OpLOONG64MOVBstorezero,
   474  		ssa.OpLOONG64MOVHstorezero,
   475  		ssa.OpLOONG64MOVWstorezero,
   476  		ssa.OpLOONG64MOVVstorezero:
   477  		p := s.Prog(v.Op.Asm())
   478  		p.From.Type = obj.TYPE_REG
   479  		p.From.Reg = loong64.REGZERO
   480  		p.To.Type = obj.TYPE_MEM
   481  		p.To.Reg = v.Args[0].Reg()
   482  		ssagen.AddAux(&p.To, v)
   483  	case ssa.OpLOONG64MOVBreg,
   484  		ssa.OpLOONG64MOVBUreg,
   485  		ssa.OpLOONG64MOVHreg,
   486  		ssa.OpLOONG64MOVHUreg,
   487  		ssa.OpLOONG64MOVWreg,
   488  		ssa.OpLOONG64MOVWUreg:
   489  		a := v.Args[0]
   490  		for a.Op == ssa.OpCopy || a.Op == ssa.OpLOONG64MOVVreg {
   491  			a = a.Args[0]
   492  		}
   493  		if a.Op == ssa.OpLoadReg && loong64.REG_R0 <= a.Reg() && a.Reg() <= loong64.REG_R31 {
   494  			// LoadReg from a narrower type does an extension, except loading
   495  			// to a floating point register. So only eliminate the extension
   496  			// if it is loaded to an integer register.
   497  
   498  			t := a.Type
   499  			switch {
   500  			case v.Op == ssa.OpLOONG64MOVBreg && t.Size() == 1 && t.IsSigned(),
   501  				v.Op == ssa.OpLOONG64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
   502  				v.Op == ssa.OpLOONG64MOVHreg && t.Size() == 2 && t.IsSigned(),
   503  				v.Op == ssa.OpLOONG64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
   504  				v.Op == ssa.OpLOONG64MOVWreg && t.Size() == 4 && t.IsSigned(),
   505  				v.Op == ssa.OpLOONG64MOVWUreg && t.Size() == 4 && !t.IsSigned():
   506  				// arg is a proper-typed load, already zero/sign-extended, don't extend again
   507  				if v.Reg() == v.Args[0].Reg() {
   508  					return
   509  				}
   510  				p := s.Prog(loong64.AMOVV)
   511  				p.From.Type = obj.TYPE_REG
   512  				p.From.Reg = v.Args[0].Reg()
   513  				p.To.Type = obj.TYPE_REG
   514  				p.To.Reg = v.Reg()
   515  				return
   516  			default:
   517  			}
   518  		}
   519  		fallthrough
   520  
   521  	case ssa.OpLOONG64MOVWF,
   522  		ssa.OpLOONG64MOVWD,
   523  		ssa.OpLOONG64TRUNCFW,
   524  		ssa.OpLOONG64TRUNCDW,
   525  		ssa.OpLOONG64MOVVF,
   526  		ssa.OpLOONG64MOVVD,
   527  		ssa.OpLOONG64TRUNCFV,
   528  		ssa.OpLOONG64TRUNCDV,
   529  		ssa.OpLOONG64MOVFD,
   530  		ssa.OpLOONG64MOVDF,
   531  		ssa.OpLOONG64MOVWfpgp,
   532  		ssa.OpLOONG64MOVWgpfp,
   533  		ssa.OpLOONG64MOVVfpgp,
   534  		ssa.OpLOONG64MOVVgpfp,
   535  		ssa.OpLOONG64NEGF,
   536  		ssa.OpLOONG64NEGD,
   537  		ssa.OpLOONG64CLZW,
   538  		ssa.OpLOONG64CLZV,
   539  		ssa.OpLOONG64CTZW,
   540  		ssa.OpLOONG64CTZV,
   541  		ssa.OpLOONG64SQRTD,
   542  		ssa.OpLOONG64SQRTF,
   543  		ssa.OpLOONG64REVB2H,
   544  		ssa.OpLOONG64REVB2W,
   545  		ssa.OpLOONG64REVBV,
   546  		ssa.OpLOONG64BITREV4B,
   547  		ssa.OpLOONG64BITREVW,
   548  		ssa.OpLOONG64BITREVV,
   549  		ssa.OpLOONG64ABSD:
   550  		p := s.Prog(v.Op.Asm())
   551  		p.From.Type = obj.TYPE_REG
   552  		p.From.Reg = v.Args[0].Reg()
   553  		p.To.Type = obj.TYPE_REG
   554  		p.To.Reg = v.Reg()
   555  
   556  	case ssa.OpLOONG64VPCNT64,
   557  		ssa.OpLOONG64VPCNT32,
   558  		ssa.OpLOONG64VPCNT16:
   559  		p := s.Prog(v.Op.Asm())
   560  		p.From.Type = obj.TYPE_REG
   561  		p.From.Reg = ((v.Args[0].Reg() - loong64.REG_F0) & 31) + loong64.REG_V0
   562  		p.To.Type = obj.TYPE_REG
   563  		p.To.Reg = ((v.Reg() - loong64.REG_F0) & 31) + loong64.REG_V0
   564  
   565  	case ssa.OpLOONG64NEGV:
   566  		// SUB from REGZERO
   567  		p := s.Prog(loong64.ASUBVU)
   568  		p.From.Type = obj.TYPE_REG
   569  		p.From.Reg = v.Args[0].Reg()
   570  		p.Reg = loong64.REGZERO
   571  		p.To.Type = obj.TYPE_REG
   572  		p.To.Reg = v.Reg()
   573  
   574  	case ssa.OpLOONG64DUFFZERO:
   575  		// runtime.duffzero expects start address in R20
   576  		p := s.Prog(obj.ADUFFZERO)
   577  		p.To.Type = obj.TYPE_MEM
   578  		p.To.Name = obj.NAME_EXTERN
   579  		p.To.Sym = ir.Syms.Duffzero
   580  		p.To.Offset = v.AuxInt
   581  	case ssa.OpLOONG64LoweredZero:
   582  		// MOVx	R0, (Rarg0)
   583  		// ADDV	$sz, Rarg0
   584  		// BGEU	Rarg1, Rarg0, -2(PC)
   585  		mov, sz := largestMove(v.AuxInt)
   586  		p := s.Prog(mov)
   587  		p.From.Type = obj.TYPE_REG
   588  		p.From.Reg = loong64.REGZERO
   589  		p.To.Type = obj.TYPE_MEM
   590  		p.To.Reg = v.Args[0].Reg()
   591  
   592  		p2 := s.Prog(loong64.AADDVU)
   593  		p2.From.Type = obj.TYPE_CONST
   594  		p2.From.Offset = sz
   595  		p2.To.Type = obj.TYPE_REG
   596  		p2.To.Reg = v.Args[0].Reg()
   597  
   598  		p3 := s.Prog(loong64.ABGEU)
   599  		p3.From.Type = obj.TYPE_REG
   600  		p3.From.Reg = v.Args[1].Reg()
   601  		p3.Reg = v.Args[0].Reg()
   602  		p3.To.Type = obj.TYPE_BRANCH
   603  		p3.To.SetTarget(p)
   604  
   605  	case ssa.OpLOONG64DUFFCOPY:
   606  		p := s.Prog(obj.ADUFFCOPY)
   607  		p.To.Type = obj.TYPE_MEM
   608  		p.To.Name = obj.NAME_EXTERN
   609  		p.To.Sym = ir.Syms.Duffcopy
   610  		p.To.Offset = v.AuxInt
   611  	case ssa.OpLOONG64LoweredMove:
   612  		// MOVx	(Rarg1), Rtmp
   613  		// MOVx	Rtmp, (Rarg0)
   614  		// ADDV	$sz, Rarg1
   615  		// ADDV	$sz, Rarg0
   616  		// BGEU	Rarg2, Rarg0, -4(PC)
   617  		mov, sz := largestMove(v.AuxInt)
   618  		p := s.Prog(mov)
   619  		p.From.Type = obj.TYPE_MEM
   620  		p.From.Reg = v.Args[1].Reg()
   621  		p.To.Type = obj.TYPE_REG
   622  		p.To.Reg = loong64.REGTMP
   623  
   624  		p2 := s.Prog(mov)
   625  		p2.From.Type = obj.TYPE_REG
   626  		p2.From.Reg = loong64.REGTMP
   627  		p2.To.Type = obj.TYPE_MEM
   628  		p2.To.Reg = v.Args[0].Reg()
   629  
   630  		p3 := s.Prog(loong64.AADDVU)
   631  		p3.From.Type = obj.TYPE_CONST
   632  		p3.From.Offset = sz
   633  		p3.To.Type = obj.TYPE_REG
   634  		p3.To.Reg = v.Args[1].Reg()
   635  
   636  		p4 := s.Prog(loong64.AADDVU)
   637  		p4.From.Type = obj.TYPE_CONST
   638  		p4.From.Offset = sz
   639  		p4.To.Type = obj.TYPE_REG
   640  		p4.To.Reg = v.Args[0].Reg()
   641  
   642  		p5 := s.Prog(loong64.ABGEU)
   643  		p5.From.Type = obj.TYPE_REG
   644  		p5.From.Reg = v.Args[2].Reg()
   645  		p5.Reg = v.Args[1].Reg()
   646  		p5.To.Type = obj.TYPE_BRANCH
   647  		p5.To.SetTarget(p)
   648  
   649  	case ssa.OpLOONG64CALLstatic, ssa.OpLOONG64CALLclosure, ssa.OpLOONG64CALLinter:
   650  		s.Call(v)
   651  	case ssa.OpLOONG64CALLtail:
   652  		s.TailCall(v)
   653  	case ssa.OpLOONG64LoweredWB:
   654  		p := s.Prog(obj.ACALL)
   655  		p.To.Type = obj.TYPE_MEM
   656  		p.To.Name = obj.NAME_EXTERN
   657  		// AuxInt encodes how many buffer entries we need.
   658  		p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
   659  
   660  	case ssa.OpLOONG64LoweredPubBarrier:
   661  		// DBAR 0x1A
   662  		p := s.Prog(v.Op.Asm())
   663  		p.From.Type = obj.TYPE_CONST
   664  		p.From.Offset = 0x1A
   665  
   666  	case ssa.OpLOONG64LoweredPanicBoundsA, ssa.OpLOONG64LoweredPanicBoundsB, ssa.OpLOONG64LoweredPanicBoundsC:
   667  		p := s.Prog(obj.ACALL)
   668  		p.To.Type = obj.TYPE_MEM
   669  		p.To.Name = obj.NAME_EXTERN
   670  		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
   671  		s.UseArgs(16) // space used in callee args area by assembly stubs
   672  	case ssa.OpLOONG64LoweredAtomicLoad8, ssa.OpLOONG64LoweredAtomicLoad32, ssa.OpLOONG64LoweredAtomicLoad64:
   673  		// MOVB	(Rarg0), Rout
   674  		// DBAR	0x14
   675  		as := loong64.AMOVV
   676  		switch v.Op {
   677  		case ssa.OpLOONG64LoweredAtomicLoad8:
   678  			as = loong64.AMOVB
   679  		case ssa.OpLOONG64LoweredAtomicLoad32:
   680  			as = loong64.AMOVW
   681  		}
   682  		p := s.Prog(as)
   683  		p.From.Type = obj.TYPE_MEM
   684  		p.From.Reg = v.Args[0].Reg()
   685  		p.To.Type = obj.TYPE_REG
   686  		p.To.Reg = v.Reg0()
   687  		p1 := s.Prog(loong64.ADBAR)
   688  		p1.From.Type = obj.TYPE_CONST
   689  		p1.From.Offset = 0x14
   690  
   691  	case ssa.OpLOONG64LoweredAtomicStore8,
   692  		ssa.OpLOONG64LoweredAtomicStore32,
   693  		ssa.OpLOONG64LoweredAtomicStore64:
   694  		// DBAR 0x12
   695  		// MOVx (Rarg1), Rout
   696  		// DBAR 0x18
   697  		movx := loong64.AMOVV
   698  		switch v.Op {
   699  		case ssa.OpLOONG64LoweredAtomicStore8:
   700  			movx = loong64.AMOVB
   701  		case ssa.OpLOONG64LoweredAtomicStore32:
   702  			movx = loong64.AMOVW
   703  		}
   704  		p := s.Prog(loong64.ADBAR)
   705  		p.From.Type = obj.TYPE_CONST
   706  		p.From.Offset = 0x12
   707  
   708  		p1 := s.Prog(movx)
   709  		p1.From.Type = obj.TYPE_REG
   710  		p1.From.Reg = v.Args[1].Reg()
   711  		p1.To.Type = obj.TYPE_MEM
   712  		p1.To.Reg = v.Args[0].Reg()
   713  
   714  		p2 := s.Prog(loong64.ADBAR)
   715  		p2.From.Type = obj.TYPE_CONST
   716  		p2.From.Offset = 0x18
   717  
   718  	case ssa.OpLOONG64LoweredAtomicStore8Variant,
   719  		ssa.OpLOONG64LoweredAtomicStore32Variant,
   720  		ssa.OpLOONG64LoweredAtomicStore64Variant:
   721  		//AMSWAPx  Rarg1, (Rarg0), Rout
   722  		amswapx := loong64.AAMSWAPDBV
   723  		switch v.Op {
   724  		case ssa.OpLOONG64LoweredAtomicStore32Variant:
   725  			amswapx = loong64.AAMSWAPDBW
   726  		case ssa.OpLOONG64LoweredAtomicStore8Variant:
   727  			amswapx = loong64.AAMSWAPDBB
   728  		}
   729  		p := s.Prog(amswapx)
   730  		p.From.Type = obj.TYPE_REG
   731  		p.From.Reg = v.Args[1].Reg()
   732  		p.To.Type = obj.TYPE_MEM
   733  		p.To.Reg = v.Args[0].Reg()
   734  		p.RegTo2 = loong64.REGZERO
   735  
   736  	case ssa.OpLOONG64LoweredAtomicExchange32, ssa.OpLOONG64LoweredAtomicExchange64:
   737  		// AMSWAPx	Rarg1, (Rarg0), Rout
   738  		amswapx := loong64.AAMSWAPDBV
   739  		if v.Op == ssa.OpLOONG64LoweredAtomicExchange32 {
   740  			amswapx = loong64.AAMSWAPDBW
   741  		}
   742  		p := s.Prog(amswapx)
   743  		p.From.Type = obj.TYPE_REG
   744  		p.From.Reg = v.Args[1].Reg()
   745  		p.To.Type = obj.TYPE_MEM
   746  		p.To.Reg = v.Args[0].Reg()
   747  		p.RegTo2 = v.Reg0()
   748  
   749  	case ssa.OpLOONG64LoweredAtomicExchange8Variant:
   750  		// AMSWAPDBB	Rarg1, (Rarg0), Rout
   751  		p := s.Prog(loong64.AAMSWAPDBB)
   752  		p.From.Type = obj.TYPE_REG
   753  		p.From.Reg = v.Args[1].Reg()
   754  		p.To.Type = obj.TYPE_MEM
   755  		p.To.Reg = v.Args[0].Reg()
   756  		p.RegTo2 = v.Reg0()
   757  
   758  	case ssa.OpLOONG64LoweredAtomicAdd32, ssa.OpLOONG64LoweredAtomicAdd64:
   759  		// AMADDx  Rarg1, (Rarg0), Rout
   760  		// ADDV    Rarg1, Rout, Rout
   761  		amaddx := loong64.AAMADDDBV
   762  		addx := loong64.AADDV
   763  		if v.Op == ssa.OpLOONG64LoweredAtomicAdd32 {
   764  			amaddx = loong64.AAMADDDBW
   765  		}
   766  		p := s.Prog(amaddx)
   767  		p.From.Type = obj.TYPE_REG
   768  		p.From.Reg = v.Args[1].Reg()
   769  		p.To.Type = obj.TYPE_MEM
   770  		p.To.Reg = v.Args[0].Reg()
   771  		p.RegTo2 = v.Reg0()
   772  
   773  		p1 := s.Prog(addx)
   774  		p1.From.Type = obj.TYPE_REG
   775  		p1.From.Reg = v.Args[1].Reg()
   776  		p1.Reg = v.Reg0()
   777  		p1.To.Type = obj.TYPE_REG
   778  		p1.To.Reg = v.Reg0()
   779  
   780  	case ssa.OpLOONG64LoweredAtomicCas32, ssa.OpLOONG64LoweredAtomicCas64:
   781  		// MOVV $0, Rout
   782  		// DBAR 0x14
   783  		// LL	(Rarg0), Rtmp
   784  		// BNE	Rtmp, Rarg1, 4(PC)
   785  		// MOVV Rarg2, Rout
   786  		// SC	Rout, (Rarg0)
   787  		// BEQ	Rout, -4(PC)
   788  		// DBAR 0x12
   789  		ll := loong64.ALLV
   790  		sc := loong64.ASCV
   791  		if v.Op == ssa.OpLOONG64LoweredAtomicCas32 {
   792  			ll = loong64.ALL
   793  			sc = loong64.ASC
   794  		}
   795  
   796  		p := s.Prog(loong64.AMOVV)
   797  		p.From.Type = obj.TYPE_REG
   798  		p.From.Reg = loong64.REGZERO
   799  		p.To.Type = obj.TYPE_REG
   800  		p.To.Reg = v.Reg0()
   801  
   802  		p1 := s.Prog(loong64.ADBAR)
   803  		p1.From.Type = obj.TYPE_CONST
   804  		p1.From.Offset = 0x14
   805  
   806  		p2 := s.Prog(ll)
   807  		p2.From.Type = obj.TYPE_MEM
   808  		p2.From.Reg = v.Args[0].Reg()
   809  		p2.To.Type = obj.TYPE_REG
   810  		p2.To.Reg = loong64.REGTMP
   811  
   812  		p3 := s.Prog(loong64.ABNE)
   813  		p3.From.Type = obj.TYPE_REG
   814  		p3.From.Reg = v.Args[1].Reg()
   815  		p3.Reg = loong64.REGTMP
   816  		p3.To.Type = obj.TYPE_BRANCH
   817  
   818  		p4 := s.Prog(loong64.AMOVV)
   819  		p4.From.Type = obj.TYPE_REG
   820  		p4.From.Reg = v.Args[2].Reg()
   821  		p4.To.Type = obj.TYPE_REG
   822  		p4.To.Reg = v.Reg0()
   823  
   824  		p5 := s.Prog(sc)
   825  		p5.From.Type = obj.TYPE_REG
   826  		p5.From.Reg = v.Reg0()
   827  		p5.To.Type = obj.TYPE_MEM
   828  		p5.To.Reg = v.Args[0].Reg()
   829  
   830  		p6 := s.Prog(loong64.ABEQ)
   831  		p6.From.Type = obj.TYPE_REG
   832  		p6.From.Reg = v.Reg0()
   833  		p6.To.Type = obj.TYPE_BRANCH
   834  		p6.To.SetTarget(p2)
   835  
   836  		p7 := s.Prog(loong64.ADBAR)
   837  		p7.From.Type = obj.TYPE_CONST
   838  		p7.From.Offset = 0x12
   839  		p3.To.SetTarget(p7)
   840  
   841  	case ssa.OpLOONG64LoweredAtomicAnd32,
   842  		ssa.OpLOONG64LoweredAtomicOr32:
   843  		// AM{AND,OR}DBx  Rarg1, (Rarg0), RegZero
   844  		p := s.Prog(v.Op.Asm())
   845  		p.From.Type = obj.TYPE_REG
   846  		p.From.Reg = v.Args[1].Reg()
   847  		p.To.Type = obj.TYPE_MEM
   848  		p.To.Reg = v.Args[0].Reg()
   849  		p.RegTo2 = loong64.REGZERO
   850  
   851  	case ssa.OpLOONG64LoweredAtomicAnd32value,
   852  		ssa.OpLOONG64LoweredAtomicAnd64value,
   853  		ssa.OpLOONG64LoweredAtomicOr64value,
   854  		ssa.OpLOONG64LoweredAtomicOr32value:
   855  		// AM{AND,OR}DBx  Rarg1, (Rarg0), Rout
   856  		p := s.Prog(v.Op.Asm())
   857  		p.From.Type = obj.TYPE_REG
   858  		p.From.Reg = v.Args[1].Reg()
   859  		p.To.Type = obj.TYPE_MEM
   860  		p.To.Reg = v.Args[0].Reg()
   861  		p.RegTo2 = v.Reg0()
   862  
   863  	case ssa.OpLOONG64LoweredAtomicCas64Variant, ssa.OpLOONG64LoweredAtomicCas32Variant:
   864  		// MOVV         $0, Rout
   865  		// MOVV         Rarg1, Rtmp
   866  		// AMCASDBx     Rarg2, (Rarg0), Rtmp
   867  		// BNE          Rarg1, Rtmp, 2(PC)
   868  		// MOVV         $1, Rout
   869  		// NOP
   870  
   871  		amcasx := loong64.AAMCASDBV
   872  		if v.Op == ssa.OpLOONG64LoweredAtomicCas32Variant {
   873  			amcasx = loong64.AAMCASDBW
   874  		}
   875  
   876  		p := s.Prog(loong64.AMOVV)
   877  		p.From.Type = obj.TYPE_REG
   878  		p.From.Reg = loong64.REGZERO
   879  		p.To.Type = obj.TYPE_REG
   880  		p.To.Reg = v.Reg0()
   881  
   882  		p1 := s.Prog(loong64.AMOVV)
   883  		p1.From.Type = obj.TYPE_REG
   884  		p1.From.Reg = v.Args[1].Reg()
   885  		p1.To.Type = obj.TYPE_REG
   886  		p1.To.Reg = loong64.REGTMP
   887  
   888  		p2 := s.Prog(amcasx)
   889  		p2.From.Type = obj.TYPE_REG
   890  		p2.From.Reg = v.Args[2].Reg()
   891  		p2.To.Type = obj.TYPE_MEM
   892  		p2.To.Reg = v.Args[0].Reg()
   893  		p2.RegTo2 = loong64.REGTMP
   894  
   895  		p3 := s.Prog(loong64.ABNE)
   896  		p3.From.Type = obj.TYPE_REG
   897  		p3.From.Reg = v.Args[1].Reg()
   898  		p3.Reg = loong64.REGTMP
   899  		p3.To.Type = obj.TYPE_BRANCH
   900  
   901  		p4 := s.Prog(loong64.AMOVV)
   902  		p4.From.Type = obj.TYPE_CONST
   903  		p4.From.Offset = 0x1
   904  		p4.To.Type = obj.TYPE_REG
   905  		p4.To.Reg = v.Reg0()
   906  
   907  		p5 := s.Prog(obj.ANOP)
   908  		p3.To.SetTarget(p5)
   909  
   910  	case ssa.OpLOONG64LoweredNilCheck:
   911  		// Issue a load which will fault if arg is nil.
   912  		p := s.Prog(loong64.AMOVB)
   913  		p.From.Type = obj.TYPE_MEM
   914  		p.From.Reg = v.Args[0].Reg()
   915  		ssagen.AddAux(&p.From, v)
   916  		p.To.Type = obj.TYPE_REG
   917  		p.To.Reg = loong64.REGTMP
   918  		if logopt.Enabled() {
   919  			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
   920  		}
   921  		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
   922  			base.WarnfAt(v.Pos, "generated nil check")
   923  		}
   924  	case ssa.OpLOONG64FPFlagTrue,
   925  		ssa.OpLOONG64FPFlagFalse:
   926  		// MOVV	$0, r
   927  		// BFPF	2(PC)
   928  		// MOVV	$1, r
   929  		branch := loong64.ABFPF
   930  		if v.Op == ssa.OpLOONG64FPFlagFalse {
   931  			branch = loong64.ABFPT
   932  		}
   933  		p := s.Prog(loong64.AMOVV)
   934  		p.From.Type = obj.TYPE_REG
   935  		p.From.Reg = loong64.REGZERO
   936  		p.To.Type = obj.TYPE_REG
   937  		p.To.Reg = v.Reg()
   938  		p2 := s.Prog(branch)
   939  		p2.To.Type = obj.TYPE_BRANCH
   940  		p3 := s.Prog(loong64.AMOVV)
   941  		p3.From.Type = obj.TYPE_CONST
   942  		p3.From.Offset = 1
   943  		p3.To.Type = obj.TYPE_REG
   944  		p3.To.Reg = v.Reg()
   945  		p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
   946  		p2.To.SetTarget(p4)
   947  	case ssa.OpLOONG64LoweredGetClosurePtr:
   948  		// Closure pointer is R22 (loong64.REGCTXT).
   949  		ssagen.CheckLoweredGetClosurePtr(v)
   950  	case ssa.OpLOONG64LoweredGetCallerSP:
   951  		// caller's SP is FixedFrameSize below the address of the first arg
   952  		p := s.Prog(loong64.AMOVV)
   953  		p.From.Type = obj.TYPE_ADDR
   954  		p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
   955  		p.From.Name = obj.NAME_PARAM
   956  		p.To.Type = obj.TYPE_REG
   957  		p.To.Reg = v.Reg()
   958  	case ssa.OpLOONG64LoweredGetCallerPC:
   959  		p := s.Prog(obj.AGETCALLERPC)
   960  		p.To.Type = obj.TYPE_REG
   961  		p.To.Reg = v.Reg()
   962  	case ssa.OpLOONG64MASKEQZ, ssa.OpLOONG64MASKNEZ:
   963  		p := s.Prog(v.Op.Asm())
   964  		p.From.Type = obj.TYPE_REG
   965  		p.From.Reg = v.Args[1].Reg()
   966  		p.Reg = v.Args[0].Reg()
   967  		p.To.Type = obj.TYPE_REG
   968  		p.To.Reg = v.Reg()
   969  
   970  	case ssa.OpLOONG64PRELD:
   971  		// PRELD (Rarg0), hint
   972  		p := s.Prog(v.Op.Asm())
   973  		p.From.Type = obj.TYPE_MEM
   974  		p.From.Reg = v.Args[0].Reg()
   975  		p.AddRestSourceConst(v.AuxInt & 0x1f)
   976  
   977  	case ssa.OpLOONG64PRELDX:
   978  		// PRELDX (Rarg0), $n, $hint
   979  		p := s.Prog(v.Op.Asm())
   980  		p.From.Type = obj.TYPE_MEM
   981  		p.From.Reg = v.Args[0].Reg()
   982  		p.AddRestSourceArgs([]obj.Addr{
   983  			{Type: obj.TYPE_CONST, Offset: int64((v.AuxInt >> 5) & 0x1fffffffff)},
   984  			{Type: obj.TYPE_CONST, Offset: int64((v.AuxInt >> 0) & 0x1f)},
   985  		})
   986  
   987  	case ssa.OpClobber, ssa.OpClobberReg:
   988  		// TODO: implement for clobberdead experiment. Nop is ok for now.
   989  	default:
   990  		v.Fatalf("genValue not implemented: %s", v.LongString())
   991  	}
   992  }
   993  
// blockJump maps each LOONG64 conditional block kind to its branch
// instructions: asm branches when the condition holds (to Succs[0]),
// and invasm is the inverted form that branches when it does not
// (to Succs[1]). ssaGenBlock picks whichever lets the more common
// successor fall through.
var blockJump = map[ssa.BlockKind]struct {
	asm, invasm obj.As
}{
	ssa.BlockLOONG64EQ:   {loong64.ABEQ, loong64.ABNE},
	ssa.BlockLOONG64NE:   {loong64.ABNE, loong64.ABEQ},
	ssa.BlockLOONG64LTZ:  {loong64.ABLTZ, loong64.ABGEZ},
	ssa.BlockLOONG64GEZ:  {loong64.ABGEZ, loong64.ABLTZ},
	ssa.BlockLOONG64LEZ:  {loong64.ABLEZ, loong64.ABGTZ},
	ssa.BlockLOONG64GTZ:  {loong64.ABGTZ, loong64.ABLEZ},
	ssa.BlockLOONG64FPT:  {loong64.ABFPT, loong64.ABFPF},
	ssa.BlockLOONG64FPF:  {loong64.ABFPF, loong64.ABFPT},
	ssa.BlockLOONG64BEQ:  {loong64.ABEQ, loong64.ABNE},
	ssa.BlockLOONG64BNE:  {loong64.ABNE, loong64.ABEQ},
	ssa.BlockLOONG64BGE:  {loong64.ABGE, loong64.ABLT},
	ssa.BlockLOONG64BLT:  {loong64.ABLT, loong64.ABGE},
	ssa.BlockLOONG64BLTU: {loong64.ABLTU, loong64.ABGEU},
	ssa.BlockLOONG64BGEU: {loong64.ABGEU, loong64.ABLTU},
}
  1012  
  1013  func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
  1014  	switch b.Kind {
  1015  	case ssa.BlockPlain, ssa.BlockDefer:
  1016  		if b.Succs[0].Block() != next {
  1017  			p := s.Prog(obj.AJMP)
  1018  			p.To.Type = obj.TYPE_BRANCH
  1019  			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
  1020  		}
  1021  	case ssa.BlockExit, ssa.BlockRetJmp:
  1022  	case ssa.BlockRet:
  1023  		s.Prog(obj.ARET)
  1024  	case ssa.BlockLOONG64EQ, ssa.BlockLOONG64NE,
  1025  		ssa.BlockLOONG64LTZ, ssa.BlockLOONG64GEZ,
  1026  		ssa.BlockLOONG64LEZ, ssa.BlockLOONG64GTZ,
  1027  		ssa.BlockLOONG64BEQ, ssa.BlockLOONG64BNE,
  1028  		ssa.BlockLOONG64BLT, ssa.BlockLOONG64BGE,
  1029  		ssa.BlockLOONG64BLTU, ssa.BlockLOONG64BGEU,
  1030  		ssa.BlockLOONG64FPT, ssa.BlockLOONG64FPF:
  1031  		jmp := blockJump[b.Kind]
  1032  		var p *obj.Prog
  1033  		switch next {
  1034  		case b.Succs[0].Block():
  1035  			p = s.Br(jmp.invasm, b.Succs[1].Block())
  1036  		case b.Succs[1].Block():
  1037  			p = s.Br(jmp.asm, b.Succs[0].Block())
  1038  		default:
  1039  			if b.Likely != ssa.BranchUnlikely {
  1040  				p = s.Br(jmp.asm, b.Succs[0].Block())
  1041  				s.Br(obj.AJMP, b.Succs[1].Block())
  1042  			} else {
  1043  				p = s.Br(jmp.invasm, b.Succs[1].Block())
  1044  				s.Br(obj.AJMP, b.Succs[0].Block())
  1045  			}
  1046  		}
  1047  		switch b.Kind {
  1048  		case ssa.BlockLOONG64BEQ, ssa.BlockLOONG64BNE,
  1049  			ssa.BlockLOONG64BGE, ssa.BlockLOONG64BLT,
  1050  			ssa.BlockLOONG64BGEU, ssa.BlockLOONG64BLTU:
  1051  			p.From.Type = obj.TYPE_REG
  1052  			p.From.Reg = b.Controls[0].Reg()
  1053  			p.Reg = b.Controls[1].Reg()
  1054  		case ssa.BlockLOONG64EQ, ssa.BlockLOONG64NE,
  1055  			ssa.BlockLOONG64LTZ, ssa.BlockLOONG64GEZ,
  1056  			ssa.BlockLOONG64LEZ, ssa.BlockLOONG64GTZ,
  1057  			ssa.BlockLOONG64FPT, ssa.BlockLOONG64FPF:
  1058  			if !b.Controls[0].Type.IsFlags() {
  1059  				p.From.Type = obj.TYPE_REG
  1060  				p.From.Reg = b.Controls[0].Reg()
  1061  			}
  1062  		}
  1063  	default:
  1064  		b.Fatalf("branch not implemented: %s", b.LongString())
  1065  	}
  1066  }
  1067  
  1068  func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
  1069  	p := s.Prog(loadByType(t, reg))
  1070  	p.From.Type = obj.TYPE_MEM
  1071  	p.From.Name = obj.NAME_AUTO
  1072  	p.From.Sym = n.Linksym()
  1073  	p.From.Offset = n.FrameOffset() + off
  1074  	p.To.Type = obj.TYPE_REG
  1075  	p.To.Reg = reg
  1076  	return p
  1077  }
  1078  
  1079  func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
  1080  	p = pp.Append(p, storeByType(t, reg), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
  1081  	p.To.Name = obj.NAME_PARAM
  1082  	p.To.Sym = n.Linksym()
  1083  	p.Pos = p.Pos.WithNotStmt()
  1084  	return p
  1085  }
  1086  

View as plain text