Source file test/codegen/simd.go

// asmcheck

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// These tests check code generation of simd peephole optimizations.
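//
// Each check is an asmcheck comment of the form `// amd64:"REGEXP"`
// (or a backquoted regexp): the test harness compiles the package and
// verifies that the enclosing function's generated amd64 assembly
// matches the regexp. A leading `-` before a pattern asserts that the
// pattern does NOT appear in the generated code.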

//go:build goexperiment.simd && amd64

package codegen

import (
	"math"
	"simd/archsimd"
)

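// vptest1 checks that AndNot followed by IsZero is folded into a
// single VPTEST, with the result read from the carry flag (SETCS).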
func vptest1() bool {
	v1 := archsimd.LoadUint64x2Slice([]uint64{0, 1})
	v2 := archsimd.LoadUint64x2Slice([]uint64{0, 0})
	// amd64:`VPTEST\s(.*)(.*)$`
	// amd64:`SETCS\s(.*)$`
	return v1.AndNot(v2).IsZero()
}

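// vptest2 checks that And followed by IsZero is folded into a single
// VPTEST, with the result read from the zero flag (SETEQ).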
func vptest2() bool {
	v1 := archsimd.LoadUint64x2Slice([]uint64{0, 1})
	v2 := archsimd.LoadUint64x2Slice([]uint64{0, 0})
	// amd64:`VPTEST\s(.*)(.*)$`
	// amd64:`SETEQ\s(.*)$`
	return v1.And(v2).IsZero()
}

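// Args2 mixes two 32-byte vector fields with a non-vector field. The
// *NoSpill functions below check that passing such a struct (or an
// array of one) keeps the vectors in registers: no VMOVDQU spill
// should appear in the generated code.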
type Args2 struct {
	V0 archsimd.Uint8x32
	V1 archsimd.Uint8x32
	x  string
}

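// simdStructNoSpill checks that the vector fields of a struct argument
// are used directly from registers.
//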
//go:noinline
func simdStructNoSpill(a Args2) archsimd.Uint8x32 {
	// amd64:-`VMOVDQU\s.*$`
	return a.V0.Xor(a.V1)
}

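// simdStructWrapperNoSpill stores to the non-vector field and forwards
// the struct; the vector fields should still not spill.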
func simdStructWrapperNoSpill(a Args2) archsimd.Uint8x32 {
	// amd64:-`VMOVDQU\s.*$`
	a.x = "test"
	return simdStructNoSpill(a)
}

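// simdArrayNoSpill is the array-of-struct variant of simdStructNoSpill.
//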
//go:noinline
func simdArrayNoSpill(a [1]Args2) archsimd.Uint8x32 {
	// amd64:-`VMOVDQU\s.*$`
	return a[0].V0.Xor(a[0].V1)
}

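// simdArrayWrapperNoSpill is the array-of-struct variant of
// simdStructWrapperNoSpill.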
func simdArrayWrapperNoSpill(a [1]Args2) archsimd.Uint8x32 {
	// amd64:-`VMOVDQU\s.*$`
	a[0].x = "test"
	return simdArrayNoSpill(a)
}

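// simdFeatureGuardedMaskOpt checks that Masked lowers differently
// depending on the CPU feature guard: inside an archsimd.X86.AVX512()
// branch the compiler can use AVX-512 zeroing masking (VPADDW.Z),
// while the unguarded path applies the mask with a separate VPAND.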
func simdFeatureGuardedMaskOpt() archsimd.Int16x16 {
	var x, y archsimd.Int16x16
	if archsimd.X86.AVX512() {
		mask := archsimd.Mask16x16FromBits(5)
		return x.Add(y).Masked(mask) // amd64:`VPADDW.Z\s.*$`
	}
	mask := archsimd.Mask16x16FromBits(5)
	return x.Add(y).Masked(mask) // amd64:`VPAND\s.*$`
}

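// simdMaskedMerge checks that inside an AVX512 feature guard, Merge
// uses the instruction's built-in merge masking rather than a separate
// VPBLENDVB blend, which is still emitted on the unguarded path.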
func simdMaskedMerge() archsimd.Int16x16 {
	var x, y archsimd.Int16x16
	if archsimd.X86.AVX512() {
		mask := archsimd.Mask16x16FromBits(5)
		return x.Add(y).Merge(x, mask) // amd64:-`VPBLENDVB\s.*$`
	}
	mask := archsimd.Mask16x16FromBits(5)
	return x.Add(y).Merge(x, mask) // amd64:`VPBLENDVB\s.*$`
}

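// Shared data for the IsNaN tests: a float64 slice seeded with NaNs
// and an int64 sink that keeps the stores from being eliminated.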
var nan = math.NaN()
var floats64s = []float64{0, 1, 2, nan, 4, nan, 6, 7, 8, 9, 10, 11, nan, 13, 14, 15}
var sinkInt64s = make([]int64, 100)

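// simdIsNaN checks that x.IsNaN().Or(y.IsNaN()) collapses into a
// single unordered compare: VCMPPD with predicate 3 (UNORD) is true
// when either operand is NaN, so no separate VPOR should be emitted.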
func simdIsNaN() {
	x := archsimd.LoadFloat64x4Slice(floats64s)
	y := archsimd.LoadFloat64x4Slice(floats64s[4:])
	a := x.IsNaN()
	b := y.IsNaN()
	// amd64:"VCMPPD [$]3," -"VPOR"
	c := a.Or(b)
	c.ToInt64x4().StoreSlice(sinkInt64s)
}

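// simdIsNaN512 is the 512-bit variant of simdIsNaN.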
func simdIsNaN512() {
	x := archsimd.LoadFloat64x8Slice(floats64s)
	y := archsimd.LoadFloat64x8Slice(floats64s[8:])
	a := x.IsNaN()
	b := y.IsNaN()
	// amd64:"VCMPPD [$]3," -"VPOR"
	c := a.Or(b)
	c.ToInt64x8().StoreSlice(sinkInt64s)
}
