Text file src/internal/runtime/atomic/atomic_amd64.s

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Note: some of these functions are semantically inlined
     6  // by the compiler (in src/cmd/compile/internal/gc/ssa.go).
     7  
     8  #include "textflag.h"
     9  
    10  TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
    11  	JMP	·Load64(SB)	// uintptr is 64 bits on amd64: same layout as Load64.
    12  
    13  TEXT ·Loaduint(SB), NOSPLIT, $0-16
    14  	JMP	·Load64(SB)	// uint is 64 bits on amd64: same layout as Load64.
    15  
    16  TEXT ·Loadint32(SB), NOSPLIT, $0-12
    17  	JMP	·Load(SB)	// int32 shares the uint32 load; bit pattern is identical.
    18  
    19  TEXT ·Loadint64(SB), NOSPLIT, $0-16
    20  	JMP	·Load64(SB)	// int64 shares the uint64 load; bit pattern is identical.
    21  
    22  // bool Cas(int32 *val, int32 old, int32 new)
    23  // Atomically:
    24  //	if(*val == old){
    25  //		*val = new;
    26  //		return 1;
    27  //	} else
    28  //		return 0;
    29  TEXT ·Cas(SB),NOSPLIT,$0-17
    30  	MOVQ	ptr+0(FP), BX
    31  	MOVL	old+8(FP), AX	// CMPXCHG implicitly compares memory against AX.
    32  	MOVL	new+12(FP), CX
    33  	LOCK
    34  	CMPXCHGL	CX, 0(BX)	// if *ptr == AX: *ptr = CX, ZF=1; else AX = *ptr, ZF=0.
    35  	SETEQ	ret+16(FP)	// bool result = ZF (1 iff the swap happened).
    36  	RET
    37  
    38  // bool	·Cas64(uint64 *val, uint64 old, uint64 new)
    39  // Atomically:
    40  //	if(*val == old){
    41  //		*val = new;
    42  //		return 1;
    43  //	} else {
    44  //		return 0;
    45  //	}
    46  TEXT ·Cas64(SB), NOSPLIT, $0-25
    47  	MOVQ	ptr+0(FP), BX
    48  	MOVQ	old+8(FP), AX	// CMPXCHG implicitly compares memory against AX.
    49  	MOVQ	new+16(FP), CX
    50  	LOCK
    51  	CMPXCHGQ	CX, 0(BX)	// if *ptr == AX: *ptr = CX, ZF=1; else AX = *ptr, ZF=0.
    52  	SETEQ	ret+24(FP)	// bool result = ZF (1 iff the swap happened).
    53  	RET
    54  
    55  // bool Casp1(void **val, void *old, void *new)
    56  // Atomically:
    57  //	if(*val == old){
    58  //		*val = new;
    59  //		return 1;
    60  //	} else
    61  //		return 0;
    62  TEXT ·Casp1(SB), NOSPLIT, $0-25
        	// Same instruction sequence as Cas64, typed for pointers.
        	// NOTE(review): no GC write barrier is performed here ("1" suffix);
        	// presumably callers are responsible for barrier rules — confirm
        	// against the runtime's pointer-write conventions.
    63  	MOVQ	ptr+0(FP), BX
    64  	MOVQ	old+8(FP), AX	// CMPXCHG implicitly compares memory against AX.
    65  	MOVQ	new+16(FP), CX
    66  	LOCK
    67  	CMPXCHGQ	CX, 0(BX)	// if *ptr == AX: *ptr = CX, ZF=1; else ZF=0.
    68  	SETEQ	ret+24(FP)	// bool result = ZF (1 iff the swap happened).
    69  	RET
    70  
    71  TEXT ·Casint32(SB), NOSPLIT, $0-17
    72  	JMP	·Cas(SB)	// int32 shares the uint32 CAS; bit pattern is identical.
    73  
    74  TEXT ·Casint64(SB), NOSPLIT, $0-25
    75  	JMP	·Cas64(SB)	// int64 shares the uint64 CAS; bit pattern is identical.
    76  
    77  TEXT ·Casuintptr(SB), NOSPLIT, $0-25
    78  	JMP	·Cas64(SB)	// uintptr is 64 bits on amd64.
    79  
    80  TEXT ·CasRel(SB), NOSPLIT, $0-17
    81  	JMP	·Cas(SB)	// LOCK CMPXCHG is already sequentially consistent on amd64, which satisfies release ordering.
    82  
    83  // uint32 Xadd(uint32 volatile *val, int32 delta)
    84  // Atomically:
    85  //	*val += delta;
    86  //	return *val;
    87  TEXT ·Xadd(SB), NOSPLIT, $0-20
    88  	MOVQ	ptr+0(FP), BX
    89  	MOVL	delta+8(FP), AX
    90  	MOVL	AX, CX	// save delta: XADD overwrites AX with the old value.
    91  	LOCK
    92  	XADDL	AX, 0(BX)	// AX = old *ptr; *ptr = old + delta.
    93  	ADDL	CX, AX	// AX = old + delta = new value, which is what we return.
    94  	MOVL	AX, ret+16(FP)
    95  	RET
    96  
    97  // uint64 Xadd64(uint64 volatile *val, int64 delta)
    98  // Atomically:
    99  //	*val += delta;
   100  //	return *val;
   101  TEXT ·Xadd64(SB), NOSPLIT, $0-24
   102  	MOVQ	ptr+0(FP), BX
   103  	MOVQ	delta+8(FP), AX
   104  	MOVQ	AX, CX	// save delta: XADD overwrites AX with the old value.
   105  	LOCK
   106  	XADDQ	AX, 0(BX)	// AX = old *ptr; *ptr = old + delta.
   107  	ADDQ	CX, AX	// AX = old + delta = new value, which is what we return.
   108  	MOVQ	AX, ret+16(FP)
   109  	RET
   110  
   111  TEXT ·Xaddint32(SB), NOSPLIT, $0-20
   112  	JMP	·Xadd(SB)	// int32 shares the uint32 implementation.
   113  
   114  TEXT ·Xaddint64(SB), NOSPLIT, $0-24
   115  	JMP	·Xadd64(SB)	// int64 shares the uint64 implementation.
   116  
   117  TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
   118  	JMP	·Xadd64(SB)	// uintptr is 64 bits on amd64.
   119  
   120  // uint8 Xchg8(ptr *uint8, new uint8)
   121  // Atomically:
   122  //	old := *ptr;
   123  //	*ptr = new;
   124  //	return old;
   125  TEXT ·Xchg8(SB), NOSPLIT, $0-17
   126  	MOVQ	ptr+0(FP), BX
   127  	MOVB	new+8(FP), AX
   128  	XCHGB	AX, 0(BX)	// XCHG with a memory operand is implicitly LOCKed; AX gets the old byte.
   129  	MOVB	AX, ret+16(FP)
   130  	RET
   131  
   132  // uint32 Xchg(ptr *uint32, new uint32)
   133  // Atomically:
   134  //	old := *ptr;
   135  //	*ptr = new;
   136  //	return old;
   137  TEXT ·Xchg(SB), NOSPLIT, $0-20
   138  	MOVQ	ptr+0(FP), BX
   139  	MOVL	new+8(FP), AX
   140  	XCHGL	AX, 0(BX)	// XCHG with a memory operand is implicitly LOCKed; AX gets the old value.
   141  	MOVL	AX, ret+16(FP)
   142  	RET
   143  
   144  // uint64 Xchg64(ptr *uint64, new uint64)
   145  // Atomically:
   146  //	old := *ptr;
   147  //	*ptr = new;
   148  //	return old;
   149  TEXT ·Xchg64(SB), NOSPLIT, $0-24
   150  	MOVQ	ptr+0(FP), BX
   151  	MOVQ	new+8(FP), AX
   152  	XCHGQ	AX, 0(BX)	// XCHG with a memory operand is implicitly LOCKed; AX gets the old value.
   153  	MOVQ	AX, ret+16(FP)
   154  	RET
   155  
   156  TEXT ·Xchgint32(SB), NOSPLIT, $0-20
   157  	JMP	·Xchg(SB)	// int32 shares the uint32 implementation.
   158  
   159  TEXT ·Xchgint64(SB), NOSPLIT, $0-24
   160  	JMP	·Xchg64(SB)	// int64 shares the uint64 implementation.
   161  
   162  TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
   163  	JMP	·Xchg64(SB)	// uintptr is 64 bits on amd64.
   164  
   165  TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
        	// Atomic pointer store, No Write Barrier ("NoWB").
        	// NOTE(review): no GC write barrier is emitted here; presumably
        	// callers must only use this where barrier rules permit — confirm.
   166  	MOVQ	ptr+0(FP), BX
   167  	MOVQ	val+8(FP), AX
   168  	XCHGQ	AX, 0(BX)	// XCHG is implicitly LOCKed: a store with full-barrier semantics.
   169  	RET
   170  
   171  TEXT ·Store(SB), NOSPLIT, $0-12
        	// Atomic 32-bit store. XCHG with memory is implicitly LOCKed,
        	// so this is a store with full-barrier semantics (old value discarded).
   172  	MOVQ	ptr+0(FP), BX
   173  	MOVL	val+8(FP), AX
   174  	XCHGL	AX, 0(BX)
   175  	RET
   176  
        // Atomic 8-bit store; see Store for why XCHG is used.
   177  TEXT ·Store8(SB), NOSPLIT, $0-9
   178  	MOVQ	ptr+0(FP), BX
   179  	MOVB	val+8(FP), AX
   180  	XCHGB	AX, 0(BX)
   181  	RET
   182  
        // Atomic 64-bit store; see Store for why XCHG is used.
   183  TEXT ·Store64(SB), NOSPLIT, $0-16
   184  	MOVQ	ptr+0(FP), BX
   185  	MOVQ	val+8(FP), AX
   186  	XCHGQ	AX, 0(BX)
   187  	RET
   188  
   189  TEXT ·Storeint32(SB), NOSPLIT, $0-12
   190  	JMP	·Store(SB)	// int32 shares the uint32 store.
   191  
   192  TEXT ·Storeint64(SB), NOSPLIT, $0-16
   193  	JMP	·Store64(SB)	// int64 shares the uint64 store.
   194  
   195  TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
   196  	JMP	·Store64(SB)	// uintptr is 64 bits on amd64.
   197  
   198  TEXT ·StoreRel(SB), NOSPLIT, $0-12
   199  	JMP	·Store(SB)	// the full-barrier XCHG store satisfies release ordering.
   200  
   201  TEXT ·StoreRel64(SB), NOSPLIT, $0-16
   202  	JMP	·Store64(SB)	// the full-barrier XCHG store satisfies release ordering.
   203  
   204  TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
   205  	JMP	·Store64(SB)	// uintptr is 64 bits on amd64.
   206  
   207  // void	·Or8(byte volatile*, byte);
   208  TEXT ·Or8(SB), NOSPLIT, $0-9
   209  	MOVQ	ptr+0(FP), AX
   210  	MOVB	val+8(FP), BX
   211  	LOCK
   212  	ORB	BX, (AX)	// atomic *ptr |= val; old value not returned.
   213  	RET
   214  
   215  // void	·And8(byte volatile*, byte);
   216  TEXT ·And8(SB), NOSPLIT, $0-9
   217  	MOVQ	ptr+0(FP), AX
   218  	MOVB	val+8(FP), BX
   219  	LOCK
   220  	ANDB	BX, (AX)	// atomic *ptr &= val; old value not returned.
   221  	RET
   222  
   223  // func Or(addr *uint32, v uint32)
   224  TEXT ·Or(SB), NOSPLIT, $0-12
   225  	MOVQ	ptr+0(FP), AX
   226  	MOVL	val+8(FP), BX
   227  	LOCK
   228  	ORL	BX, (AX)	// atomic *addr |= v; old value not returned.
   229  	RET
   230  
   231  // func And(addr *uint32, v uint32)
   232  TEXT ·And(SB), NOSPLIT, $0-12
   233  	MOVQ	ptr+0(FP), AX
   234  	MOVL	val+8(FP), BX
   235  	LOCK
   236  	ANDL	BX, (AX)	// atomic *addr &= v; old value not returned.
   237  	RET
   238  
   239  // func Or32(addr *uint32, v uint32) old uint32
        // Unlike Or, returns the old value, so it needs a CAS retry loop
        // (LOCK OR cannot report the previous contents).
   240  TEXT ·Or32(SB), NOSPLIT, $0-20
   241  	MOVQ	ptr+0(FP), BX
   242  	MOVL	val+8(FP), CX
   243  casloop:
   244  	MOVL 	CX, DX
   245  	MOVL	(BX), AX	// AX = current value; also CMPXCHG's expected operand.
   246  	ORL	AX, DX	// DX = AX | v, the desired new value.
   247  	LOCK
   248  	CMPXCHGL	DX, (BX)	// install DX only if *addr is still AX.
   249  	JNZ casloop	// raced with another writer: retry.
   250  	MOVL 	AX, ret+16(FP)	// return the value observed before the OR.
   251  	RET
   252  
   253  // func And32(addr *uint32, v uint32) old uint32
        // Unlike And, returns the old value, so it needs a CAS retry loop
        // (LOCK AND cannot report the previous contents).
   254  TEXT ·And32(SB), NOSPLIT, $0-20
   255  	MOVQ	ptr+0(FP), BX
   256  	MOVL	val+8(FP), CX
   257  casloop:
   258  	MOVL 	CX, DX
   259  	MOVL	(BX), AX	// AX = current value; also CMPXCHG's expected operand.
   260  	ANDL	AX, DX	// DX = AX & v, the desired new value.
   261  	LOCK
   262  	CMPXCHGL	DX, (BX)	// install DX only if *addr is still AX.
   263  	JNZ casloop	// raced with another writer: retry.
   264  	MOVL 	AX, ret+16(FP)	// return the value observed before the AND.
   265  	RET
   266  
   267  // func Or64(addr *uint64, v uint64) old uint64
        // 64-bit variant of Or32: CAS retry loop returning the old value.
   268  TEXT ·Or64(SB), NOSPLIT, $0-24
   269  	MOVQ	ptr+0(FP), BX
   270  	MOVQ	val+8(FP), CX
   271  casloop:
   272  	MOVQ 	CX, DX
   273  	MOVQ	(BX), AX	// AX = current value; also CMPXCHG's expected operand.
   274  	ORQ	AX, DX	// DX = AX | v, the desired new value.
   275  	LOCK
   276  	CMPXCHGQ	DX, (BX)	// install DX only if *addr is still AX.
   277  	JNZ casloop	// raced with another writer: retry.
   278  	MOVQ 	AX, ret+16(FP)	// return the value observed before the OR.
   279  	RET
   280  
   281  // func And64(addr *uint64, v uint64) old uint64
        // 64-bit variant of And32: CAS retry loop returning the old value.
   282  TEXT ·And64(SB), NOSPLIT, $0-24
   283  	MOVQ	ptr+0(FP), BX
   284  	MOVQ	val+8(FP), CX
   285  casloop:
   286  	MOVQ 	CX, DX
   287  	MOVQ	(BX), AX	// AX = current value; also CMPXCHG's expected operand.
   288  	ANDQ	AX, DX	// DX = AX & v, the desired new value.
   289  	LOCK
   290  	CMPXCHGQ	DX, (BX)	// install DX only if *addr is still AX.
   291  	JNZ casloop	// raced with another writer: retry.
   292  	MOVQ 	AX, ret+16(FP)	// return the value observed before the AND.
   293  	RET
   294  
   295  // func Anduintptr(addr *uintptr, v uintptr) old uintptr
   296  TEXT ·Anduintptr(SB), NOSPLIT, $0-24
   297  	JMP	·And64(SB)	// uintptr is 64 bits on amd64.
   298  
   299  // func Oruintptr(addr *uintptr, v uintptr) old uintptr
   300  TEXT ·Oruintptr(SB), NOSPLIT, $0-24
   301  	JMP	·Or64(SB)	// uintptr is 64 bits on amd64.
   302  

View as plain text