Source file src/internal/runtime/atomic/atomic_arm.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build arm
     6  
     7  package atomic
     8  
     9  import (
    10  	"internal/cpu"
    11  	"unsafe"
    12  )
    13  
const (
	// offsetARMHasV7Atomics is the byte offset of cpu.ARM.HasV7Atomics.
	// NOTE(review): presumably referenced from the assembly implementations
	// to pick between v7 atomics and fallbacks — confirm in the .s files.
	offsetARMHasV7Atomics = unsafe.Offsetof(cpu.ARM.HasV7Atomics)
)
    17  
    18  // Export some functions via linkname to assembly in sync/atomic.
    19  //
    20  //go:linkname Xchg
    21  //go:linkname Xchguintptr
    22  //go:linkname Xadd
    23  
// spinlock is a simple test-and-set spin lock built on the 32-bit Cas.
// The zero value is an unlocked lock.
type spinlock struct {
	v uint32 // 0 = unlocked, 1 = locked (see lock/unlock)
}
    27  
    28  //go:nosplit
    29  func (l *spinlock) lock() {
    30  	for {
    31  		if Cas(&l.v, 0, 1) {
    32  			return
    33  		}
    34  	}
    35  }
    36  
// unlock releases the lock by atomically storing 0 into v.
//
//go:nosplit
func (l *spinlock) unlock() {
	Store(&l.v, 0)
}
    41  
// locktab holds the spin locks used to emulate 64-bit atomics (see
// goCas64 and friends below). Each entry is padded out to a cache line
// so that distinct locks do not false-share.
var locktab [57]struct {
	l   spinlock
	pad [cpu.CacheLinePadSize - unsafe.Sizeof(spinlock{})]byte
}
    46  
    47  func addrLock(addr *uint64) *spinlock {
    48  	return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
    49  }
    50  
    51  // Atomic add and return new value.
    52  //
    53  //go:nosplit
    54  func Xadd(val *uint32, delta int32) uint32 {
    55  	for {
    56  		oval := *val
    57  		nval := oval + uint32(delta)
    58  		if Cas(val, oval, nval) {
    59  			return nval
    60  		}
    61  	}
    62  }
    63  
// Xadduintptr is implemented in assembly (declaration only; no Go body).
// NOTE(review): presumably atomic add returning the new value, mirroring
// Xadd — confirm against the assembly implementation.
//
//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
    66  
    67  //go:nosplit
    68  func Xchg(addr *uint32, v uint32) uint32 {
    69  	for {
    70  		old := *addr
    71  		if Cas(addr, old, v) {
    72  			return old
    73  		}
    74  	}
    75  }
    76  
// Xchg8 is implemented in assembly (declaration only; no Go body).
// NOTE(review): presumably the hardware byte-exchange, with goXchg8 below
// as the software fallback — confirm in the assembly.
//
//go:noescape
func Xchg8(addr *uint8, v uint8) uint8
    79  
    80  //go:nosplit
    81  func goXchg8(addr *uint8, v uint8) uint8 {
    82  	// Align down to 4 bytes and use 32-bit CAS.
    83  	addr32 := (*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(addr)) &^ 3))
    84  	shift := (uintptr(unsafe.Pointer(addr)) & 3) * 8 // little endian
    85  	word := uint32(v) << shift
    86  	mask := uint32(0xFF) << shift
    87  
    88  	for {
    89  		old := *addr32 // Read the old 32-bit value
    90  		// Clear the old 8 bits then insert the new value
    91  		if Cas(addr32, old, (old&^mask)|word) {
    92  			// Return the old 8-bit value
    93  			return uint8((old & mask) >> shift)
    94  		}
    95  	}
    96  }
    97  
    98  //go:nosplit
    99  func Xchguintptr(addr *uintptr, v uintptr) uintptr {
   100  	return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
   101  }
   102  
// StorepNoWB is implemented in assembly (declaration only; no Go body).
// NOTE(review): per the name, a pointer store without a write barrier —
// confirm in the assembly.
//
// Not noescape -- it installs a pointer to addr.
func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer)
   105  
// Store, StoreRel, and StoreReluintptr are implemented in assembly
// (declarations only; no Go bodies). NOTE(review): the Rel variants
// presumably have release ordering, per their names — confirm in the
// assembly.
//
//go:noescape
func Store(addr *uint32, v uint32)

//go:noescape
func StoreRel(addr *uint32, v uint32)

//go:noescape
func StoreReluintptr(addr *uintptr, v uintptr)
   114  
   115  //go:nosplit
   116  func goCas64(addr *uint64, old, new uint64) bool {
   117  	if uintptr(unsafe.Pointer(addr))&7 != 0 {
   118  		*(*int)(nil) = 0 // crash on unaligned uint64
   119  	}
   120  	_ = *addr // if nil, fault before taking the lock
   121  	var ok bool
   122  	addrLock(addr).lock()
   123  	if *addr == old {
   124  		*addr = new
   125  		ok = true
   126  	}
   127  	addrLock(addr).unlock()
   128  	return ok
   129  }
   130  
   131  //go:nosplit
   132  func goXadd64(addr *uint64, delta int64) uint64 {
   133  	if uintptr(unsafe.Pointer(addr))&7 != 0 {
   134  		*(*int)(nil) = 0 // crash on unaligned uint64
   135  	}
   136  	_ = *addr // if nil, fault before taking the lock
   137  	var r uint64
   138  	addrLock(addr).lock()
   139  	r = *addr + uint64(delta)
   140  	*addr = r
   141  	addrLock(addr).unlock()
   142  	return r
   143  }
   144  
   145  //go:nosplit
   146  func goXchg64(addr *uint64, v uint64) uint64 {
   147  	if uintptr(unsafe.Pointer(addr))&7 != 0 {
   148  		*(*int)(nil) = 0 // crash on unaligned uint64
   149  	}
   150  	_ = *addr // if nil, fault before taking the lock
   151  	var r uint64
   152  	addrLock(addr).lock()
   153  	r = *addr
   154  	*addr = v
   155  	addrLock(addr).unlock()
   156  	return r
   157  }
   158  
   159  //go:nosplit
   160  func goLoad64(addr *uint64) uint64 {
   161  	if uintptr(unsafe.Pointer(addr))&7 != 0 {
   162  		*(*int)(nil) = 0 // crash on unaligned uint64
   163  	}
   164  	_ = *addr // if nil, fault before taking the lock
   165  	var r uint64
   166  	addrLock(addr).lock()
   167  	r = *addr
   168  	addrLock(addr).unlock()
   169  	return r
   170  }
   171  
   172  //go:nosplit
   173  func goStore64(addr *uint64, v uint64) {
   174  	if uintptr(unsafe.Pointer(addr))&7 != 0 {
   175  		*(*int)(nil) = 0 // crash on unaligned uint64
   176  	}
   177  	_ = *addr // if nil, fault before taking the lock
   178  	addrLock(addr).lock()
   179  	*addr = v
   180  	addrLock(addr).unlock()
   181  }
   182  
// Or8 is implemented in assembly (declaration only; no Go body).
// goOr8 below is the matching software emulation.
//
//go:noescape
func Or8(addr *uint8, v uint8)
   185  
   186  //go:nosplit
   187  func goOr8(addr *uint8, v uint8) {
   188  	// Align down to 4 bytes and use 32-bit CAS.
   189  	addr32 := (*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(addr)) &^ 3))
   190  	word := uint32(v) << ((uintptr(unsafe.Pointer(addr)) & 3) * 8) // little endian
   191  	for {
   192  		old := *addr32
   193  		if Cas(addr32, old, old|word) {
   194  			return
   195  		}
   196  	}
   197  }
   198  
// And8 is implemented in assembly (declaration only; no Go body).
// goAnd8 below is the matching software emulation.
//
//go:noescape
func And8(addr *uint8, v uint8)
   201  
   202  //go:nosplit
   203  func goAnd8(addr *uint8, v uint8) {
   204  	// Align down to 4 bytes and use 32-bit CAS.
   205  	addr32 := (*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(addr)) &^ 3))
   206  	word := uint32(v) << ((uintptr(unsafe.Pointer(addr)) & 3) * 8)    // little endian
   207  	mask := uint32(0xFF) << ((uintptr(unsafe.Pointer(addr)) & 3) * 8) // little endian
   208  	word |= ^mask
   209  	for {
   210  		old := *addr32
   211  		if Cas(addr32, old, old&word) {
   212  			return
   213  		}
   214  	}
   215  }
   216  
   217  //go:nosplit
   218  func Or(addr *uint32, v uint32) {
   219  	for {
   220  		old := *addr
   221  		if Cas(addr, old, old|v) {
   222  			return
   223  		}
   224  	}
   225  }
   226  
   227  //go:nosplit
   228  func And(addr *uint32, v uint32) {
   229  	for {
   230  		old := *addr
   231  		if Cas(addr, old, old&v) {
   232  			return
   233  		}
   234  	}
   235  }
   236  
// armcas is implemented in assembly (declaration only; no Go body).
// NOTE(review): presumably a compare-and-swap reporting success, per the
// name and signature — confirm in the assembly.
//
//go:nosplit
func armcas(ptr *uint32, old, new uint32) bool
   239  
// The remaining functions are implemented in assembly (declarations
// only; no Go bodies).

//go:noescape
func Load(addr *uint32) uint32

// NO go:noescape annotation; *addr escapes if result escapes (#31525)
func Loadp(addr unsafe.Pointer) unsafe.Pointer

//go:noescape
func Load8(addr *uint8) uint8

//go:noescape
func LoadAcq(addr *uint32) uint32

//go:noescape
func LoadAcquintptr(ptr *uintptr) uintptr

//go:noescape
func Cas64(addr *uint64, old, new uint64) bool

//go:noescape
func CasRel(addr *uint32, old, new uint32) bool

//go:noescape
func Xadd64(addr *uint64, delta int64) uint64

//go:noescape
func Xchg64(addr *uint64, v uint64) uint64

//go:noescape
func Load64(addr *uint64) uint64

//go:noescape
func Store8(addr *uint8, v uint8)

//go:noescape
func Store64(addr *uint64, v uint64)

View as plain text