Source file src/runtime/race.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build race

package runtime

import (
	"internal/abi"
	"unsafe"
)

// Public race detection API, present iff built with -race.

func RaceRead(addr unsafe.Pointer)

//go:linkname race_Read internal/race.Read
//go:nosplit
func race_Read(addr unsafe.Pointer) {
	RaceRead(addr)
}

func RaceWrite(addr unsafe.Pointer)

//go:linkname race_Write internal/race.Write
//go:nosplit
func race_Write(addr unsafe.Pointer) {
	RaceWrite(addr)
}

func RaceReadRange(addr unsafe.Pointer, len int)

//go:linkname race_ReadRange internal/race.ReadRange
//go:nosplit
func race_ReadRange(addr unsafe.Pointer, len int) {
	RaceReadRange(addr, len)
}

func RaceWriteRange(addr unsafe.Pointer, len int)

//go:linkname race_WriteRange internal/race.WriteRange
//go:nosplit
func race_WriteRange(addr unsafe.Pointer, len int) {
	RaceWriteRange(addr, len)
}

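// An illustrative sketch (not part of the original file) of how the manual
// annotations above are used: memory written behind the detector's back,
// e.g. by a cgo call, can be reported explicitly. fillBuffer is hypothetical.
//
//	buf := make([]byte, n)
//	// Tell the detector this goroutine writes buf[0:n] ...
//	runtime.RaceWriteRange(unsafe.Pointer(&buf[0]), n)
//	// ... then perform the write the detector cannot see.
//	C.fillBuffer(unsafe.Pointer(&buf[0]), C.int(n))
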
func RaceErrors() int {
	var n uint64
	racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
	return int(n)
}

//go:linkname race_Errors internal/race.Errors
//go:nosplit
func race_Errors() int {
	return RaceErrors()
}

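// A minimal usage sketch for RaceErrors (illustrative): a test that expects
// the detector to fire can compare report counts before and after.
// doRacyThing and t are hypothetical.
//
//	before := runtime.RaceErrors()
//	doRacyThing()
//	if runtime.RaceErrors() == before {
//		t.Fatal("expected a race report")
//	}
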
// RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations
// between goroutines. These inform the race detector about actual synchronization
// that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable
// sections of code).
// RaceAcquire establishes a happens-before relation with the preceding
// RaceReleaseMerge on addr up to and including the last RaceRelease on addr.
// In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
// RaceAcquire is equivalent to atomic_load(memory_order_acquire).
//
//go:nosplit
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}

//go:linkname race_Acquire internal/race.Acquire
//go:nosplit
func race_Acquire(addr unsafe.Pointer) {
	RaceAcquire(addr)
}

// RaceRelease performs a release operation on addr that
// can synchronize with a later RaceAcquire on addr.
//
// In terms of the C memory model, RaceRelease is equivalent to
// atomic_store(memory_order_release).
//
//go:nosplit
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}

//go:linkname race_Release internal/race.Release
//go:nosplit
func race_Release(addr unsafe.Pointer) {
	RaceRelease(addr)
}

// RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
// relation with the preceding RaceRelease or RaceReleaseMerge on addr.
//
// In terms of the C memory model, RaceReleaseMerge is equivalent to
// atomic_exchange(memory_order_release).
//
//go:nosplit
func RaceReleaseMerge(addr unsafe.Pointer) {
	racereleasemerge(addr)
}

//go:linkname race_ReleaseMerge internal/race.ReleaseMerge
//go:nosplit
func race_ReleaseMerge(addr unsafe.Pointer) {
	RaceReleaseMerge(addr)
}

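// An illustrative pairing of the calls above (not part of the original file):
// a hand-rolled publication scheme the detector cannot see through. Only the
// address of syncToken matters; the variable is hypothetical and must have
// shadow (global or heap, not stack), or the annotation is silently dropped.
//
//	var syncToken int64 // package-level, so it has shadow
//
//	// publisher, after making data visible through a custom mechanism:
//	runtime.RaceRelease(unsafe.Pointer(&syncToken))
//
//	// subscriber, after observing the publication:
//	runtime.RaceAcquire(unsafe.Pointer(&syncToken))
//
// Everything the publisher did before RaceRelease is then treated as
// happening before everything the subscriber does after RaceAcquire.
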
// RaceDisable disables handling of race synchronization events in the current goroutine.
// Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested.
// Non-synchronization events (memory accesses, function entry/exit) still affect
// the race detector.
//
//go:nosplit
func RaceDisable() {
	gp := getg()
	if gp.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, gp.racectx, 0, 0, 0)
	}
	gp.raceignore++
}

//go:linkname race_Disable internal/race.Disable
//go:nosplit
func race_Disable() {
	RaceDisable()
}

// RaceEnable re-enables handling of race events in the current goroutine.
//
//go:nosplit
func RaceEnable() {
	gp := getg()
	gp.raceignore--
	if gp.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, gp.racectx, 0, 0, 0)
	}
}

//go:linkname race_Enable internal/race.Enable
//go:nosplit
func race_Enable() {
	RaceEnable()
}

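// A hedged sketch of RaceDisable/RaceEnable (illustrative): wrap a region
// whose internal synchronization would otherwise confuse the detector, then
// re-establish the happens-before edge by hand. enterFFILock and lockToken
// are hypothetical.
//
//	runtime.RaceDisable()
//	enterFFILock() // its sync events are ignored while disabled
//	runtime.RaceEnable()
//	runtime.RaceAcquire(unsafe.Pointer(&lockToken))
//
// Note that memory accesses inside the disabled region are still checked;
// only synchronization events are ignored.
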
// Private interface for the runtime.

const raceenabled = true

// For all functions accepting callerpc and pc,
// callerpc is the return PC of the function that calls this function,
// and pc is the start PC of the function that calls this function.
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.Kind_ & abi.KindMask
	if kind == abi.Array || kind == abi.Struct {
		// for composite objects we have to read every address
		// because a write might happen to any subobject.
		racereadrangepc(addr, t.Size_, callerpc, pc)
	} else {
		// for non-composite objects we can read just the start
		// address, as any write must write the first byte.
		racereadpc(addr, callerpc, pc)
	}
}

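// For illustration only (this shape is not in this file): an instrumented
// runtime entry point would pass its caller's return PC and its own start PC,
// so race reports attribute the access to the caller. typedread is
// hypothetical; sys.GetCallerPC is getcallerpc in older runtimes.
//
//	func typedread(t *_type, addr unsafe.Pointer) {
//		raceReadObjectPC(t, addr, sys.GetCallerPC(), abi.FuncPCABIInternal(typedread))
//		// ... perform the read ...
//	}
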
//go:linkname race_ReadObjectPC internal/race.ReadObjectPC
func race_ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) {
	raceReadObjectPC(t, addr, callerpc, pc)
}

func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.Kind_ & abi.KindMask
	if kind == abi.Array || kind == abi.Struct {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		racewriterangepc(addr, t.Size_, callerpc, pc)
	} else {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		racewritepc(addr, callerpc, pc)
	}
}

//go:linkname race_WriteObjectPC internal/race.WriteObjectPC
func race_WriteObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) {
	raceWriteObjectPC(t, addr, callerpc, pc)
}

//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)

//go:linkname race_ReadPC internal/race.ReadPC
func race_ReadPC(addr unsafe.Pointer, callerpc, pc uintptr) {
	racereadpc(addr, callerpc, pc)
}

//go:linkname race_WritePC internal/race.WritePC
func race_WritePC(addr unsafe.Pointer, callerpc, pc uintptr) {
	racewritepc(addr, callerpc, pc)
}

type symbolizeCodeContext struct {
	pc   uintptr
	fn   *byte
	file *byte
	line uintptr
	off  uintptr
	res  uintptr
}

var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}

const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)

// Callback from C into Go, runs on g0.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
	switch cmd {
	case raceGetProcCmd:
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
	default:
		throw("unknown command")
	}
}

// raceSymbolizeCode reads ctx.pc and populates the rest of *ctx with
// information about the code at that pc.
//
// The race detector has already subtracted 1 from pcs, so they point to the last
// byte of call instructions (including calls to runtime.racewrite and friends).
//
// If the incoming pc is part of an inlined function, *ctx is populated
// with information about the inlined function, and on return ctx.pc is set
// to a pc in the logically containing function. (The race detector should call this
// function again with that pc.)
//
// If the incoming pc is not part of an inlined function, the return pc is unchanged.
func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	pc := ctx.pc
	fi := findfunc(pc)
	if fi.valid() {
		u, uf := newInlineUnwinder(fi, pc)
		for ; uf.valid(); uf = u.next(uf) {
			sf := u.srcFunc(uf)
			if sf.funcID == abi.FuncIDWrapper && u.isInlined(uf) {
				// Ignore wrappers, unless we're at the outermost frame of u.
				// A non-inlined wrapper frame always means we have a physical
				// frame consisting entirely of wrappers, in which case we'll
				// take an outermost wrapper over nothing.
				continue
			}

			name := sf.name()
			file, line := u.fileLine(uf)
			if line == 0 {
				// Failure to symbolize
				continue
			}
			ctx.fn = &bytes(name)[0] // assume NUL-terminated
			ctx.line = uintptr(line)
			ctx.file = &bytes(file)[0] // assume NUL-terminated
			ctx.off = pc - fi.entry()
			ctx.res = 1
			if u.isInlined(uf) {
				// Set ctx.pc to the "caller" so the race detector calls this again
				// to further unwind.
				uf = u.next(uf)
				ctx.pc = uf.pc
			}
			return
		}
	}
	ctx.fn = &qq[0]
	ctx.file = &dash[0]
	ctx.line = 0
	ctx.off = ctx.pc
	ctx.res = 1
}

type symbolizeDataContext struct {
	addr  uintptr
	heap  uintptr
	start uintptr
	size  uintptr
	name  *byte
	file  *byte
	line  uintptr
	res   uintptr
}

func raceSymbolizeData(ctx *symbolizeDataContext) {
	if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
		// TODO: Does this need to handle malloc headers?
		ctx.heap = 1
		ctx.start = base
		ctx.size = span.elemsize
		ctx.res = 1
	}
}

// Race runtime functions called via runtime·racecall.
//
//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_proc_create __tsan_proc_create
var __tsan_proc_create byte

//go:linkname __tsan_proc_destroy __tsan_proc_destroy
var __tsan_proc_destroy byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_free __tsan_free
var __tsan_free byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_acquire __tsan_release_acquire
var __tsan_release_acquire byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte

//go:linkname __tsan_report_count __tsan_report_count
var __tsan_report_count byte

// Mimic what cmd/cgo would do.
//
//go:cgo_import_static __tsan_init
//go:cgo_import_static __tsan_fini
//go:cgo_import_static __tsan_proc_create
//go:cgo_import_static __tsan_proc_destroy
//go:cgo_import_static __tsan_map_shadow
//go:cgo_import_static __tsan_finalizer_goroutine
//go:cgo_import_static __tsan_go_start
//go:cgo_import_static __tsan_go_end
//go:cgo_import_static __tsan_malloc
//go:cgo_import_static __tsan_free
//go:cgo_import_static __tsan_acquire
//go:cgo_import_static __tsan_release
//go:cgo_import_static __tsan_release_acquire
//go:cgo_import_static __tsan_release_merge
//go:cgo_import_static __tsan_go_ignore_sync_begin
//go:cgo_import_static __tsan_go_ignore_sync_end
//go:cgo_import_static __tsan_report_count

// These are called from race_amd64.s.
//
//go:cgo_import_static __tsan_read
//go:cgo_import_static __tsan_read_pc
//go:cgo_import_static __tsan_read_range
//go:cgo_import_static __tsan_write
//go:cgo_import_static __tsan_write_pc
//go:cgo_import_static __tsan_write_range
//go:cgo_import_static __tsan_func_enter
//go:cgo_import_static __tsan_func_exit

//go:cgo_import_static __tsan_go_atomic32_load
//go:cgo_import_static __tsan_go_atomic64_load
//go:cgo_import_static __tsan_go_atomic32_store
//go:cgo_import_static __tsan_go_atomic64_store
//go:cgo_import_static __tsan_go_atomic32_exchange
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
//go:cgo_import_static __tsan_go_atomic32_fetch_and
//go:cgo_import_static __tsan_go_atomic64_fetch_and
//go:cgo_import_static __tsan_go_atomic32_fetch_or
//go:cgo_import_static __tsan_go_atomic64_fetch_or
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange

// start/end of global data (data+bss).
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
var racearenastart uintptr
var racearenaend uintptr

func racefuncenter(callpc uintptr)
func racefuncenterfp(fp uintptr)
func racefuncexit()
func raceread(addr uintptr)
func racewrite(addr uintptr)
func racereadrange(addr, size uintptr)
func racewriterange(addr, size uintptr)
func racereadrangepc1(addr, size, pc uintptr)
func racewriterangepc1(addr, size, pc uintptr)
func racecallbackthunk(uintptr)

// racecall calls an arbitrary function fn from the C race runtime
// with up to 4 uintptr arguments.
func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)

// isvalidaddr reports whether the address has shadow (i.e. is in the heap or data/bss).
//
//go:nosplit
func isvalidaddr(addr unsafe.Pointer) bool {
	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
}

//go:nosplit
func raceinit() (gctx, pctx uintptr) {
	lockInit(&raceFiniLock, lockRankRaceFini)

	// On most machines, cgo is required to initialize libc, which is used by the race runtime.
	if !iscgo && GOOS != "darwin" {
		throw("raceinit: race build must use cgo")
	}

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), abi.FuncPCABI0(racecallbackthunk), 0)

	// Round data segment to page boundaries, because it's used in mmap().
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	size := alignUp(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return
}

//go:nosplit
func racefini() {
	// racefini() can only be called once to avoid races.
	// This eventually (via __tsan_fini) calls C.exit which has
	// undefined behavior if called more than once. If the lock is
	// already held it's assumed that the first caller exits the program
	// so other calls can hang forever without an issue.
	lock(&raceFiniLock)

	// __tsan_fini will run C atexit functions and C++ destructors,
	// which can theoretically call back into Go.
	// Tell the scheduler we are entering external code.
	entersyscall()

	// We're entering external code that may call ExitProcess on
	// Windows.
	osPreemptExtEnter(getg().m)

	racecall(&__tsan_fini, 0, 0, 0, 0)
}

//go:nosplit
func raceproccreate() uintptr {
	var ctx uintptr
	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
	return ctx
}

//go:nosplit
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}

//go:nosplit
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	}
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	}
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
}

//go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}

//go:nosplit
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}

//go:nosplit
func racegostart(pc uintptr) uintptr {
	gp := getg()
	var spawng *g
	if gp.m.curg != nil {
		spawng = gp.m.curg
	} else {
		spawng = gp
	}

	var racectx uintptr
	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
	return racectx
}

//go:nosplit
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}

//go:nosplit
func racectxend(racectx uintptr) {
	racecall(&__tsan_go_end, racectx, 0, 0, 0)
}

//go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racewriterangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racereadrangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}

//go:nosplit
func raceacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func raceacquirectx(racectx uintptr, addr unsafe.Pointer) {
	if !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}

//go:nosplit
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleaseacquire(addr unsafe.Pointer) {
	racereleaseacquireg(getg(), addr)
}

//go:nosplit
func racereleaseacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}

//go:nosplit
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}

// The declarations below generate ABI wrappers for functions
// implemented in assembly in this package but declared in another
// package.

//go:linkname abigen_sync_atomic_LoadInt32 sync/atomic.LoadInt32
func abigen_sync_atomic_LoadInt32(addr *int32) (val int32)

//go:linkname abigen_sync_atomic_LoadInt64 sync/atomic.LoadInt64
func abigen_sync_atomic_LoadInt64(addr *int64) (val int64)

//go:linkname abigen_sync_atomic_LoadUint32 sync/atomic.LoadUint32
func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32)

//go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64
func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)

//go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr
func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)

//go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer
func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)

//go:linkname abigen_sync_atomic_StoreInt32 sync/atomic.StoreInt32
func abigen_sync_atomic_StoreInt32(addr *int32, val int32)

//go:linkname abigen_sync_atomic_StoreInt64 sync/atomic.StoreInt64
func abigen_sync_atomic_StoreInt64(addr *int64, val int64)

//go:linkname abigen_sync_atomic_StoreUint32 sync/atomic.StoreUint32
func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32)

//go:linkname abigen_sync_atomic_StoreUint64 sync/atomic.StoreUint64
func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64)

//go:linkname abigen_sync_atomic_SwapInt32 sync/atomic.SwapInt32
func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32)

//go:linkname abigen_sync_atomic_SwapInt64 sync/atomic.SwapInt64
func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64)

//go:linkname abigen_sync_atomic_SwapUint32 sync/atomic.SwapUint32
func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32)

//go:linkname abigen_sync_atomic_SwapUint64 sync/atomic.SwapUint64
func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64)

//go:linkname abigen_sync_atomic_AddInt32 sync/atomic.AddInt32
func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32)

//go:linkname abigen_sync_atomic_AddUint32 sync/atomic.AddUint32
func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32)

//go:linkname abigen_sync_atomic_AddInt64 sync/atomic.AddInt64
func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64)

//go:linkname abigen_sync_atomic_AddUint64 sync/atomic.AddUint64
func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)

//go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr
func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)

//go:linkname abigen_sync_atomic_AndInt32 sync/atomic.AndInt32
func abigen_sync_atomic_AndInt32(addr *int32, mask int32) (old int32)

//go:linkname abigen_sync_atomic_AndUint32 sync/atomic.AndUint32
func abigen_sync_atomic_AndUint32(addr *uint32, mask uint32) (old uint32)

//go:linkname abigen_sync_atomic_AndInt64 sync/atomic.AndInt64
func abigen_sync_atomic_AndInt64(addr *int64, mask int64) (old int64)

//go:linkname abigen_sync_atomic_AndUint64 sync/atomic.AndUint64
func abigen_sync_atomic_AndUint64(addr *uint64, mask uint64) (old uint64)

//go:linkname abigen_sync_atomic_AndUintptr sync/atomic.AndUintptr
func abigen_sync_atomic_AndUintptr(addr *uintptr, mask uintptr) (old uintptr)

//go:linkname abigen_sync_atomic_OrInt32 sync/atomic.OrInt32
func abigen_sync_atomic_OrInt32(addr *int32, mask int32) (old int32)

//go:linkname abigen_sync_atomic_OrUint32 sync/atomic.OrUint32
func abigen_sync_atomic_OrUint32(addr *uint32, mask uint32) (old uint32)

//go:linkname abigen_sync_atomic_OrInt64 sync/atomic.OrInt64
func abigen_sync_atomic_OrInt64(addr *int64, mask int64) (old int64)

//go:linkname abigen_sync_atomic_OrUint64 sync/atomic.OrUint64
func abigen_sync_atomic_OrUint64(addr *uint64, mask uint64) (old uint64)

//go:linkname abigen_sync_atomic_OrUintptr sync/atomic.OrUintptr
func abigen_sync_atomic_OrUintptr(addr *uintptr, mask uintptr) (old uintptr)

//go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32
func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapInt64 sync/atomic.CompareAndSwapInt64
func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint32 sync/atomic.CompareAndSwapUint32
func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint64 sync/atomic.CompareAndSwapUint64
func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
