Skip to content

Commit

Permalink
runtime: allow futex OSes to use sema-based mutex
Browse files Browse the repository at this point in the history
Implement sema{create,sleep,wakeup} in terms of the futex syscall when
available. Split the lock2/unlock2 implementations out of lock_sema.go
and lock_futex.go (which they shared with runtime.note) to allow
swapping in new implementations of those.

Let futex-based platforms use the semaphore-based mutex implementation.
Control that via the new "spinbitmutex" GOEXPERIMENT value, disabled by
default.

This lays the groundwork for a "spinbit" mutex implementation; it does
not include the new mutex implementation.

For #68578.

Change-Id: I091289c85124212a87abec7079ecbd9e610b4270
Reviewed-on: https://go-review.googlesource.com/c/go/+/622996
Reviewed-by: Michael Knyszek <[email protected]>
Reviewed-by: Cherry Mui <[email protected]>
LUCI-TryBot-Result: Go LUCI <[email protected]>
  • Loading branch information
rhysh authored and mknyszek committed Nov 15, 2024
1 parent 252e9de commit 18c2461
Show file tree
Hide file tree
Showing 13 changed files with 347 additions and 251 deletions.
8 changes: 8 additions & 0 deletions src/internal/goexperiment/exp_spinbitmutex_off.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 8 additions & 0 deletions src/internal/goexperiment/exp_spinbitmutex_on.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions src/internal/goexperiment/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -118,4 +118,8 @@ type Flags struct {

// SwissMap enables the SwissTable-based map implementation.
SwissMap bool

// SpinbitMutex enables the new "spinbit" mutex implementation on supported
// platforms. See https://go.dev/issue/68578.
SpinbitMutex bool
}
153 changes: 30 additions & 123 deletions src/runtime/lock_futex.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,136 +11,13 @@ import (
"unsafe"
)

// This implementation depends on OS-specific implementations of
//
// futexsleep(addr *uint32, val uint32, ns int64)
// Atomically,
// if *addr == val { sleep }
// Might be woken up spuriously; that's allowed.
// Don't sleep longer than ns; ns < 0 means forever.
//
// futexwakeup(addr *uint32, cnt uint32)
// If any procs are sleeping on addr, wake up at most cnt.

const (
	// Futex-based mutex states, stored in the low bits of mutex.key.
	mutex_unlocked = 0 // lock is free
	mutex_locked   = 1 // lock is held; no sleeper has been observed
	mutex_sleeping = 2 // lock is held; at least one thread may be in futexsleep

	// Spinning parameters used by lock2.
	active_spin     = 4  // rounds of busy spinning (multiprocessor only)
	active_spin_cnt = 30 // iterations passed to procyield per round
	passive_spin    = 1  // rounds of osyield-based spinning
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.

// We use the uintptr mutex.key and note.key as a uint32.
//
// key32 reinterprets the uintptr pointed to by p as a uint32, for use with
// the 32-bit futex word operations (see the mutex.key / note.key comment
// above).
//
//go:nosplit
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

// mutexContended reports whether l has waiters: any state beyond
// mutex_locked (i.e. mutex_sleeping) indicates contention.
func mutexContended(l *mutex) bool {
	state := atomic.Load(key32(&l.key))
	return state > mutex_locked
}

// lock acquires l, routing through lockWithRank so lock-ordering (rank)
// checks see the acquisition.
func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

// lock2 acquires mutex l. It tries a speculative atomic grab, then spins
// (procyield, then osyield) hoping the holder releases soon, and finally
// parks in futexsleep until unlock2 issues a futexwakeup. It also bumps the
// M's lock count, which unlock2 later decrements.
func lock2(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// Record contended acquisition time for the mutex profile.
	timer := &lockTimer{lock: l}
	timer.begin()
	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					timer.end()
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					timer.end()
					return
				}
			}
			osyield()
		}

		// Sleep. The Xchg both claims the lock (if it was just released)
		// and advertises a sleeper; overclaiming mutex_sleeping is allowed
		// per the state comment above.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			timer.end()
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}

// unlock releases l, routing through unlockWithRank so lock-ordering (rank)
// bookkeeping sees the release.
func unlock(l *mutex) {
	unlockWithRank(l)
}

// unlock2 releases mutex l. If the lock was in the mutex_sleeping state, it
// wakes one thread parked in futexsleep. It also records the release for the
// mutex profile, maintains the M's lock count, and re-arms a pending
// preemption request once the M holds no locks.
func unlock2(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.mLockProfile.recordUnlock(l)
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// One-time notifications.
func noteclear(n *note) {
n.key = 0
Expand Down Expand Up @@ -254,3 +131,33 @@ func beforeIdle(int64, int64) (*g, bool) {
}

// checkTimeouts is a no-op on futex-based platforms.
func checkTimeouts() {}

// semacreate prepares mp's semaphore. The futex-based semaphore lives
// entirely in the mp.waitsema word, so no per-M setup is needed.
//
//go:nosplit
func semacreate(mp *m) {}

// semasleep decrements the calling M's semaphore count (mp.waitsema) and, if
// the result is negative, sleeps until semawakeup raises it or, when
// ns >= 0, until roughly ns nanoseconds pass. Returns 0 when the semaphore
// was acquired and -1 when a timed wait ended without it.
//
//go:nosplit
func semasleep(ns int64) int32 {
	mp := getg().m

	// One decrement up front; a non-negative result means a wakeup token was
	// already available and we need not sleep.
	for v := atomic.Xadd(&mp.waitsema, -1); ; v = atomic.Load(&mp.waitsema) {
		if int32(v) >= 0 {
			return 0
		}
		// futexsleep sleeps only while waitsema still equals v (checked
		// atomically by the kernel); spurious wakeups are possible.
		futexsleep(&mp.waitsema, v, ns)
		if ns >= 0 {
			// NOTE(review): this re-tests v as captured before futexsleep
			// (necessarily negative here), so a timed wait appears to always
			// return -1 even if a wakeup arrived during the sleep —
			// presumably callers tolerate spurious timeouts; verify.
			if int32(v) >= 0 {
				return 0
			} else {
				return -1
			}
		}
	}
}

// semawakeup posts one unit to mp's semaphore by incrementing mp.waitsema.
// When the increment brings the count up to exactly zero, a sleeper consumed
// a decrement and may be parked in futexsleep, so issue one futex wakeup.
//
//go:nosplit
func semawakeup(mp *m) {
	if atomic.Xadd(&mp.waitsema, 1) == 0 {
		futexwakeup(&mp.waitsema, 1)
	}
}
136 changes: 136 additions & 0 deletions src/runtime/lock_futex_tristate.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build (dragonfly || freebsd || linux) && !goexperiment.spinbitmutex

package runtime

import (
"internal/runtime/atomic"
)

// This implementation depends on OS-specific implementations of
//
// futexsleep(addr *uint32, val uint32, ns int64)
// Atomically,
// if *addr == val { sleep }
// Might be woken up spuriously; that's allowed.
// Don't sleep longer than ns; ns < 0 means forever.
//
// futexwakeup(addr *uint32, cnt uint32)
// If any procs are sleeping on addr, wake up at most cnt.

const (
	// Futex-based mutex states, stored in the low bits of mutex.key.
	mutex_unlocked = 0 // lock is free
	mutex_locked   = 1 // lock is held; no sleeper has been observed
	mutex_sleeping = 2 // lock is held; at least one thread may be in futexsleep

	// Spinning parameters used by lock2.
	active_spin     = 4  // rounds of busy spinning (multiprocessor only)
	active_spin_cnt = 30 // iterations passed to procyield per round
	passive_spin    = 1  // rounds of osyield-based spinning
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.

// mWaitList is the per-M state for tracking Ms that wait on a mutex. The
// futex-based "tristate" implementation parks waiters in the kernel via
// futexsleep, so no per-M list state is needed and the type is empty.
type mWaitList struct{}

// mutexContended reports whether l has waiters: any state beyond
// mutex_locked (i.e. mutex_sleeping) indicates contention.
func mutexContended(l *mutex) bool {
	state := atomic.Load(key32(&l.key))
	return state > mutex_locked
}

// lock acquires l, routing through lockWithRank so lock-ordering (rank)
// checks see the acquisition.
func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

// lock2 acquires mutex l. It tries a speculative atomic grab, then spins
// (procyield, then osyield) hoping the holder releases soon, and finally
// parks in futexsleep until unlock2 issues a futexwakeup. It also bumps the
// M's lock count, which unlock2 later decrements.
func lock2(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// Record contended acquisition time for the mutex profile.
	timer := &lockTimer{lock: l}
	timer.begin()
	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					timer.end()
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					timer.end()
					return
				}
			}
			osyield()
		}

		// Sleep. The Xchg both claims the lock (if it was just released)
		// and advertises a sleeper; overclaiming mutex_sleeping is allowed
		// per the state comment above.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			timer.end()
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}

// unlock releases l, routing through unlockWithRank so lock-ordering (rank)
// bookkeeping sees the release.
func unlock(l *mutex) {
	unlockWithRank(l)
}

// unlock2 releases mutex l. If the lock was in the mutex_sleeping state, it
// wakes one thread parked in futexsleep. It also records the release for the
// mutex profile, maintains the M's lock count, and re-arms a pending
// preemption request once the M holds no locks.
func unlock2(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.mLockProfile.recordUnlock(l)
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
2 changes: 2 additions & 0 deletions src/runtime/lock_js.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,8 @@ const (
passive_spin = 1
)

// mWaitList is the per-M state for tracking Ms that wait on a mutex. On this
// platform locks are never contended (see mutexContended below), so no list
// state is needed and the type is empty.
type mWaitList struct{}

// mutexContended reports whether l has waiters; on this platform it is
// always false (no thread ever blocks on a runtime mutex here).
func mutexContended(l *mutex) bool {
	return false
}
Expand Down
Loading

0 comments on commit 18c2461

Please sign in to comment.