package runtime
import (
	"runtime/internal/atomic"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)
const (
bucketCntBits = 3
bucketCnt = 1 << bucketCntBits
loadFactorNum = 13
loadFactorDen = 2
maxKeySize = 128
maxElemSize = 128
dataOffset = unsafe.Offsetof(struct {
b bmap
v int64
}{}.v)
emptyRest = 0
emptyOne = 1
evacuatedX = 2
evacuatedY = 3
evacuatedEmpty = 4
minTopHash = 5
iterator = 1
oldIterator = 2
hashWriting = 4
sameSizeGrow = 8
noCheck = 1<<(8*sys.PtrSize) - 1
)
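// Note on the constants above: a bucket holds up to bucketCnt (8) key/elem
// pairs, and the map grows once the average load exceeds
// loadFactorNum/loadFactorDen = 13/2 = 6.5 entries per bucket. For example,
// with B = 5 (32 buckets) growth is triggered when an insert would push the
// count past 13*(32/2) = 208 entries. tophash values below minTopHash are
// reserved as per-cell markers (the empty/evacuated states above); the
// tophash of a real key is bumped into [minTopHash, 255] by tophash().
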
// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
func isEmpty(x uint8) bool {
	return x <= emptyOne
}
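// A header for a Go map. count holds the number of live entries and must stay
// the first field, since the len built-in reads it directly. B is log_2 of the
// number of buckets, hash0 is the per-map hash seed, and oldbuckets is non-nil
// only while the table is growing.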
type hmap struct {
count int
flags uint8
B uint8
noverflow uint16
hash0 uint32
buckets unsafe.Pointer
oldbuckets unsafe.Pointer
nevacuate uintptr
extra *mapextra
}
type mapextra struct {
overflow *[]*bmap
oldoverflow *[]*bmap
nextOverflow *bmap
}
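// A bucket for a Go map. Only the tophash array is declared here; in memory a
// bucket is followed by bucketCnt keys, then bucketCnt elems, then an overflow
// pointer. Packing all keys together (rather than key/elem/key/elem/...)
// avoids the padding that would otherwise be needed for, e.g., map[int64]int8.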
type bmap struct {
tophash [bucketCnt]uint8
}
type hiter struct {
key unsafe.Pointer
elem unsafe.Pointer
t *maptype
h *hmap
buckets unsafe.Pointer
bptr *bmap
overflow *[]*bmap
oldoverflow *[]*bmap
startBucket uintptr
offset uint8
wrapped bool
B uint8
i uint8
bucket uintptr
checkBucket uintptr
}
// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
	return uintptr(1) << (b & (sys.PtrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}

// evacuated reports whether b has already been evacuated to the new bucket array.
func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > emptyOne && h < minTopHash
}

// overflow returns the overflow bucket pointer stored at the end of b.
func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets and is used to trigger
// same-size map growth. To keep hmap small, noverflow is a uint16: it is an
// exact count when there are few buckets and an approximate count otherwise.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are about as many overflow
	// buckets as buckets, so we need to be able to count up to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)), so that when noverflow
	// reaches 1<<15 - 1 there are approximately as many overflow buckets
	// as regular buckets.
	mask := uint32(1)<<(h.B-15) - 1
	if fastrand()&mask == 0 {
		h.noverflow++
	}
}
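// For example, with h.B == 18 the mask above is 7, so noverflow is incremented
// with probability 1/8 and the stored value is roughly 1/8 of the true
// overflow-bucket count. That is the same scale tooManyOverflowBuckets (below)
// compares against, since it caps B at 15.
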
func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.bucket))
	}
	h.incrnoverflow()
	if t.bucket.ptrdata == 0 {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}
func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = fastrand()
	return h
}

// makemap implements Go map creation for make(map[k]v, hint).
// If h != nil, the map can be created directly in h.
func makemap(t *maptype, hint int, h *hmap) *hmap {
	mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size)
	if overflow || mem > maxAlloc {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = new(hmap)
	}
	h.hash0 = fastrand()

	// Find the size parameter B which will hold the requested # of elements.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// Allocate the initial hash table.
	// If B == 0, the buckets field is allocated lazily later (in mapassign).
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}
// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely; avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.bucket.size * nbuckets
		up := roundupsize(sz)
		if up != sz {
			nbuckets = up / t.bucket.size
		}
	}

	if dirtyalloc == nil {
		buckets = newarray(t.bucket, int(nbuckets))
	} else {
		// dirtyalloc was previously generated by the above
		// newarray(t.bucket, int(nbuckets)) but may not be empty.
		buckets = dirtyalloc
		size := t.bucket.size * nbuckets
		if t.bucket.ptrdata != 0 {
			memclrHasPointers(buckets, size)
		} else {
			memclrNoHeapPointers(buckets, size)
		}
	}

	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking these overflow buckets to a minimum,
		// we use the convention that if a preallocated overflow bucket's overflow
		// pointer is nil, then there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}
// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.hasher(key, 0) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	hash := t.hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.key.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
// mapaccess2 is like mapaccess1, but also reports whether the key is present.
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.hasher(key, 0) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	hash := t.hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.key.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
// mapaccessK returns both the key and the elem. It is used by the map iterator.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	hash := t.hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.key.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return k, e
			}
		}
	}
	return nil, nil
}
func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return e
}

func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return e, true
}
// mapassign is like mapaccess, but allocates a slot for the key if it is not
// present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		pc := funcPC(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.key.size)
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.hasher, since t.hasher may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var elem unsafe.Pointer
bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && inserti == nil {
					// Remember the first empty slot in case the key is not found.
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if !t.key.equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.needkeyupdate() {
				typedmemmove(t.key, k, key)
			}
			elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		elem = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/elem at insert position
	if t.indirectkey() {
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectelem() {
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(elem) = vmem
	}
	typedmemmove(t.key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.indirectelem() {
		elem = *((*unsafe.Pointer)(elem))
	}
	return elem
}
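// Note: mapassign only reserves and returns a slot for key; it does not store
// the element itself. The compiler emits the store through the returned
// pointer at the call site, and for common key types it may call
// type-specialized variants (for example mapassign_fast64) instead.
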
// mapdelete removes the entry for key from the map, if present.
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.hasher(key, 0) // see issue 23734
		}
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.hasher, since t.hasher may panic,
	// in which case we have not actually done a write (delete).
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break search
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey() {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !t.key.equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.indirectkey() {
				*(*unsafe.Pointer)(k) = nil
			} else if t.key.ptrdata != 0 {
				memclrHasPointers(k, t.key.size)
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
			if t.indirectelem() {
				*(*unsafe.Pointer)(e) = nil
			} else if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a run of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
// mapiterinit initializes the hiter struct used for ranging over maps.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
		throw("hash_iter size incorrect") // see cmd/compile/internal/gc/reflect.go
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.bucket.ptrdata == 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This preserves all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	r := uintptr(fastrand())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand()) << 31
	}
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}
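// Iteration order note: startBucket and offset are seeded from fastrand above,
// so every range over a map starts at a random bucket and at a random slot
// within each bucket. This is deliberate; programs must not rely on map
// iteration order.
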
func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.elem = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		if t.indirectkey() {
			k = *((*unsafe.Pointer)(k))
		}
		e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.elemsize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. We're working on a bucket whose data
			// is still in the old array, so only return keys that will end up in
			// the bucket we're currently iterating.
			if t.reflexivekey() || t.key.equal(k, k) {
				hash := t.hasher(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs). Use the low bit of
				// tophash as a repeatable, randomish record of which direction
				// NaNs were sent during evacuation.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
			!(t.reflexivekey() || t.key.equal(k, k)) {
			// This entry is still in its original spot (or key != key, in which
			// case it can't be deleted or updated), so we can return it directly.
			it.key = k
			if t.indirectelem() {
				e = *((*unsafe.Pointer)(e))
			}
			it.elem = e
		} else {
			// The hash table has grown since the iterator was started; the
			// golden data for this key is now somewhere else. Look it up in the
			// current table; it may have been deleted, updated, or reinserted.
			rk, re := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.elem = re
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}
// mapclear deletes all keys from a map.
func mapclear(t *maptype, h *hmap) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapclear)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
	}

	if h == nil || h.count == 0 {
		return
	}

	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	h.flags ^= hashWriting

	h.flags &^= sameSizeGrow
	h.oldbuckets = nil
	h.nevacuate = 0
	h.noverflow = 0
	h.count = 0

	// Reset the hash seed to make it more difficult for attackers to
	// repeatedly trigger hash collisions. See issue 25237.
	h.hash0 = fastrand()

	// Keep the mapextra allocation but clear any extra information.
	if h.extra != nil {
		*h.extra = mapextra{}
	}

	// makeBucketArray clears the memory pointed to by h.buckets
	// and recovers any overflow buckets by generating them
	// as if h.buckets was newly allocated.
	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
	if nextOverflow != nil {
		// If overflow buckets are created then h.extra
		// will have been allocated during initial bucket creation.
		h.extra.nextOverflow = nextOverflow
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}

	// The actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}
// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// "Too many" means (approximately) as many overflow buckets as regular buckets; see incrnoverflow.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	return noverflow >= uint16(1)<<(B&15)
}
// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}
func growWork(t *maptype, h *hmap, bucket uintptr) {
	// Make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use.
	evacuate(t, h, bucket&h.oldbucketmask())

	// Evacuate one more oldbucket to make progress on growing.
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
	return evacuated(b)
}
type evacDst struct {
b *bmap
i int
k unsafe.Pointer
e unsafe.Pointer
}
func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*uintptr(t.keysize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*uintptr(t.keysize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey() {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
						// If key != key (NaNs), the hash is not repeatable, but any
						// iterators must be able to reproduce this decision, so let
						// the low bit of tophash drive it.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.indirectkey() {
					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
				} else {
					typedmemmove(t.key, dst.k, k) // copy key
				}
				if t.indirectelem() {
					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
				} else {
					typedmemmove(t.elem, dst.e, e)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as the overflow pointer at the
				// end of the bucket protects against pointing past the bucket.
				dst.k = add(dst.k, uintptr(t.keysize))
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}
//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
	// Check invariants and reflects math.
	if t.key.equal == nil {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
		throw("key size wrong")
	}
	if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(sys.PtrSize)) ||
		t.elem.size <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.size)) {
		throw("elem size wrong")
	}
	if t.key.align > bucketCnt {
		throw("key align too big")
	}
	if t.elem.align > bucketCnt {
		throw("elem align too big")
	}
	if t.key.size%uintptr(t.key.align) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.elem.size%uintptr(t.elem.align) != 0 {
		throw("elem size not a multiple of elem align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		throw("need padding in bucket (elem)")
	}

	return makemap(t, cap, nil)
}
//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	elem, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.elem, p, elem)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_mapiterelem reflect.mapiterelem
func reflect_mapiterelem(it *hiter) unsafe.Pointer {
	return it.elem
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

//go:linkname reflectlite_maplen internal/reflectlite.maplen
func reflectlite_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}
const maxZero = 1024
var zeroVal [maxZero]byte
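// How this file is reached: the compiler lowers map operations in ordinary Go
// code to calls into these routines, roughly as follows (illustrative; the
// exact entry points and fast-path variants depend on the key type):
//
//	v := m[k]                    // mapaccess1 (or a *_fast* variant)
//	v, ok := m[k]                // mapaccess2
//	m[k] = v                     // mapassign, then a store through the returned pointer
//	delete(m, k)                 // mapdelete
//	for k, v := range m { ... }  // mapiterinit / mapiternext
//
// A failed lookup returns a pointer into zeroVal above rather than nil, which
// is why reading a missing key yields the zero value of the element type.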