package runtime
import (
	"runtime/internal/sys"
	"unsafe"
)
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
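// Sketch, not part of this file: for a map m of type map[uint64]V, the
// compiler lowers a one-result read roughly as follows (m, k, v, and V are
// hypothetical names; t and h stand for m's *maptype descriptor and *hmap
// header):
//
//	v := m[k]
//	// compiles, in effect, to:
//	v := *(*V)(mapaccess1_fast64(t, h, k))
//	// a missing key yields a pointer to zeroVal, so v gets V's zero value.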
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
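// Sketch, not part of this file: the comma-ok form is lowered to the
// two-result variant above (same hypothetical names as before):
//
//	v, ok := m[k]
//	// compiles, in effect, to:
//	p, ok := mapaccess2_fast64(t, h, k)
//	v := *(*V)(p) // p points at zeroVal when ok is false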
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
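// Sketch, not part of this file: an assignment to a 64-bit-keyed map is
// lowered to mapassign_fast64, which returns a pointer to the elem slot that
// the compiler then stores through (same hypothetical names as before):
//
//	m[k] = v
//	// compiles, in effect, to:
//	*(*V)(mapassign_fast64(t, h, k)) = v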
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
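// Note: mapassign_fast64ptr is the variant selected when the key is a
// pointer-shaped 64-bit value (e.g. a *T or unsafe.Pointer key on a 64-bit
// platform). Writing the key through *(*unsafe.Pointer) rather than
// *(*uint64) keeps the store visible to the write barrier, which is
// presumably why the two functions are kept separate.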
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.ptrdata != 0 {
				if sys.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// There are three ways to squeeze at one or more 32 bit pointers into 64 bits.
					// Just call memclrHasPointers instead of trying to handle all cases here.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
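// Sketch, not part of this file: the built-in delete on a 64-bit-keyed map is
// lowered to the function above (same hypothetical names as before):
//
//	delete(m, k)
//	// compiles, in effect, to:
//	mapdelete_fast64(t, h, k)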
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}
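// Growth is incremental: each assign or delete that runs while h.growing()
// calls growWork_fast64, which evacuates at most two old buckets (the one
// about to be touched, plus one more at h.nevacuate to guarantee forward
// progress), so the total cost of a grow is amortized across writes.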
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.ptrdata != 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
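// During a size-doubling grow, each old bucket is split between two
// destinations: x keeps the same bucket index and y is offset by newbit (the
// old bucket count). The bit hash&newbit picks the half, so reads performed
// mid-grow can still locate keys that remain in oldbuckets.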