@@ -295,7 +295,11 @@ func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket
 	i := int(h % buckHashSize)
 	// first check optimistically, without the lock
 	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
-		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
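+		// Compare the stacks as raw bytes, with lengths scaled from uintptrs to bytes.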
+		bstk := b.stk()
+		if b.typ == typ && b.hash == h && b.size == size &&
+			bytealg.Equal(unsafe.Slice((*byte)(unsafe.Pointer(unsafe.SliceData(bstk))), len(bstk)*int(unsafe.Sizeof(uintptr(0)))),
+				unsafe.Slice((*byte)(unsafe.Pointer(unsafe.SliceData(stk))), len(stk)*int(unsafe.Sizeof(uintptr(0))))) {
 			return b
 		}
 	}
@@ -307,7 +311,11 @@ func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket
 	lock(&profInsertLock)
 	// check again under the insertion lock
 	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
-		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
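+		// As above, compare the stacks as raw bytes with byte-scaled lengths.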
+		bstk := b.stk()
+		if b.typ == typ && b.hash == h && b.size == size &&
+			bytealg.Equal(unsafe.Slice((*byte)(unsafe.Pointer(unsafe.SliceData(bstk))), len(bstk)*int(unsafe.Sizeof(uintptr(0)))),
+				unsafe.Slice((*byte)(unsafe.Pointer(unsafe.SliceData(stk))), len(stk)*int(unsafe.Sizeof(uintptr(0))))) {
 			unlock(&profInsertLock)
 			return b
 		}
@@ -338,10 +346,6 @@ func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket
 	return b
 }
 
-func eqslice(x, y []uintptr) bool {
-	return bytealg.Equal(*(*[]byte)(unsafe.Pointer(&x)), *(*[]byte)(unsafe.Pointer(&y)))
-}
-
 // mProf_NextCycle publishes the next heap profile cycle and creates a
 // fresh heap profile cycle. This operation is fast and can be done
 // during STW. The caller must call mProf_Flush before calling