diff --git a/offheap/internal/pointerstore/free_list.go b/offheap/internal/pointerstore/free_list.go
new file mode 100644
index 0000000..e6d8555
--- /dev/null
+++ b/offheap/internal/pointerstore/free_list.go
@@ -0,0 +1,24 @@
+// Copyright 2025 Francis Michael Stephens. All rights reserved. Use of this
+// source code is governed by an MIT license that can be found in the LICENSE
+// file.
+
+package pointerstore
+
+type freeStack struct {
+	free []RefPointer
+}
+
+func (s *freeStack) push(r RefPointer) {
+	s.free = append(s.free, r)
+}
+
+func (s *freeStack) pop() (r RefPointer, ok bool) {
+	l := len(s.free)
+	if l == 0 {
+		return RefPointer{}, false
+	}
+
+	r = s.free[l-1]
+	s.free = s.free[:l-1]
+	return r, true
+}
diff --git a/offheap/internal/pointerstore/free_list_test.go b/offheap/internal/pointerstore/free_list_test.go
new file mode 100644
index 0000000..ffd35af
--- /dev/null
+++ b/offheap/internal/pointerstore/free_list_test.go
@@ -0,0 +1,95 @@
+// Copyright 2025 Francis Michael Stephens. All rights reserved. Use of this
+// source code is governed by an MIT license that can be found in the LICENSE
+// file.
+
+package pointerstore
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestFreeStack_Empty(t *testing.T) {
+	s := freeStack{}
+	r, ok := s.pop()
+	assert.True(t, r.IsNil())
+	assert.False(t, ok)
+}
+
+// Push three references onto the stack, then pop them off in reverse order.
+func TestFreeStack_PushPop(t *testing.T) {
+	allocConfig := NewAllocConfigBySize(8, 32*8)
+	objects, metadatas := MmapSlab(allocConfig)
+
+	s := freeStack{}
+
+	r1Push := NewReference(objects[0], metadatas[0])
+	r2Push := NewReference(objects[1], metadatas[1])
+	r3Push := NewReference(objects[2], metadatas[2])
+
+	s.push(r1Push)
+	s.push(r2Push)
+	s.push(r3Push)
+
+	r3Pop, ok3 := s.pop()
+	assert.Equal(t, r3Push, r3Pop)
+	assert.True(t, ok3)
+
+	r2Pop, ok2 := s.pop()
+	assert.Equal(t, r2Push, r2Pop)
+	assert.True(t, ok2)
+
+	r1Pop, ok1 := s.pop()
+	assert.Equal(t, r1Push, r1Pop)
+	assert.True(t, ok1)
+
+	r0Pop, ok0 := s.pop()
+	assert.True(t, r0Pop.IsNil())
+	assert.False(t, ok0)
+}
+
+// Push three references, pop two, then push two more. Pop the remaining references.
+func TestFreeStack_PushPopComplex(t *testing.T) {
+	allocConfig := NewAllocConfigBySize(8, 32*8)
+	objects, metadatas := MmapSlab(allocConfig)
+
+	s := freeStack{}
+
+	r1Push := NewReference(objects[0], metadatas[0])
+	r2Push := NewReference(objects[1], metadatas[1])
+	r3Push := NewReference(objects[2], metadatas[2])
+	r4Push := NewReference(objects[3], metadatas[3])
+	r5Push := NewReference(objects[4], metadatas[4])
+
+	s.push(r1Push)
+	s.push(r2Push)
+	s.push(r3Push)
+
+	r3Pop, ok3 := s.pop()
+	assert.Equal(t, r3Push, r3Pop)
+	assert.True(t, ok3)
+
+	r2Pop, ok2 := s.pop()
+	assert.Equal(t, r2Push, r2Pop)
+	assert.True(t, ok2)
+
+	s.push(r4Push)
+	s.push(r5Push)
+
+	r5Pop, ok5 := s.pop()
+	assert.Equal(t, r5Push, r5Pop)
+	assert.True(t, ok5)
+
+	r4Pop, ok4 := s.pop()
+	assert.Equal(t, r4Push, r4Pop)
+	assert.True(t, ok4)
+
+	r1Pop, ok1 := s.pop()
+	assert.Equal(t, r1Push, r1Pop)
+	assert.True(t, ok1)
+
+	r0Pop, ok0 := s.pop()
+	assert.True(t, r0Pop.IsNil())
+	assert.False(t, ok0)
+}
diff --git a/offheap/internal/pointerstore/metadata.go b/offheap/internal/pointerstore/metadata.go
new file mode 100644
index 0000000..ba88e27
--- /dev/null
+++ b/offheap/internal/pointerstore/metadata.go
@@ -0,0 +1,43 @@
+// Copyright 2025 Francis Michael Stephens. All rights reserved. Use of this
+// source code is governed by an MIT license that can be found in the LICENSE
+// file.
+
+package pointerstore
+
+// A RefPointer smuggles a generation tag. Only references with the same gen
+// value can access/free objects they point to. This is a best-effort safety
+// check to try to catch use-after-free type errors.
+//
+// Metadata smuggles a generation tag and an is-free tag into its
+// taggedAddress. When accessing the data from a RefPointer, we check that
+// the metadata indicates the object is not free and that the RefPointer's
+// generation matches that of the metadata.
+type metadata struct {
+	dataAddressAndGen taggedAddress
+}
+
+//gcassert:noescape
+func (m *metadata) gen() uint8 {
+	return m.dataAddressAndGen.gen()
+}
+
+//gcassert:noescape
+func (m *metadata) incGen() uint8 {
+	m.dataAddressAndGen = m.dataAddressAndGen.withIncGen()
+	return m.dataAddressAndGen.gen()
+}
+
+//gcassert:noescape
+func (m *metadata) isFree() bool {
+	return m.dataAddressAndGen.isFree()
+}
+
+//gcassert:noescape
+func (m *metadata) setFree() {
+	m.dataAddressAndGen = m.dataAddressAndGen.withFree()
+}
+
+//gcassert:noescape
+func (m *metadata) setNotFree() {
+	m.dataAddressAndGen = m.dataAddressAndGen.withNotFree()
+}
diff --git a/offheap/internal/pointerstore/pointer_reference.go b/offheap/internal/pointerstore/pointer_reference.go
index 8fa0fbd..5e2003c 100644
--- a/offheap/internal/pointerstore/pointer_reference.go
+++ b/offheap/internal/pointerstore/pointer_reference.go
@@ -9,171 +9,128 @@ import (
 	"unsafe"
 )
 
-const maskShift = 56 // This leaves 8 bits for the generation data
-const genMask = uint64(0xFF << maskShift)
-const pointerMask = ^genMask
-
 // The address field holds a pointer to an object, but also sneaks a generation
-// value in the top 8 bits of the metaAddress field.
+// value in the top 8 bits of the dataAddress field.
 //
 // The generation must be masked out to get a usable pointer value. The object
 // pointed to must have the same generation value in order to access/free that
 // object.
-//
-// Because the Refpointer struct is two words (on 64 bit systems) in size, all
-// method receivers are pointers to avoid copying two words in method calls.
-// However, the RefPointer is _always_ used as a value. This means that having
-// a pointer receiver could potentially cause the RefPointer to be allocated if
-// its receiver escapes to the heap (according to escape analysis). We assert
-// that all methods do not allow their receiver variable to escape to the heap.
-//
-// This is all very nice and good, but the idea that avoiding copying two words
-// is a meaningful improvement are speculative and haven't been tested. This
-// would be a good target for future performance testing.
 type RefPointer struct {
-	dataAddress uint64
-	metaAddress uint64
-}
-
-// If the object's metadata has a non-nil nextFree pointer then the object is
-// currently free. Object's which have never been allocated are implicitly
-// free, but have a nil nextFree.
-//
-// An object's metadata has a gen field. Only references with the same gen
-// value can access/free objects they point to. This is a best-effort safety
-// check to try to catch use-after-free type errors.
-type metadata struct {
-	nextFree RefPointer
-	gen      uint8
+	address taggedAddress
 }
 
-func NewReference(pAddress, pMetadata uintptr) RefPointer {
-	if pAddress == (uintptr)(unsafe.Pointer(nil)) {
-		panic("cannot create new Reference with nil pointer")
+func NewReference(dataAddress, metaAddress uintptr) RefPointer {
+	if dataAddress == nilPtr {
+		panic("cannot create new Reference with nil data pointer")
 	}
 
-	address := uint64(pAddress)
-	// This sets the generation to 0 by clearing the smuggled bits
-	maskedAddress := address & pointerMask
-
-	// Setting the generation 0 shouldn't actually change the address
-	// If there were any 1s in the top part of the address our generation
-	// smuggling system will break this pointer. This is an unrecoverable error.
-	if address != maskedAddress {
-		panic(fmt.Errorf("the raw pointer (%d) uses more than %d bits", address, maskShift))
+	if metaAddress == nilPtr {
+		panic("cannot create new Reference with nil metadata pointer")
 	}
 
-	// NB: The gen on a brand new Reference is always 0
-	// So we don't set it
-	return RefPointer{
-		dataAddress: maskedAddress,
-		metaAddress: uint64(pMetadata),
+	r := RefPointer{
+		address: newTaggedAddress(metaAddress),
 	}
-}
 
-//gcassert:noescape
-func (r *RefPointer) AllocFromFree() (nextFree RefPointer) {
-	// Grab the nextFree reference, and nil it for this metadata
+	// Set the dataAddressAndGen in this reference's metadata.
 	meta := r.metadata()
-	nextFree = meta.nextFree
-	meta.nextFree = RefPointer{}
+	meta.dataAddressAndGen = newTaggedAddress(dataAddress)
+	// The value defaults to false, but we write it here for readability
+	meta.setNotFree()
 
-	// If the nextFree pointer points back to this Reference, then there
-	// are no more freed slots available
-	if nextFree == *r {
-		nextFree = RefPointer{}
-	}
+	return r
+}
+
+func (r RefPointer) free() {
+	// Check that this reference can access the allocation to free it
+	r.accessibleActiveAddress()
 
-	// Increment the generation for the object and set that generation in
-	// the Reference
-	meta.gen++
-	r.setGen(meta.gen)
+	meta := r.metadata()
 
-	return nextFree
+	// Mark the object as free
+	meta.setFree()
+
+	// Increment the generation for the allocation's metadata
+	meta.incGen()
 }
 
-//gcassert:noescape
-func (r *RefPointer) Free(oldFree RefPointer) {
+// Sets the RefPointer's metadata to not-free and creates a new RefPointer with
+// the correct generation tag.
+func (r RefPointer) allocFromFree() RefPointer {
 	meta := r.metadata()
-	if !meta.nextFree.IsNil() {
-		// NB: The odd-looking *r here actually prevents an allocation.
-		// Fuller explanation found in DataPtr()
-		panic(fmt.Errorf("attempted to Free freed allocation %v", *r))
+	if !meta.isFree() {
+		panic(fmt.Errorf("attempt to alloc-from-free active allocation %v", r))
 	}
 
-	if meta.gen != r.Gen() {
-		panic(fmt.Errorf("attempt to free allocation (%d) using stale reference (%d)", meta.gen, r.Gen()))
-	}
+	// This object is now allocated and is no longer free
+	meta.setNotFree()
 
-	if oldFree.IsNil() {
-		meta.nextFree = *r
-	} else {
-		meta.nextFree = oldFree
-	}
-}
+	// Get the metadata generation tag
+	gen := meta.gen()
 
-//gcassert:noescape
-func (r *RefPointer) IsNil() bool {
-	return r.metadataPtr() == 0
+	// Create a new RefPointer with the correct generation tag
+	return r.withGen(gen)
 }
 
-//gcassert:noescape
-func (r *RefPointer) DataPtr() uintptr {
+// This method re-allocates the memory location. When this method returns r
+// will no longer be a valid reference. The reference returned _will_ be a
+// valid reference to the same location.
+func (r RefPointer) Realloc() RefPointer {
+	// Test that this reference is actually allowed to access the allocation
+	r.accessibleActiveAddress()
+
 	meta := r.metadata()
-	if !meta.nextFree.IsNil() {
-		// NB: We make a copy of r here - otherwise the compiler
-		// believes that r itself escapes to the heap (not strictly
-		// wrong) and will allocate it to the heap, even if this path
-		// is not taken. This panic path _does_ allocate due to the fmt
-		// call, but if we don't take a copy of r in the fmt call, then
-		// every call will allocate regardless of whether the method
-		// panics or not
-		panic(fmt.Errorf("attempted to get freed allocation %v", *r))
-	}
+	// Get and increment the metadata generation tag
+	gen := meta.incGen()
 
-	if meta.gen != r.Gen() {
-		panic(fmt.Errorf("attempt to get value (%d) using stale reference (%d)", meta.gen, r.Gen()))
-	}
-	return (uintptr)(r.dataAddress & pointerMask)
+	// Set the new generation tag in the reference
+	return r.withGen(gen)
+}
+
+func (r RefPointer) DataPtr() uintptr {
+	return r.accessibleActiveAddress().pointer()
 }
 
 // Convenient method to retrieve raw data of an allocation
-//
-//gcassert:noescape
-func (r *RefPointer) Bytes(size int) []byte {
-	ptr := r.DataPtr()
-	return pointerToBytes(ptr, size)
+func (r RefPointer) Bytes(size int) []byte {
+	return r.accessibleActiveAddress().bytes(size)
 }
 
-//gcassert:noescape
-func (r *RefPointer) metadataPtr() uintptr {
-	return (uintptr)(r.metaAddress & pointerMask)
+func (r RefPointer) IsNil() bool {
+	return r.address.isNil()
 }
 
-//gcassert:noescape
-func (r *RefPointer) metadata() *metadata {
-	return (*metadata)(unsafe.Pointer(r.metadataPtr()))
+// Calling this method:
+//
+// 1: Looks up the metadata for this reference
+// 2: Verifies that the reference is _allowed_ to access this data
+// 3: Returns the taggedAddress pointing to the actual data
+func (r RefPointer) accessibleActiveAddress() taggedAddress {
+	meta := r.metadata()
+
+	if meta.isFree() {
+		panic(fmt.Errorf("attempt to access freed allocation %v", r))
+	}
+
+	if meta.gen() != r.Gen() {
+		panic(fmt.Errorf("generation mismatch between metadata (%d) and reference (%d)", meta.gen(), r.Gen()))
+	}
+
+	return meta.dataAddressAndGen
 }
 
-//gcassert:noescape
-func (r *RefPointer) Gen() uint8 {
-	return (uint8)((r.metaAddress & genMask) >> maskShift)
+func (r RefPointer) metadata() *metadata {
+	return (*metadata)(unsafe.Pointer(r.address.pointer()))
 }
 
-//gcassert:noescape
-func (r *RefPointer) setGen(gen uint8) {
-	r.metaAddress = (r.metaAddress & pointerMask) | (uint64(gen) << maskShift)
+func (r RefPointer) Gen() uint8 {
+	return r.address.gen()
 }
 
-// This method re-allocates the memory location. When this method returns r
-// will no longer be a valid reference. The reference returned _will_ be a
-// valid reference to the same location.
-func (r *RefPointer) Realloc() RefPointer {
-	newRef := *r
-	meta := r.metadata()
-	meta.gen++
-	newRef.setGen(meta.gen)
-	return newRef
+func (r RefPointer) withGen(gen uint8) RefPointer {
+	return RefPointer{
+		address: r.address.withGen(gen),
+	}
 }
diff --git a/offheap/internal/pointerstore/pointer_reference_test.go b/offheap/internal/pointerstore/pointer_reference_test.go
index 96e859a..6401c0a 100644
--- a/offheap/internal/pointerstore/pointer_reference_test.go
+++ b/offheap/internal/pointerstore/pointer_reference_test.go
@@ -6,6 +6,7 @@ package pointerstore
 
 import (
 	"testing"
+	"unsafe"
 
 	"github.com/stretchr/testify/assert"
 )
@@ -16,13 +17,33 @@ func TestIsNil(t *testing.T) {
 	assert.True(t, r.IsNil())
 }
 
-// Calling newReference() with nil will panic
+// Calling NewReference() with any nil uintptr parameter will panic
 func TestNewReferenceWithNilPanics(t *testing.T) {
-	assert.Panics(t, func() { NewReference(0, 0) })
+	nilPtr := uintptr(unsafe.Pointer(nil))
+
+	allocConfig := NewAllocConfigBySize(8, 8)
+	objects, metadata := MmapSlab(allocConfig)
+
+	assert.Panics(t, func() { NewReference(objects[0], nilPtr) })
+	assert.Panics(t, func() { NewReference(nilPtr, metadata[0]) })
+	assert.Panics(t, func() { NewReference(nilPtr, nilPtr) })
+}
+
+// Calling NewReference() with any uintptr parameter that has generation tag bits set will panic
+func TestNewReferenceWithGenerationBitsSetPanics(t *testing.T) {
+	allocConfig := NewAllocConfigBySize(8, 8)
+	objects, metadata := MmapSlab(allocConfig)
+
+	badObject := objects[0] | (1 << maskShift)
+	badMeta := metadata[0] | (1 << maskShift)
+
+	assert.Panics(t, func() { NewReference(objects[0], badMeta) })
+	assert.Panics(t, func() { NewReference(badObject, metadata[0]) })
+	assert.Panics(t, func() { NewReference(badObject, badMeta) })
+}
 
-// Demonstrate that a pointer with any non-0 field is not nil
-func TestIsNotNil(t *testing.T) {
+// Demonstrate the expected properties of a newly created reference
+func TestNewReference(t *testing.T) {
 	allocConfig := NewAllocConfigBySize(8, 32*8)
 	objects, metadata := MmapSlab(allocConfig)
 	for i := range objects {
@@ -31,10 +52,15 @@
 		assert.False(t, r.IsNil())
 		// Data pointer points to the correct location
 		assert.Equal(t, objects[i], r.DataPtr())
+		// Bytes points to data at the correct location
+		assert.Equal(t, objects[i], uintptr(unsafe.Pointer(&r.Bytes(8)[0])))
 		// Metadata pointer points to the correct location
-		assert.Equal(t, metadata[i], r.metadataPtr())
+		assert.Equal(t, metadata[i], r.address.pointer())
 		// Generation of a new Reference is always 0
 		assert.Equal(t, uint8(0), r.Gen())
+		// The data should be accessible through this reference
+		assert.NotPanics(t, func() { r.DataPtr() })
+		assert.NotPanics(t, func() { r.Bytes(8) })
 	}
 }
@@ -58,16 +84,90 @@ func TestGenerationDoesNotAppearInOtherFields(t *testing.T) {
 	r := NewReference(objects[0], metadatas[0])
 
 	dataPtr := r.DataPtr()
-	metaPtr := r.metadataPtr()
 	metadata := r.metadata()
 
-	gen := uint8(255)
-	metadata.gen = gen
-	r.setGen(gen)
+	for i := 1; i <= maxGen; i++ {
+		gen := metadata.incGen()
+		r = r.withGen(gen)
+
+		assert.Equal(t, dataPtr, r.DataPtr())
+		assert.Equal(t, metadata, r.metadata())
+		assert.Equal(t, uint8(i%128), r.Gen())
+	}
+}
+
+func TestFree(t *testing.T) {
+	allocConfig := NewAllocConfigBySize(8, 32*8)
+	objects, metadatas := MmapSlab(allocConfig)
+
+	r := NewReference(objects[0], metadatas[0])
+	meta := r.metadata()
+
+	assert.False(t, meta.isFree())
+	assert.Equal(t, uint8(0), meta.dataAddressAndGen.gen())
+	assert.Equal(t, uint8(0), r.Gen())
+
+	r.free()
+
+	// The reference still points to the same metadata location
+	assert.Equal(t, meta, r.metadata())
+
+	// The metadata is now marked as free
+	assert.True(t, meta.isFree())
+	// After free is called the metadata for this reference has a new
+	// generation tag, while the reference has the same old generation tag
+	assert.Equal(t, uint8(1), meta.dataAddressAndGen.gen())
+	assert.Equal(t, uint8(0), r.Gen())
+
+	// Accessing the data of a freed reference will panic
+	assert.Panics(t, func() { r.DataPtr() })
+	assert.Panics(t, func() { r.Bytes(8) })
+
+	// Reallocing a freed reference will panic
+	assert.Panics(t, func() { r.Realloc() })
 
-	assert.Equal(t, dataPtr, r.DataPtr())
-	assert.Equal(t, metaPtr, r.metadataPtr())
-	assert.Equal(t, gen, r.Gen())
+	// Freeing a freed reference will panic
+	assert.Panics(t, func() { r.free() })
+}
+
+func TestAllocFromFree(t *testing.T) {
+	allocConfig := NewAllocConfigBySize(8, 32*8)
+	objects, metadatas := MmapSlab(allocConfig)
+
+	r := NewReference(objects[0], metadatas[0])
+	meta := r.metadata()
+
+	assert.False(t, meta.isFree())
+	assert.Equal(t, uint8(0), meta.dataAddressAndGen.gen())
+	assert.Equal(t, uint8(0), r.Gen())
+
+	r.free()
+	r = r.allocFromFree()
+
+	// The reference still points to the same metadata location
+	assert.Equal(t, meta, r.metadata())
+
+	// The metadata is now marked as not free
+	assert.False(t, meta.isFree())
+	// After allocFromFree is called the reference's generation will match the metadata's generation
+	assert.Equal(t, uint8(1), meta.dataAddressAndGen.gen())
+	assert.Equal(t, uint8(1), r.Gen())
+
+	// Accessing the data of an allocated-from-free reference will not panic
+	assert.NotPanics(t, func() { r.DataPtr() })
+	assert.NotPanics(t, func() { r.Bytes(8) })
+
+	// Freeing an allocated-from-free reference will not panic
+	assert.NotPanics(t, func() { r.free() })
+}
+
+func TestAllocFromFree_NoFree(t *testing.T) {
+	allocConfig := NewAllocConfigBySize(8, 32*8)
+	objects, metadatas := MmapSlab(allocConfig)
+
+	r := NewReference(objects[0], metadatas[0])
+
+	assert.Panics(t, func() { r.allocFromFree() })
 }
 
 func TestRealloc(t *testing.T) {
@@ -75,20 +175,34 @@
 	objects, metadatas := MmapSlab(allocConfig)
 
 	r1 := NewReference(objects[0], metadatas[0])
+	meta1 := r1.metadata()
+
 	dataPtr := r1.DataPtr()
-	metaPtr := r1.metadataPtr()
+	metadata := r1.metadata()
 	gen := r1.Gen()
 
 	r2 := r1.Realloc()
+	meta2 := r2.metadata()
+
+	// The metadata of r1 and r2 is the same location
+	assert.Equal(t, meta1, meta2)
+	// The generation matches in both r2 and the metadata
+	assert.Equal(t, r2.Gen(), meta2.gen())
+	// The generation does not match between r1 and the metadata
+	assert.NotEqual(t, r1.Gen(), meta2.gen())
 
 	// Assert that the data/metadata pointed to by r1 and r2 is the same
 	assert.Equal(t, dataPtr, r2.DataPtr())
-	assert.Equal(t, metaPtr, r2.metadataPtr())
+	assert.Equal(t, metadata, r2.metadata())
 
 	// Assert that r2 has a different generation than r1
 	assert.NotEqual(t, gen, r2.Gen())
 
-	// Assert that r1 is no longer valid, but r2 is valid
+	// Assert that data is no longer accessible via r1
 	assert.Panics(t, func() { r1.DataPtr() })
+	assert.Panics(t, func() { r1.Bytes(8) })
+
+	// Assert that data is accessible via r2
 	assert.NotPanics(t, func() { r2.DataPtr() })
+	assert.NotPanics(t, func() { r2.Bytes(8) })
 }
diff --git a/offheap/internal/pointerstore/pointer_store.go b/offheap/internal/pointerstore/pointer_store.go
index 500fe96..7fbd2ca 100644
--- a/offheap/internal/pointerstore/pointer_store.go
+++ b/offheap/internal/pointerstore/pointer_store.go
@@ -32,7 +32,7 @@ type Store struct {
 
-	// freeRWLock protects rootFree
+	// freeLock protects freeList
 	freeLock sync.Mutex
-	rootFree RefPointer
+	freeList freeStack
 
 	// objectsLock protects objects
 	// Allocating to an existing slab with a free slot only needs a read lock
@@ -67,8 +67,9 @@ func (s *Store) Free(r RefPointer) {
 	s.freeLock.Lock()
 	defer s.freeLock.Unlock()
 
-	r.Free(s.rootFree)
-	s.rootFree = r
+	// Mark the referenced allocation as free
+	r.free()
+	s.freeList.push(r)
 
 	s.frees.Add(1)
 }
@@ -123,16 +124,13 @@ func (s *Store) allocFromFree() (RefPointer, bool) {
 	s.freeLock.Lock()
 	defer s.freeLock.Unlock()
 
-	// No free objects available - allocFromFree failed
-	if s.rootFree.IsNil() {
+	r, ok := s.freeList.pop()
+	if !ok {
 		return RefPointer{}, false
 	}
 
-	// Get pointer to the next available freed slot
-	alloc := s.rootFree
-	s.rootFree = alloc.AllocFromFree()
-
-	return alloc, true
+	// Create a new allocated reference with the correct generation
+	return r.allocFromFree(), true
 }
 
 func (s *Store) allocFromOffset() RefPointer {
diff --git a/offheap/internal/pointerstore/pointer_store_test.go b/offheap/internal/pointerstore/pointer_store_test.go
index 1844db7..ede351e 100644
--- a/offheap/internal/pointerstore/pointer_store_test.go
+++ b/offheap/internal/pointerstore/pointer_store_test.go
@@ -7,6 +7,7 @@ package pointerstore
 import (
 	"fmt"
 	"testing"
+	"unsafe"
 
 	"github.com/stretchr/testify/assert"
 )
@@ -73,7 +74,7 @@ func TestSlabIntegrity(t *testing.T) {
 	}
 
 	baseSlabData := refs[0].DataPtr()
-	baseSlabMetadata := refs[0].metadataPtr()
+	baseSlabMetadata := uintptr(unsafe.Pointer(refs[0].metadata()))
 
 	// Check that the metadata is allocated immediately _after_ the data
 	assert.Equal(t, baseSlabMetadata, baseSlabData+uintptr(conf.TotalObjectSize))
@@ -86,7 +87,7 @@ func TestSlabIntegrity(t *testing.T) {
 		assert.Equal(t, baseSlabData+expectedDataOffset, dataPtr)
 
 		// Check that the metadata allocations are spaced out appropriately
-		metaPtr := ref.metadataPtr()
+		metaPtr := uintptr(unsafe.Pointer(ref.metadata()))
 		expectedMetaOffset := uintptr(conf.MetadataSize) * uintptr(i)
 		assert.Equal(t, baseSlabMetadata+expectedMetaOffset, metaPtr)
 	}
diff --git a/offheap/internal/pointerstore/tagged_address.go b/offheap/internal/pointerstore/tagged_address.go
new file mode 100644
index 0000000..6998882
--- /dev/null
+++ b/offheap/internal/pointerstore/tagged_address.go
@@ -0,0 +1,88 @@
+// Copyright 2025 Francis Michael Stephens. All rights reserved. Use of this
+// source code is governed by an MIT license that can be found in the LICENSE
+// file.
+
+package pointerstore
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+const (
+	// Handy nil pointer constant
+	nilPtr = uintptr(0)
+	// Maximum value of the generation tag (127)
+	maxGen = 0x7F
+	// This leaves 8 bits for the generation and isFree tags
+	maskShift = 56
+	// Highest bit indicates if address is free
+	isFreeMask = taggedAddress(0x80 << maskShift)
+	// Next 7 bits indicate generation tag
+	genMask = taggedAddress(maxGen << maskShift)
+	// A generation value of one. This is used to increment generation
+	// values
+	genOne = taggedAddress(1 << maskShift)
+	// Mask revealing all 8 tag bits
+	tagMask = isFreeMask | genMask
+	// Mask revealing the 56 address bits
+	addressMask = ^tagMask
+	// Mask revealing the address and is-free bit, allows us to safely set the generation tag
+	addressAndIsFreeMask = addressMask | isFreeMask
+	// Mask which can be used to directly set the is-free bit to 1
+	setFree = isFreeMask
+	// Mask which can be used to directly set the is-free bit to 0
+	setNotFree = ^setFree
+)
+
+type taggedAddress uint64
+
+func newTaggedAddress(ptr uintptr) taggedAddress {
+	ta := taggedAddress(ptr)
+	if ta.gen() != 0 {
+		panic(fmt.Errorf("cannot create tagged address from pointer (%d) with bits set in the generation tag (%d)", ptr, ta.gen()))
+	}
+	return ta
+}
+
+func (a taggedAddress) gen() uint8 {
+	return (uint8)((a & genMask) >> maskShift)
+}
+
+func (a taggedAddress) pointer() uintptr {
+	return uintptr(a & addressMask)
+}
+
+func (a taggedAddress) bytes(size int) []byte {
+	return ([]byte)(unsafe.Slice((*byte)((unsafe.Pointer)(a.pointer())), size))
+}
+
+func (a taggedAddress) isFree() bool {
+	return a&setFree == setFree
+}
+
+func (a taggedAddress) isNil() bool {
+	return a.pointer() == nilPtr
+}
+
+func (a taggedAddress) withGen(gen uint8) taggedAddress {
+	// Move the new gen into position
+	newGen := (taggedAddress(gen) << maskShift) & genMask
+	// Set the new generation value back into the address
+	return (a & addressAndIsFreeMask) | newGen
+}
+
+func (a taggedAddress) withIncGen() taggedAddress {
+	// Increment the generation value alone
+	newGen := (a + genOne) & genMask
+	// Set the new generation value back into the address
+	return (a & addressAndIsFreeMask) | newGen
+}
+
+func (a taggedAddress) withFree() taggedAddress {
+	return a | setFree
+}
+
+func (a taggedAddress) withNotFree() taggedAddress {
+	return a & setNotFree
+}
diff --git a/offheap/internal/pointerstore/tagged_address_test.go b/offheap/internal/pointerstore/tagged_address_test.go
new file mode 100644
index 0000000..0a44a55
--- /dev/null
+++ b/offheap/internal/pointerstore/tagged_address_test.go
@@ -0,0 +1,170 @@
+// Copyright 2025 Francis Michael Stephens. All rights reserved. Use of this
+// source code is governed by an MIT license that can be found in the LICENSE
+// file.
+
+package pointerstore
+
+import (
+	"math/rand"
+	"testing"
+	"unsafe"
+
+	"github.com/stretchr/testify/assert"
+)
+
+const maxAddress = (1 << maskShift) - 1
+
+// For a nil uintptr the address and generation tag are zero
+func TestTaggedAddress_ZeroValue(t *testing.T) {
+	var ta taggedAddress
+
+	// address and gen are 0
+	assert.True(t, ta.isNil())
+	assert.Equal(t, uintptr(unsafe.Pointer(nil)), ta.pointer())
+	assert.Equal(t, uint8(0), ta.gen())
+}
+
+// For a nil uintptr the address and generation tag are zero
+func TestTaggedAddress_NewTaggedAddress_Zero(t *testing.T) {
+	nilPtr := uintptr(unsafe.Pointer(nil))
+	ta := newTaggedAddress(nilPtr)
+
+	// address and gen are 0
+	assert.True(t, ta.isNil())
+	assert.Equal(t, nilPtr, ta.pointer())
+	assert.Equal(t, uint8(0), ta.gen())
+}
+
+// For any legal address and generation tag combination the address and
+// generation tag are available unaltered
+func TestTaggedAddress_AddressAndGenAreSeparate(t *testing.T) {
+	for range 10 {
+		newPtr := uintptr(rand.Int63n(maxAddress + 1))
+		ta := newTaggedAddress(newPtr)
+
+		for i := 1; i <= maxGen; i++ {
+			gen := uint8(i)
+			// set tagged address generation
+			ta = ta.withGen(gen)
+
+			// If the address is not 0 then isNil() must be false
+			assert.Equal(t, newPtr == nilPtr, ta.isNil())
+			// Assert that the original address is preserved
+			assert.Equal(t, newPtr, ta.pointer())
+			// Assert that the generation is preserved
+			assert.Equal(t, gen, ta.gen())
+		}
+	}
+}
+
+// For any legal address and generation tag combination the address and
+// generation tag are available unaltered
+func TestTaggedAddress_WithGen(t *testing.T) {
+	for range 10 {
+		newPtr := uintptr(rand.Int63n(maxAddress + 1))
+		ta := newTaggedAddress(newPtr)
+		testWithGen(t, ta.withNotFree())
+		testWithGen(t, ta.withFree())
+	}
+}
+
+func testWithGen(t *testing.T, ta taggedAddress) {
+	ptr := ta.pointer()
+	isFree := ta.isFree()
+	for i := 0; i <= maxGen*4; i++ {
+		// set tagged address generation
+		ta = ta.withGen(uint8(i))
+
+		// If the address is not 0 then isNil() must be false
+		assert.Equal(t, ptr == nilPtr, ta.isNil())
+		// Assert that the original address is preserved
+		assert.Equal(t, ptr, ta.pointer())
+		// Assert that the address remains free or not free
+		assert.Equal(t, isFree, ta.isFree())
+		// Assert that the generation is preserved
+		assert.Equal(t, uint8(i%128), ta.gen())
+	}
+}
+
+// For any legal address and generation tag combination the address and
+// generation tag are available unaltered
+func TestTaggedAddress_IncGen(t *testing.T) {
+	for range 10 {
+		newPtr := uintptr(rand.Int63n(maxAddress + 1))
+		ta := newTaggedAddress(newPtr)
+		testIncGen(t, ta.withNotFree())
+		testIncGen(t, ta.withFree())
+	}
+}
+
+func testIncGen(t *testing.T, ta taggedAddress) {
+	ptr := ta.pointer()
+	isFree := ta.isFree()
+	for i := 0; i <= maxGen*4; i++ {
+		// If the address is not 0 then isNil() must be false
+		assert.Equal(t, ptr == nilPtr, ta.isNil())
+		// Assert that the original address is preserved
+		assert.Equal(t, ptr, ta.pointer())
+		// Assert that the address remains free or not free
+		assert.Equal(t, isFree, ta.isFree())
+		// Assert that the generation is preserved
+		assert.Equal(t, uint8(i%128), ta.gen())
+
+		// increment tagged address generation
+		ta = ta.withIncGen()
+	}
+}
+
+// For any address with bits set in the 7-bit generation tag region
+// newTaggedAddress() panics
+func TestTaggedAddress_PointerWithGenBitsPanics(t *testing.T) {
+	for range 10 {
+		newPtr := uintptr(rand.Int63n(maxAddress + 1))
+
+		for i := 1; i <= maxGen; i++ {
+			genBits := uintptr(i)
+			// Create a pointer with bits set in the gen tag region
+			badPtr := newPtr | ((genBits) << maskShift)
+			assert.Panics(t, func() { newTaggedAddress(badPtr) })
+		}
+	}
+}
+
+func TestTaggedAddress_IsFree(t *testing.T) {
+	for range 10 {
+		newPtr := uintptr(rand.Int63n(maxAddress + 1))
+
+		ta := newTaggedAddress(newPtr)
+		address := ta.pointer()
+		gen := ta.gen()
+		// A tagged address is created not-free. It is assumed that a
+		// tagged address is only created for an allocation
+		assert.False(t, ta.isFree())
+		// Neither the address nor the generation tag are changed by the is-free bit
+		assert.Equal(t, address, ta.pointer())
+		assert.Equal(t, gen, ta.gen())
+
+		ta = ta.withFree()
+		assert.True(t, ta.isFree())
+		// Neither the address nor the generation tag are changed by the is-free bit
+		assert.Equal(t, address, ta.pointer())
+		assert.Equal(t, gen, ta.gen())
+
+		// Side-quest: set the generation tag and ensure this doesn't
+		// interfere with the is-free bit
+		ta = ta.withGen(maxGen)
+		assert.True(t, ta.isFree())
+		// Neither the address nor the generation tag are changed by the is-free bit
+		assert.Equal(t, address, ta.pointer())
+		assert.Equal(t, uint8(maxGen), ta.gen())
+
+		// Set the generation back
+		ta = ta.withGen(gen)
+
+		ta = ta.withNotFree()
+		assert.False(t, ta.isFree())
+		// Neither the address nor the generation tag are changed by the is-free bit
+		assert.Equal(t, address, ta.pointer())
+		assert.Equal(t, gen, ta.gen())
+	}
+}
diff --git a/offheap/object_bench_test.go b/offheap/object_bench_test.go
new file mode 100644
index 0000000..256d687
--- /dev/null
+++ b/offheap/object_bench_test.go
@@ -0,0 +1,33 @@
+package offheap
+
+import "testing"
+
+type benchStruct struct {
+	field int
+}
+
+func BenchmarkAllocWriteRead(b *testing.B) {
+	os := New()
+
+	refs := make([]RefObject[benchStruct], 0, b.N)
+
+	for range b.N {
+		ref := AllocObject[benchStruct](os)
+		refs = append(refs, ref)
+	}
+
+	b.ResetTimer()
+	b.ReportAllocs()
+
+	for i := range b.N {
+		refs[i].Value().field = i
+	}
+
+	sum := 0
+
+	for i := range b.N {
+		sum += refs[i].Value().field
+	}
+
+	println(sum)
+}
diff --git a/offheap/object_reference_test.go b/offheap/object_reference_test.go
index f25d617..04368d6 100644
--- a/offheap/object_reference_test.go
+++ b/offheap/object_reference_test.go
@@ -345,10 +345,10 @@ func Test_Object_CannotAllocateVeryBigStruct(t *testing.T) {
 //
 // This test should alert us if this problem ever reappears.
 //
-// NB: In the future meta-data will likely be moved to a separate allocation
-// space and some details described above will become out of date. The test
-// will still be useful though. Zero sized types are a likely source of
-// edge-case bugs for all eternity.
+// NB: As of this change, metadata has been moved to a separate allocation
+// space and some details described above are out of date. The test is still
+// useful though. Zero sized types are a likely source of edge-case bugs for
+// all eternity.
 func Test_Object_ZeroSizedType_FullSlab(t *testing.T) {
 	os := New()
 	defer func() {