Linux vmi284606.contaboserver.net 4.15.0-213-generic #224-Ubuntu SMP Mon Jun 19 13:30:12 UTC 2023 x86_64
Apache/2.4.57 (Ubuntu)
: 167.86.127.34 | : 216.73.217.1
Can't Read [ /etc/named.conf ]
7.2.24-0ubuntu0.18.04.17
root
Terminal
AUTO ROOT
Adminer
Backdoor Destroyer
Linux Exploit
Lock Shell
Lock File
Create User
CREATE RDP
PHP Mailer
BACKCONNECT
UNLOCK SHELL
HASH IDENTIFIER
README
+ Create Folder
+ Create File
/
usr /
local /
go /
src /
sync /
[ HOME SHELL ]
Name
Size
Permission
Action
atomic
[ DIR ]
drwxr-xr-x
cond.go
2.54
KB
-rw-r--r--
cond_test.go
5.07
KB
-rw-r--r--
example_pool_test.go
1
KB
-rw-r--r--
example_test.go
1.14
KB
-rw-r--r--
export_test.go
1.27
KB
-rw-r--r--
map.go
11
KB
-rw-r--r--
map_bench_test.go
4.79
KB
-rw-r--r--
map_reference_test.go
3.39
KB
-rw-r--r--
map_test.go
3.45
KB
-rw-r--r--
mutex.go
7.32
KB
-rw-r--r--
mutex_test.go
5.7
KB
-rw-r--r--
once.go
2.25
KB
-rw-r--r--
once_test.go
1.1
KB
-rw-r--r--
pool.go
8.15
KB
-rw-r--r--
pool_test.go
7.25
KB
-rw-r--r--
poolqueue.go
8.87
KB
-rw-r--r--
runtime.go
2.17
KB
-rw-r--r--
runtime_sema_test.go
1.34
KB
-rw-r--r--
rwmutex.go
4.42
KB
-rw-r--r--
rwmutex_test.go
4.36
KB
-rw-r--r--
waitgroup.go
4.41
KB
-rw-r--r--
waitgroup_test.go
5.78
KB
-rw-r--r--
Delete
Unzip
Zip
${this.title}
Close
Code Editor : map_bench_test.go
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package sync_test import ( "fmt" "reflect" "sync" "sync/atomic" "testing" ) type bench struct { setup func(*testing.B, mapInterface) perG func(b *testing.B, pb *testing.PB, i int, m mapInterface) } func benchMap(b *testing.B, bench bench) { for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &sync.Map{}} { b.Run(fmt.Sprintf("%T", m), func(b *testing.B) { m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface) if bench.setup != nil { bench.setup(b, m) } b.ResetTimer() var i int64 b.RunParallel(func(pb *testing.PB) { id := int(atomic.AddInt64(&i, 1) - 1) bench.perG(b, pb, id*b.N, m) }) }) } } func BenchmarkLoadMostlyHits(b *testing.B) { const hits, misses = 1023, 1 benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { for i := 0; i < hits; i++ { m.LoadOrStore(i, i) } // Prime the map to get it into a steady state. for i := 0; i < hits*2; i++ { m.Load(i % hits) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.Load(i % (hits + misses)) } }, }) } func BenchmarkLoadMostlyMisses(b *testing.B) { const hits, misses = 1, 1023 benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { for i := 0; i < hits; i++ { m.LoadOrStore(i, i) } // Prime the map to get it into a steady state. for i := 0; i < hits*2; i++ { m.Load(i % hits) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.Load(i % (hits + misses)) } }, }) } func BenchmarkLoadOrStoreBalanced(b *testing.B) { const hits, misses = 128, 128 benchMap(b, bench{ setup: func(b *testing.B, m mapInterface) { if _, ok := m.(*DeepCopyMap); ok { b.Skip("DeepCopyMap has quadratic running time.") } for i := 0; i < hits; i++ { m.LoadOrStore(i, i) } // Prime the map to get it into a steady state. 
for i := 0; i < hits*2; i++ { m.Load(i % hits) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { j := i % (hits + misses) if j < hits { if _, ok := m.LoadOrStore(j, i); !ok { b.Fatalf("unexpected miss for %v", j) } } else { if v, loaded := m.LoadOrStore(i, i); loaded { b.Fatalf("failed to store %v: existing value %v", i, v) } } } }, }) } func BenchmarkLoadOrStoreUnique(b *testing.B) { benchMap(b, bench{ setup: func(b *testing.B, m mapInterface) { if _, ok := m.(*DeepCopyMap); ok { b.Skip("DeepCopyMap has quadratic running time.") } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.LoadOrStore(i, i) } }, }) } func BenchmarkLoadOrStoreCollision(b *testing.B) { benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { m.LoadOrStore(0, 0) }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.LoadOrStore(0, 0) } }, }) } func BenchmarkRange(b *testing.B) { const mapSize = 1 << 10 benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { for i := 0; i < mapSize; i++ { m.Store(i, i) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.Range(func(_, _ interface{}) bool { return true }) } }, }) } // BenchmarkAdversarialAlloc tests performance when we store a new value // immediately whenever the map is promoted to clean and otherwise load a // unique, missing key. // // This forces the Load calls to always acquire the map's mutex. func BenchmarkAdversarialAlloc(b *testing.B) { benchMap(b, bench{ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { var stores, loadsSinceStore int64 for ; pb.Next(); i++ { m.Load(i) if loadsSinceStore++; loadsSinceStore > stores { m.LoadOrStore(i, stores) loadsSinceStore = 0 stores++ } } }, }) } // BenchmarkAdversarialDelete tests performance when we periodically delete // one key and add a different one in a large map. 
// // This forces the Load calls to always acquire the map's mutex and periodically // makes a full copy of the map despite changing only one entry. func BenchmarkAdversarialDelete(b *testing.B) { const mapSize = 1 << 10 benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { for i := 0; i < mapSize; i++ { m.Store(i, i) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.Load(i) if i%mapSize == 0 { m.Range(func(k, _ interface{}) bool { m.Delete(k) return false }) m.Store(i, i) } } }, }) }
Close