Linux vmi284606.contaboserver.net 4.15.0-213-generic #224-Ubuntu SMP Mon Jun 19 13:30:12 UTC 2023 x86_64
Apache/2.4.57 (Ubuntu)
: 167.86.127.34 | : 216.73.217.1
Can't Read [ /etc/named.conf ]
7.2.24-0ubuntu0.18.04.17
root
Terminal
AUTO ROOT
Adminer
Backdoor Destroyer
Linux Exploit
Lock Shell
Lock File
Create User
CREATE RDP
PHP Mailer
BACKCONNECT
UNLOCK SHELL
HASH IDENTIFIER
README
+ Create Folder
+ Create File
/
usr /
local /
go /
src /
sync /
[ HOME SHELL ]
Name
Size
Permission
Action
atomic
[ DIR ]
drwxr-xr-x
cond.go
2.54
KB
-rw-r--r--
cond_test.go
5.07
KB
-rw-r--r--
example_pool_test.go
1
KB
-rw-r--r--
example_test.go
1.14
KB
-rw-r--r--
export_test.go
1.27
KB
-rw-r--r--
map.go
11
KB
-rw-r--r--
map_bench_test.go
4.79
KB
-rw-r--r--
map_reference_test.go
3.39
KB
-rw-r--r--
map_test.go
3.45
KB
-rw-r--r--
mutex.go
7.32
KB
-rw-r--r--
mutex_test.go
5.7
KB
-rw-r--r--
once.go
2.25
KB
-rw-r--r--
once_test.go
1.1
KB
-rw-r--r--
pool.go
8.15
KB
-rw-r--r--
pool_test.go
7.25
KB
-rw-r--r--
poolqueue.go
8.87
KB
-rw-r--r--
runtime.go
2.17
KB
-rw-r--r--
runtime_sema_test.go
1.34
KB
-rw-r--r--
rwmutex.go
4.42
KB
-rw-r--r--
rwmutex_test.go
4.36
KB
-rw-r--r--
waitgroup.go
4.41
KB
-rw-r--r--
waitgroup_test.go
5.78
KB
-rw-r--r--
Delete
Unzip
Zip
${this.title}
Close
Code Editor : waitgroup_test.go
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync_test

import (
	"internal/race"
	"runtime"
	. "sync"
	"sync/atomic"
	"testing"
)

// testWaitGroup uses wg1/wg2 as a two-phase barrier: n goroutines release
// wg1 and then block on wg2; the test verifies none of them get past wg2
// until every one of its n Done calls has been made.
func testWaitGroup(t *testing.T, wg1 *WaitGroup, wg2 *WaitGroup) {
	n := 16
	wg1.Add(n)
	wg2.Add(n)
	exited := make(chan bool, n)
	for i := 0; i != n; i++ {
		go func() {
			wg1.Done()
			wg2.Wait()
			exited <- true
		}()
	}
	wg1.Wait()
	for i := 0; i != n; i++ {
		// No goroutine may have exited yet: wg2 still holds them.
		select {
		case <-exited:
			t.Fatal("WaitGroup released group too soon")
		default:
		}
		wg2.Done()
	}
	for i := 0; i != n; i++ {
		<-exited // Will block if barrier fails to unlock someone.
	}
}

func TestWaitGroup(t *testing.T) {
	wg1 := &WaitGroup{}
	wg2 := &WaitGroup{}

	// Run the same test a few times to ensure barrier is in a proper state.
	for i := 0; i != 8; i++ {
		testWaitGroup(t, wg1, wg2)
	}
}

// knownRacy skips tests that intentionally trigger data races, which would
// otherwise fail under the race detector.
func knownRacy(t *testing.T) {
	if race.Enabled {
		t.Skip("skipping known-racy test under the race detector")
	}
}

// TestWaitGroupMisuse checks that calling Done more times than Add panics
// with the documented "negative WaitGroup counter" message.
func TestWaitGroupMisuse(t *testing.T) {
	defer func() {
		err := recover()
		if err != "sync: negative WaitGroup counter" {
			t.Fatalf("Unexpected panic: %#v", err)
		}
	}()
	wg := &WaitGroup{}
	wg.Add(1)
	wg.Done()
	wg.Done() // second Done drives the counter negative — must panic
	t.Fatal("Should panic")
}

// pollUntilEqual blocks until v, loaded atomically, is
// equal to the target.
func pollUntilEqual(v *uint32, target uint32) {
	for {
		// Spin briefly before yielding; the inner loop keeps the common
		// case cheap when the target is reached quickly.
		for i := 0; i < 1e3; i++ {
			if atomic.LoadUint32(v) == target {
				return
			}
		}
		// yield to avoid deadlock with the garbage collector
		// see issue #20072
		runtime.Gosched()
	}
}

// TestWaitGroupMisuse2 races Wait against a concurrent Add/Done reuse and
// expects the runtime's opportunistic misuse detection to panic at least
// once across many iterations.
func TestWaitGroupMisuse2(t *testing.T) {
	knownRacy(t)
	if runtime.NumCPU() <= 4 {
		t.Skip("NumCPU<=4, skipping: this test requires parallelism")
	}
	defer func() {
		err := recover()
		if err != "sync: negative WaitGroup counter" &&
			err != "sync: WaitGroup misuse: Add called concurrently with Wait" &&
			err != "sync: WaitGroup is reused before previous Wait has returned" {
			t.Fatalf("Unexpected panic: %#v", err)
		}
	}()
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	done := make(chan interface{}, 2)
	// The detection is opportunistic, so we want it to panic
	// at least in one run out of a million.
	for i := 0; i < 1e6; i++ {
		var wg WaitGroup
		var here uint32
		wg.Add(1)
		go func() {
			defer func() {
				done <- recover()
			}()
			// here is a 3-way rendezvous so all three parties race closely.
			atomic.AddUint32(&here, 1)
			pollUntilEqual(&here, 3)
			wg.Wait()
		}()
		go func() {
			defer func() {
				done <- recover()
			}()
			atomic.AddUint32(&here, 1)
			pollUntilEqual(&here, 3)
			wg.Add(1) // This is the bad guy.
			wg.Done()
		}()
		atomic.AddUint32(&here, 1)
		pollUntilEqual(&here, 3)
		wg.Done()
		for j := 0; j < 2; j++ {
			if err := <-done; err != nil {
				panic(err)
			}
		}
	}
	t.Fatal("Should panic")
}

// TestWaitGroupMisuse3 reuses a WaitGroup before a previous Wait has
// returned and expects the runtime to (eventually) detect it and panic.
func TestWaitGroupMisuse3(t *testing.T) {
	knownRacy(t)
	if runtime.NumCPU() <= 1 {
		t.Skip("NumCPU==1, skipping: this test requires parallelism")
	}
	defer func() {
		err := recover()
		if err != "sync: negative WaitGroup counter" &&
			err != "sync: WaitGroup misuse: Add called concurrently with Wait" &&
			err != "sync: WaitGroup is reused before previous Wait has returned" {
			t.Fatalf("Unexpected panic: %#v", err)
		}
	}()
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	done := make(chan interface{}, 3)
	// The detection is opportunistic, so we want it to panic
	// at least in one run out of a million.
	for i := 0; i < 1e6; i++ {
		var wg WaitGroup
		wg.Add(1)
		go func() {
			defer func() {
				done <- recover()
			}()
			wg.Done()
		}()
		go func() {
			defer func() {
				done <- recover()
			}()
			wg.Wait()
			// Start reusing the wg before waiting for the Wait below to return.
			wg.Add(1)
			go func() {
				wg.Done()
			}()
			wg.Wait()
		}()
		go func() {
			defer func() {
				done <- recover()
			}()
			wg.Wait()
		}()
		for j := 0; j < 3; j++ {
			if err := <-done; err != nil {
				panic(err)
			}
		}
	}
	t.Fatal("Should panic")
}

// TestWaitGroupRace checks that Wait does not return before both Done
// calls are visible (no spurious wakeup).
func TestWaitGroupRace(t *testing.T) {
	// Run this test for about 1ms.
	for i := 0; i < 1000; i++ {
		wg := &WaitGroup{}
		n := new(int32)
		// spawn goroutine 1
		wg.Add(1)
		go func() {
			atomic.AddInt32(n, 1)
			wg.Done()
		}()
		// spawn goroutine 2
		wg.Add(1)
		go func() {
			atomic.AddInt32(n, 1)
			wg.Done()
		}()
		// Wait for goroutine 1 and 2
		wg.Wait()
		if atomic.LoadInt32(n) != 2 {
			t.Fatal("Spurious wakeup from Wait")
		}
	}
}

// TestWaitGroupAlign exercises a WaitGroup embedded at an odd offset
// (after a single byte) to check it still works when not naturally aligned
// by its enclosing struct.
func TestWaitGroupAlign(t *testing.T) {
	type X struct {
		x  byte
		wg WaitGroup
	}
	var x X
	x.wg.Add(1)
	go func(x *X) {
		x.wg.Done()
	}(&x)
	x.wg.Wait()
}

func BenchmarkWaitGroupUncontended(b *testing.B) {
	// Padding keeps each goroutine's WaitGroup on its own cache line.
	type PaddedWaitGroup struct {
		WaitGroup
		pad [128]uint8
	}
	b.RunParallel(func(pb *testing.PB) {
		var wg PaddedWaitGroup
		for pb.Next() {
			wg.Add(1)
			wg.Done()
			wg.Wait()
		}
	})
}

// benchmarkWaitGroupAddDone measures contended Add/Done pairs, with
// localWork units of busy work between them to vary contention.
func benchmarkWaitGroupAddDone(b *testing.B, localWork int) {
	var wg WaitGroup
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			wg.Add(1)
			for i := 0; i < localWork; i++ {
				foo *= 2
				foo /= 2
			}
			wg.Done()
		}
		_ = foo
	})
}

func BenchmarkWaitGroupAddDone(b *testing.B) {
	benchmarkWaitGroupAddDone(b, 0)
}

func BenchmarkWaitGroupAddDoneWork(b *testing.B) {
	benchmarkWaitGroupAddDone(b, 100)
}

// benchmarkWaitGroupWait measures Wait on a zero-count WaitGroup (the
// fast, non-blocking path), with optional busy work per iteration.
func benchmarkWaitGroupWait(b *testing.B, localWork int) {
	var wg WaitGroup
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			wg.Wait()
			for i := 0; i < localWork; i++ {
				foo *= 2
				foo /= 2
			}
		}
		_ = foo
	})
}

func BenchmarkWaitGroupWait(b *testing.B) {
	benchmarkWaitGroupWait(b, 0)
}

func BenchmarkWaitGroupWaitWork(b *testing.B) {
	benchmarkWaitGroupWait(b, 100)
}
// BenchmarkWaitGroupActuallyWait measures the full Add/Done/Wait cycle in
// which Wait genuinely has to block: every iteration spawns a goroutine
// and waits for it to finish. Allocations are reported since the blocking
// path is the interesting cost here.
func BenchmarkWaitGroupActuallyWait(b *testing.B) {
	b.ReportAllocs()
	worker := func(iter *testing.PB) {
		for iter.Next() {
			var wg WaitGroup
			wg.Add(1)
			go func() {
				wg.Done()
			}()
			wg.Wait()
		}
	}
	b.RunParallel(worker)
}
Close