Linux vmi284606.contaboserver.net 4.15.0-213-generic #224-Ubuntu SMP Mon Jun 19 13:30:12 UTC 2023 x86_64
Apache/2.4.57 (Ubuntu)
: 167.86.127.34 | : 216.73.217.31
Can't Read [ /etc/named.conf ]
7.2.24-0ubuntu0.18.04.17
root
Terminal
AUTO ROOT
Adminer
Backdoor Destroyer
Linux Exploit
Lock Shell
Lock File
Create User
CREATE RDP
PHP Mailer
BACKCONNECT
UNLOCK SHELL
HASH IDENTIFIER
README
+ Create Folder
+ Create File
/
usr /
local /
go /
src /
runtime /
internal /
atomic /
[ HOME SHELL ]
Name
Size
Permission
Action
asm_386.s
5.27
KB
-rw-r--r--
asm_amd64.s
3.89
KB
-rw-r--r--
asm_arm.s
5.22
KB
-rw-r--r--
asm_arm64.s
1.63
KB
-rw-r--r--
asm_mips64x.s
3.96
KB
-rw-r--r--
asm_mipsx.s
2.88
KB
-rw-r--r--
asm_ppc64x.s
4.48
KB
-rw-r--r--
asm_s390x.s
4.79
KB
-rw-r--r--
atomic_386.go
1.62
KB
-rw-r--r--
atomic_amd64.go
1.73
KB
-rw-r--r--
atomic_arm.go
4.31
KB
-rw-r--r--
atomic_arm64.go
1.38
KB
-rw-r--r--
atomic_arm64.s
3.21
KB
-rw-r--r--
atomic_mips64x.go
1.45
KB
-rw-r--r--
atomic_mips64x.s
1.06
KB
-rw-r--r--
atomic_mipsx.go
2.49
KB
-rw-r--r--
atomic_mipsx.s
482
B
-rw-r--r--
atomic_ppc64x.go
1.45
KB
-rw-r--r--
atomic_ppc64x.s
1.34
KB
-rw-r--r--
atomic_riscv64.go
1.37
KB
-rw-r--r--
atomic_riscv64.s
5.48
KB
-rw-r--r--
atomic_s390x.go
1.68
KB
-rw-r--r--
atomic_test.go
5.1
KB
-rw-r--r--
atomic_wasm.go
3.69
KB
-rw-r--r--
bench_test.go
1.78
KB
-rw-r--r--
stubs.go
795
B
-rw-r--r--
sys_linux_arm.s
3.14
KB
-rw-r--r--
sys_nonlinux_arm.s
1.34
KB
-rw-r--r--
Delete
Unzip
Zip
${this.title}
Close
Code Editor : atomic_arm.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build arm

package atomic

import (
	"internal/cpu"
	"unsafe"
)

// Export some functions via linkname to assembly in sync/atomic.
//go:linkname Xchg
//go:linkname Xchguintptr

// spinlock is a minimal busy-wait lock built on the 32-bit Cas primitive.
// It is used below (via locktab/addrLock) to serialize the software
// emulation of 64-bit atomic operations.
type spinlock struct {
	v uint32 // 0 = unlocked, 1 = locked
}

// lock spins until it wins the 0 -> 1 CAS on l.v.
//go:nosplit
func (l *spinlock) lock() {
	for {
		if Cas(&l.v, 0, 1) {
			return
		}
	}
}

// unlock releases the lock by atomically storing 0.
//go:nosplit
func (l *spinlock) unlock() {
	Store(&l.v, 0)
}

// locktab is a fixed table of spinlocks guarding 64-bit atomics.
// Each entry is padded out to cpu.CacheLinePadSize so that distinct
// locks do not share a cache line.
var locktab [57]struct {
	l   spinlock
	pad [cpu.CacheLinePadSize - unsafe.Sizeof(spinlock{})]byte
}

// addrLock picks the spinlock guarding a given uint64 address:
// the address is shifted right by 3 (8-byte granularity) and reduced
// modulo the table size.
func addrLock(addr *uint64) *spinlock {
	return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
}

// Atomic add and return new value.
// Implemented as a load + Cas retry loop.
//go:nosplit
func Xadd(val *uint32, delta int32) uint32 {
	for {
		oval := *val
		nval := oval + uint32(delta)
		if Cas(val, oval, nval) {
			return nval
		}
	}
}

//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr

// Xchg atomically swaps v into *addr and returns the previous value,
// using a Cas retry loop.
//go:nosplit
func Xchg(addr *uint32, v uint32) uint32 {
	for {
		old := *addr
		if Cas(addr, old, v) {
			return old
		}
	}
}

// Xchguintptr is the uintptr form of Xchg; uintptr is 32 bits here
// (this file is arm-only via the build tag above), so it delegates
// to the 32-bit Xchg through an unsafe pointer cast.
//go:nosplit
func Xchguintptr(addr *uintptr, v uintptr) uintptr {
	return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
}

// Not noescape -- it installs a pointer to addr.
// StorepNoWB stores a pointer without a write barrier.
// No body here: implementation is provided in assembly
// (presumably asm_arm.s in this directory — confirm against the build).
func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer)

//go:noescape
func Store(addr *uint32, v uint32)

//go:noescape
func StoreRel(addr *uint32, v uint32)

// goCas64 is a software compare-and-swap for 64-bit values,
// serialized by the per-address spinlock from addrLock.
// Order matters: alignment check, then a nil-fault probe, then the lock.
//go:nosplit
func goCas64(addr *uint64, old, new uint64) bool {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var ok bool
	addrLock(addr).lock()
	if *addr == old {
		*addr = new
		ok = true
	}
	addrLock(addr).unlock()
	return ok
}

// goXadd64 adds delta to *addr under the address's spinlock and
// returns the new value.
//go:nosplit
func goXadd64(addr *uint64, delta int64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr + uint64(delta)
	*addr = r
	addrLock(addr).unlock()
	return r
}

// goXchg64 swaps v into *addr under the spinlock and returns the old value.
//go:nosplit
func goXchg64(addr *uint64, v uint64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr
	*addr = v
	addrLock(addr).unlock()
	return r
}

// goLoad64 reads *addr under the spinlock so the read is atomic with
// respect to the other go*64 helpers.
//go:nosplit
func goLoad64(addr *uint64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr
	addrLock(addr).unlock()
	return r
}

// goStore64 writes v to *addr under the spinlock.
//go:nosplit
func goStore64(addr *uint64, v uint64) {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	addrLock(addr).lock()
	*addr = v
	addrLock(addr).unlock()
}

// Or8 atomically ORs v into the byte at addr. Since only 32-bit CAS is
// available, the byte's enclosing aligned word is CASed with v shifted
// into the byte's position within the word.
//go:nosplit
func Or8(addr *uint8, v uint8) {
	// Align down to 4 bytes and use 32-bit CAS.
	uaddr := uintptr(unsafe.Pointer(addr))
	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
	word := uint32(v) << ((uaddr & 3) * 8) // little endian
	for {
		old := *addr32
		if Cas(addr32, old, old|word) {
			return
		}
	}
}

// And8 atomically ANDs v into the byte at addr, using the same
// word-level CAS trick as Or8. The three non-target bytes of the word
// are set to all-ones in the operand (word |= ^mask) so they pass
// through the AND unchanged.
//go:nosplit
func And8(addr *uint8, v uint8) {
	// Align down to 4 bytes and use 32-bit CAS.
	uaddr := uintptr(unsafe.Pointer(addr))
	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
	word := uint32(v) << ((uaddr & 3) * 8)    // little endian
	mask := uint32(0xFF) << ((uaddr & 3) * 8) // little endian
	word |= ^mask
	for {
		old := *addr32
		if Cas(addr32, old, old&word) {
			return
		}
	}
}

// The declarations below have no Go bodies; their implementations live
// in assembly.

//go:nosplit
func armcas(ptr *uint32, old, new uint32) bool

//go:noescape
func Load(addr *uint32) uint32

// NO go:noescape annotation; *addr escapes if result escapes (#31525)
func Loadp(addr unsafe.Pointer) unsafe.Pointer

//go:noescape
func Load8(addr *uint8) uint8

//go:noescape
func LoadAcq(addr *uint32) uint32

//go:noescape
func Cas64(addr *uint64, old, new uint64) bool

//go:noescape
func CasRel(addr *uint32, old, new uint32) bool

//go:noescape
func Xadd64(addr *uint64, delta int64) uint64

//go:noescape
func Xchg64(addr *uint64, v uint64) uint64

//go:noescape
func Load64(addr *uint64) uint64

//go:noescape
func Store8(addr *uint8, v uint8)

//go:noescape
func Store64(addr *uint64, v uint64)
Close