Linux vmi284606.contaboserver.net 4.15.0-213-generic #224-Ubuntu SMP Mon Jun 19 13:30:12 UTC 2023 x86_64
Apache/2.4.57 (Ubuntu)
: 167.86.127.34 | : 216.73.217.31
Cant Read [ /etc/named.conf ]
7.2.24-0ubuntu0.18.04.17
root
Terminal
AUTO ROOT
Adminer
Backdoor Destroyer
Linux Exploit
Lock Shell
Lock File
Create User
CREATE RDP
PHP Mailer
BACKCONNECT
UNLOCK SHELL
HASH IDENTIFIER
README
+ Create Folder
+ Create File
/
usr /
local /
go /
src /
cmd /
compile /
internal /
ssa /
[ HOME SHELL ]
Name
Size
Permission
Action
gen
[ DIR ]
drwxr-xr-x
testdata
[ DIR ]
drwxr-xr-x
README.md
7.76
KB
-rw-r--r--
TODO
950
B
-rw-r--r--
biasedsparsemap.go
2.71
KB
-rw-r--r--
block.go
8.64
KB
-rw-r--r--
branchelim.go
11.41
KB
-rw-r--r--
branchelim_test.go
5.21
KB
-rw-r--r--
cache.go
2.46
KB
-rw-r--r--
check.go
13.9
KB
-rw-r--r--
checkbce.go
956
B
-rw-r--r--
compile.go
16.22
KB
-rw-r--r--
config.go
11.39
KB
-rw-r--r--
copyelim.go
1.82
KB
-rw-r--r--
copyelim_test.go
1.29
KB
-rw-r--r--
critical.go
3.15
KB
-rw-r--r--
cse.go
10.62
KB
-rw-r--r--
cse_test.go
4.21
KB
-rw-r--r--
deadcode.go
9.73
KB
-rw-r--r--
deadcode_test.go
3.49
KB
-rw-r--r--
deadstore.go
8.98
KB
-rw-r--r--
deadstore_test.go
4.09
KB
-rw-r--r--
debug.go
33.92
KB
-rw-r--r--
debug_test.go
28.68
KB
-rw-r--r--
decompose.go
10.91
KB
-rw-r--r--
dom.go
7.98
KB
-rw-r--r--
dom_test.go
13.34
KB
-rw-r--r--
export_test.go
5.68
KB
-rw-r--r--
flagalloc.go
5.49
KB
-rw-r--r--
func.go
22.17
KB
-rw-r--r--
func_test.go
12.85
KB
-rw-r--r--
fuse.go
5.7
KB
-rw-r--r--
fuse_test.go
5.04
KB
-rw-r--r--
html.go
29.04
KB
-rw-r--r--
id.go
576
B
-rw-r--r--
layout.go
3.56
KB
-rw-r--r--
lca.go
3.71
KB
-rw-r--r--
lca_test.go
1.65
KB
-rw-r--r--
likelyadjust.go
15.23
KB
-rw-r--r--
location.go
2.8
KB
-rw-r--r--
loopbce.go
9.85
KB
-rw-r--r--
loopreschedchecks.go
15.36
KB
-rw-r--r--
looprotate.go
2.24
KB
-rw-r--r--
lower.go
1.24
KB
-rw-r--r--
magic.go
13.42
KB
-rw-r--r--
magic_test.go
9.1
KB
-rw-r--r--
nilcheck.go
11.11
KB
-rw-r--r--
nilcheck_test.go
12.07
KB
-rw-r--r--
numberlines.go
8.44
KB
-rw-r--r--
op.go
8.74
KB
-rw-r--r--
opGen.go
839.32
KB
-rw-r--r--
opt.go
290
B
-rw-r--r--
passbm_test.go
3.14
KB
-rw-r--r--
phielim.go
1.47
KB
-rw-r--r--
phiopt.go
3.96
KB
-rw-r--r--
poset.go
37.23
KB
-rw-r--r--
poset_test.go
18.14
KB
-rw-r--r--
print.go
2.98
KB
-rw-r--r--
prove.go
36.68
KB
-rw-r--r--
redblack32.go
8.9
KB
-rw-r--r--
redblack32_test.go
5.96
KB
-rw-r--r--
regalloc.go
76.36
KB
-rw-r--r--
regalloc_test.go
6.41
KB
-rw-r--r--
rewrite.go
33.06
KB
-rw-r--r--
rewrite386.go
514.14
KB
-rw-r--r--
rewrite386splitload.go
3.7
KB
-rw-r--r--
rewriteAMD64.go
1.31
MB
-rw-r--r--
rewriteAMD64splitload.go
4.95
KB
-rw-r--r--
rewriteARM.go
538.26
KB
-rw-r--r--
rewriteARM64.go
837.68
KB
-rw-r--r--
rewriteMIPS.go
203.31
KB
-rw-r--r--
rewriteMIPS64.go
227.84
KB
-rw-r--r--
rewritePPC64.go
661.88
KB
-rw-r--r--
rewriteRISCV64.go
123.11
KB
-rw-r--r--
rewriteS390X.go
904.36
KB
-rw-r--r--
rewriteWasm.go
136.86
KB
-rw-r--r--
rewrite_test.go
3.01
KB
-rw-r--r--
rewritedec.go
10.56
KB
-rw-r--r--
rewritedec64.go
64.01
KB
-rw-r--r--
rewritedecArgs.go
6.6
KB
-rw-r--r--
rewritegeneric.go
1.09
MB
-rw-r--r--
schedule.go
13.99
KB
-rw-r--r--
schedule_test.go
2.91
KB
-rw-r--r--
shift_test.go
4.05
KB
-rw-r--r--
shortcircuit.go
3.97
KB
-rw-r--r--
shortcircuit_test.go
1.31
KB
-rw-r--r--
sizeof_test.go
855
B
-rw-r--r--
softfloat.go
1.94
KB
-rw-r--r--
sparsemap.go
1.98
KB
-rw-r--r--
sparseset.go
1.54
KB
-rw-r--r--
sparsetree.go
7.88
KB
-rw-r--r--
sparsetreemap.go
7.67
KB
-rw-r--r--
stackalloc.go
10.95
KB
-rw-r--r--
stackframe.go
290
B
-rw-r--r--
stmtlines_test.go
2.75
KB
-rw-r--r--
tighten.go
4.21
KB
-rw-r--r--
trim.go
4.24
KB
-rw-r--r--
value.go
10.71
KB
-rw-r--r--
writebarrier.go
17.92
KB
-rw-r--r--
writebarrier_test.go
1.75
KB
-rw-r--r--
xposmap.go
3.29
KB
-rw-r--r--
zcse.go
2.12
KB
-rw-r--r--
zeroextension_test.go
1.66
KB
-rw-r--r--
Delete
Unzip
Zip
${this.title}
Close
Code Editor : stackalloc.go
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// TODO: live at start of block instead?

package ssa

import (
	"cmd/compile/internal/types"
	"cmd/internal/src"
	"fmt"
)

// stackAllocState holds the working state of the stack allocation pass.
// Instances are cached on the Func's Cache (see newStackAllocState /
// putStackAllocState) so their backing slices are reused across functions.
type stackAllocState struct {
	f *Func

	// live is the output of stackalloc.
	// live[b.id] = live values at the end of block b.
	live [][]ID

	// The following slices are reused across multiple users
	// of stackAllocState.
	values    []stackValState
	interfere [][]ID // interfere[v.id] = values that interfere with v.
	names     []LocalSlot
	slots     []int
	used      []bool

	// Pass statistics, reported via LogStat when -d=ssa/stackalloc/stats is on.
	nArgSlot, // Number of Values sourced to arg slot
	nNotNeed, // Number of Values not needing a stack slot
	nNamedSlot, // Number of Values using a named stack slot
	nReuse, // Number of values reusing a stack slot
	nAuto, // Number of autos allocated for stack slots.
	nSelfInterfere int32 // Number of self-interferences
}

// newStackAllocState returns a stackAllocState, reusing the one cached on
// f.Cache if available. It is a fatal error to request a new state while a
// previous one is still checked out (s.f non-nil means putStackAllocState
// was never called).
func newStackAllocState(f *Func) *stackAllocState {
	s := f.Cache.stackAllocState
	if s == nil {
		return new(stackAllocState)
	}
	if s.f != nil {
		f.fe.Fatalf(src.NoXPos, "newStackAllocState called without previous free")
	}
	return s
}

// putStackAllocState returns s to the cache on s.f. The backing slices are
// kept (for capacity reuse) but their elements are zeroed so no *Value,
// *types.Type, or GC node pointers are retained across compilations.
func putStackAllocState(s *stackAllocState) {
	for i := range s.values {
		s.values[i] = stackValState{}
	}
	for i := range s.interfere {
		s.interfere[i] = nil
	}
	for i := range s.names {
		s.names[i] = LocalSlot{}
	}
	for i := range s.slots {
		s.slots[i] = 0
	}
	for i := range s.used {
		s.used[i] = false
	}
	s.f.Cache.stackAllocState = s
	s.f = nil
	s.live = nil
	s.nArgSlot, s.nNotNeed, s.nNamedSlot, s.nReuse, s.nAuto, s.nSelfInterfere = 0, 0, 0, 0, 0, 0
}

// stackValState records the per-Value facts stackalloc needs:
// the value's type, its spill (OpStoreReg) if any, whether it
// needs a stack slot at all, and whether it is an OpArg.
type stackValState struct {
	typ      *types.Type
	spill    *Value
	needSlot bool
	isArg    bool
}

// stackalloc allocates storage in the stack frame for
// all Values that did not get a register.
// Returns a map from block ID to the stack values live at the end of that block.
func stackalloc(f *Func, spillLive [][]ID) [][]ID {
	if f.pass.debug > stackDebug {
		fmt.Println("before stackalloc")
		fmt.Println(f.String())
	}
	s := newStackAllocState(f)
	s.init(f, spillLive)
	defer putStackAllocState(s)

	s.stackalloc()
	if f.pass.stats > 0 {
		f.LogStat("stack_alloc_stats",
			s.nArgSlot, "arg_slots", s.nNotNeed, "slot_not_needed",
			s.nNamedSlot, "named_slots", s.nAuto, "auto_slots",
			s.nReuse, "reused_slots", s.nSelfInterfere, "self_interfering")
	}

	return s.live
}

// init collects per-value state (type, spill, needSlot, isArg), then
// computes liveness and the interference graph used by stackalloc.
func (s *stackAllocState) init(f *Func, spillLive [][]ID) {
	s.f = f

	// Initialize value information.
	if n := f.NumValues(); cap(s.values) >= n {
		s.values = s.values[:n]
	} else {
		s.values = make([]stackValState, n)
	}
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			s.values[v.ID].typ = v.Type
			// A value needs a slot iff it produces a real result (not memory,
			// void, or flags), was not given a register by regalloc
			// (f.getHome(v.ID) == nil), cannot be cheaply recomputed, and is
			// not kept on the wasm operand stack.
			s.values[v.ID].needSlot = !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && f.getHome(v.ID) == nil && !v.rematerializeable() && !v.OnWasmStack
			s.values[v.ID].isArg = v.Op == OpArg
			if f.pass.debug > stackDebug && s.values[v.ID].needSlot {
				fmt.Printf("%s needs a stack slot\n", v)
			}
			if v.Op == OpStoreReg {
				s.values[v.Args[0].ID].spill = v
			}
		}
	}

	// Compute liveness info for values needing a slot.
	s.computeLive(spillLive)

	// Build interference graph among values needing a slot.
	s.buildInterferenceGraph()
}

// stackalloc picks a LocalSlot for every value that needs one, preferring
// (1) the arg's assigned location for OpArg, (2) the named variable's slot
// when types match and no interfering value already holds it, and
// (3) otherwise the first non-interfering previously allocated slot of the
// same type, allocating a fresh auto only as a last resort.
func (s *stackAllocState) stackalloc() {
	f := s.f

	// Build map from values to their names, if any.
	// A value may be associated with more than one name (e.g. after
	// the assignment i=j). This step picks one name per value arbitrarily.
	if n := f.NumValues(); cap(s.names) >= n {
		s.names = s.names[:n]
	} else {
		s.names = make([]LocalSlot, n)
	}
	names := s.names
	for _, name := range f.Names {
		// Note: not "range f.NamedValues" above, because
		// that would be nondeterministic.
		for _, v := range f.NamedValues[name] {
			names[v.ID] = name
		}
	}

	// Allocate args to their assigned locations.
	for _, v := range f.Entry.Values {
		if v.Op != OpArg {
			continue
		}
		loc := LocalSlot{N: v.Aux.(GCNode), Type: v.Type, Off: v.AuxInt}
		if f.pass.debug > stackDebug {
			fmt.Printf("stackalloc %s to %s\n", v, loc)
		}
		f.setHome(v, loc)
	}

	// For each type, we keep track of all the stack slots we
	// have allocated for that type.
	// TODO: share slots among equivalent types. We would need to
	// only share among types with the same GC signature. See the
	// type.Equal calls below for where this matters.
	locations := map[*types.Type][]LocalSlot{}

	// Each time we assign a stack slot to a value v, we remember
	// the slot we used via an index into locations[v.Type].
	slots := s.slots
	if n := f.NumValues(); cap(slots) >= n {
		slots = slots[:n]
	} else {
		slots = make([]int, n)
		s.slots = slots
	}
	for i := range slots {
		slots[i] = -1
	}

	// Pick a stack slot for each value needing one.
	var used []bool
	if n := f.NumValues(); cap(s.used) >= n {
		used = s.used[:n]
	} else {
		used = make([]bool, n)
		s.used = used
	}
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if !s.values[v.ID].needSlot {
				s.nNotNeed++
				continue
			}
			if v.Op == OpArg {
				s.nArgSlot++
				continue // already picked
			}

			// If this is a named value, try to use the name as
			// the spill location.
			var name LocalSlot
			if v.Op == OpStoreReg {
				// A spill's natural home is the slot named after the value
				// it spills, not the store itself.
				name = names[v.Args[0].ID]
			} else {
				name = names[v.ID]
			}
			if name.N != nil && v.Type.Compare(name.Type) == types.CMPeq {
				for _, id := range s.interfere[v.ID] {
					h := f.getHome(id)
					if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off {
						// A variable can interfere with itself.
						// It is rare, but it can happen.
						s.nSelfInterfere++
						goto noname
					}
				}
				if f.pass.debug > stackDebug {
					fmt.Printf("stackalloc %s to %s\n", v, name)
				}
				s.nNamedSlot++
				f.setHome(v, name)
				continue
			}

		noname:
			// Set of stack slots we could reuse.
			locs := locations[v.Type]
			// Mark all positions in locs used by interfering values.
			for i := 0; i < len(locs); i++ {
				used[i] = false
			}
			for _, xid := range s.interfere[v.ID] {
				slot := slots[xid]
				if slot >= 0 {
					used[slot] = true
				}
			}
			// Find an unused stack slot.
			var i int
			for i = 0; i < len(locs); i++ {
				if !used[i] {
					s.nReuse++
					break
				}
			}
			// If there is no unused stack slot, allocate a new one.
			if i == len(locs) {
				s.nAuto++
				locs = append(locs, LocalSlot{N: f.fe.Auto(v.Pos, v.Type), Type: v.Type, Off: 0})
				locations[v.Type] = locs
			}
			// Use the stack variable at that index for v.
			loc := locs[i]
			if f.pass.debug > stackDebug {
				fmt.Printf("stackalloc %s to %s\n", v, loc)
			}
			f.setHome(v, loc)
			slots[v.ID] = i
		}
	}
}

// computeLive computes a map from block ID to a list of
// stack-slot-needing value IDs live at the end of that block.
// TODO: this could be quadratic if lots of variables are live across lots of
// basic blocks. Figure out a way to make this function (or, more precisely, the user
// of this function) require only linear size & time.
func (s *stackAllocState) computeLive(spillLive [][]ID) {
	s.live = make([][]ID, s.f.NumBlocks())
	var phis []*Value
	live := s.f.newSparseSet(s.f.NumValues())
	defer s.f.retSparseSet(live)
	t := s.f.newSparseSet(s.f.NumValues())
	defer s.f.retSparseSet(t)

	// Instead of iterating over f.Blocks, iterate over their postordering.
	// Liveness information flows backward, so starting at the end
	// increases the probability that we will stabilize quickly.
	po := s.f.postorder()
	// Iterate to a fixed point: stop once a full sweep changes no live set.
	for {
		changed := false
		for _, b := range po {
			// Start with known live values at the end of the block
			live.clear()
			live.addAll(s.live[b.ID])

			// Propagate backwards to the start of the block
			phis = phis[:0]
			for i := len(b.Values) - 1; i >= 0; i-- {
				v := b.Values[i]
				live.remove(v.ID)
				if v.Op == OpPhi {
					// Save phi for later.
					// Note: its args might need a stack slot even though
					// the phi itself doesn't. So don't use needSlot.
					if !v.Type.IsMemory() && !v.Type.IsVoid() {
						phis = append(phis, v)
					}
					continue
				}
				for _, a := range v.Args {
					if s.values[a.ID].needSlot {
						live.add(a.ID)
					}
				}
			}

			// for each predecessor of b, expand its list of live-at-end values
			// invariant: s contains the values live at the start of b (excluding phi inputs)
			for i, e := range b.Preds {
				p := e.b
				t.clear()
				t.addAll(s.live[p.ID])
				t.addAll(live.contents())
				t.addAll(spillLive[p.ID])
				for _, v := range phis {
					// A phi's i'th argument is live at the end of the i'th
					// predecessor, as is that argument's spill if it has one.
					a := v.Args[i]
					if s.values[a.ID].needSlot {
						t.add(a.ID)
					}
					if spill := s.values[a.ID].spill; spill != nil {
						//TODO: remove? Subsumed by SpillUse?
						t.add(spill.ID)
					}
				}
				if t.size() == len(s.live[p.ID]) {
					continue
				}
				// grow p's live set
				s.live[p.ID] = append(s.live[p.ID][:0], t.contents()...)
				changed = true
			}
		}
		if !changed {
			break
		}
	}
	if s.f.pass.debug > stackDebug {
		for _, b := range s.f.Blocks {
			fmt.Printf("stacklive %s %v\n", b, s.live[b.ID])
		}
	}
}

// getHome returns the location assigned to value vid by regalloc/stackalloc,
// or nil if it has none.
func (f *Func) getHome(vid ID) Location {
	if int(vid) >= len(f.RegAlloc) {
		return nil
	}
	return f.RegAlloc[vid]
}

// setHome records loc as the location of v, growing f.RegAlloc as needed.
func (f *Func) setHome(v *Value, loc Location) {
	for v.ID >= ID(len(f.RegAlloc)) {
		f.RegAlloc = append(f.RegAlloc, nil)
	}
	f.RegAlloc[v.ID] = loc
}

// buildInterferenceGraph fills s.interfere: two slot-needing values
// interfere if one is defined while the other is live and they could share
// a slot (same type under Compare, or either is an OpArg).
func (s *stackAllocState) buildInterferenceGraph() {
	f := s.f
	if n := f.NumValues(); cap(s.interfere) >= n {
		s.interfere = s.interfere[:n]
	} else {
		s.interfere = make([][]ID, n)
	}
	live := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(live)
	for _, b := range f.Blocks {
		// Propagate liveness backwards to the start of the block.
		// Two values interfere if one is defined while the other is live.
		live.clear()
		live.addAll(s.live[b.ID])
		for i := len(b.Values) - 1; i >= 0; i-- {
			v := b.Values[i]
			if s.values[v.ID].needSlot {
				live.remove(v.ID)
				for _, id := range live.contents() {
					// Note: args can have different types and still interfere
					// (with each other or with other values). See issue 23522.
					if s.values[v.ID].typ.Compare(s.values[id].typ) == types.CMPeq || v.Op == OpArg || s.values[id].isArg {
						// Record the edge in both directions.
						s.interfere[v.ID] = append(s.interfere[v.ID], id)
						s.interfere[id] = append(s.interfere[id], v.ID)
					}
				}
			}
			for _, a := range v.Args {
				if s.values[a.ID].needSlot {
					live.add(a.ID)
				}
			}
			if v.Op == OpArg && s.values[v.ID].needSlot {
				// OpArg is an input argument which is pre-spilled.
				// We add back v.ID here because we want this value
				// to appear live even before this point. Being live
				// all the way to the start of the entry block prevents other
				// values from being allocated to the same slot and clobbering
				// the input value before we have a chance to load it.
				live.add(v.ID)
			}
		}
	}
	if f.pass.debug > stackDebug {
		for vid, i := range s.interfere {
			if len(i) > 0 {
				fmt.Printf("v%d interferes with", vid)
				for _, x := range i {
					fmt.Printf(" v%d", x)
				}
				fmt.Println()
			}
		}
	}
}
Close