-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathprefetch.go
87 lines (80 loc) · 1.68 KB
/
prefetch.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
package lazy
import (
"runtime"
"sync"
"sync/atomic"
)
// PrefetchFactor scales the size of the prefetch ring buffer:
// prefetch.Get allocates width*width*PrefetchFactor slots.
const PrefetchFactor = 2

// Prefetch is the signature of a prefetch strategy: given a drain
// worker's index and a function that opens the underlying source
// Stream, it returns the Stream that worker should read from.
type Prefetch func(int, func() Stream) Stream
// prefetch fans a single source Stream out to width concurrent drain
// workers through a shared ring buffer (see Get and substream).
type prefetch struct {
	width int        // number of drain workers sharing the source stream
	mu    sync.Mutex // guards the lazy one-time initialization of ring in Get
	// ring is the shared buffer. Each slot holds a stream value plus a
	// 1-based sequence number i; i == 0 marks the slot as free. The
	// sequence number is only ever read/written via atomic operations.
	ring []struct {
		val interface{}
		i   int64
	}
	stop, eos int32 // atomic flags: stop aborts the producer goroutine, eos marks end of the source stream
}
// Get returns the Stream a single drain worker should consume.
// With fewer than two workers there is nothing to prefetch, so the
// source is opened and returned directly. Otherwise the first caller
// lazily starts one shared producer goroutine that copies the source
// stream into the ring buffer, and every worker receives a substream
// that consumes its own stride of the ring (see substream).
func (p *prefetch) Get(worker int, open func() Stream) Stream {
	// this function is called once for every drain worker
	if p.width < 2 {
		return open()
	}
	// concurrent stream processing with p.width workers
	p.mu.Lock()
	if p.ring == nil {
		// First worker in: allocate the ring and start the producer.
		// Ring capacity is width*width*PrefetchFactor slots.
		p.ring = make([]struct {
			val interface{}
			i   int64
		}, p.width*p.width*PrefetchFactor)
		go func(stream Stream) {
			// j is the global 0-based sequence number of the next value.
			j := 0
			for atomic.LoadInt32(&p.stop) == 0 {
				v, _ := stream(true)
				t := 0
				// Busy-wait until slot j%len(ring) is free, then publish.
				for atomic.LoadInt32(&p.stop) == 0 {
					k := j % len(p.ring)
					if atomic.LoadInt64(&p.ring[k].i) == 0 {
						// Write the value BEFORE the atomic store of the
						// sequence number: the store publishes the slot,
						// establishing happens-before for the consumer's
						// read of val.
						p.ring[k].val = v
						atomic.StoreInt64(&p.ring[k].i, int64(j+1))
						j++
						break
					} else {
						// Slot still occupied; after p.width failed probes
						// yield the processor so consumers can drain.
						t++
						if t == p.width {
							runtime.Gosched()
							t = 0
						}
					}
				}
				// The EndOfStream sentinel is published into the ring like
				// any other value, then eos is raised so idle consumers
				// stop spinning.
				if _, ok := v.(EndOfStream); ok {
					atomic.StoreInt32(&p.eos, 1)
					break
				}
			}
			// Close the source stream (Stream's false argument appears to
			// mean "close"; see substream's handling of next == false).
			stream(false)
		}(open())
	}
	p.mu.Unlock()
	return p.substream(worker)
}
// substream returns the consumer-side Stream for one worker. Workers
// start at their own index and stride by p.width, so every element of
// the source stream is delivered to exactly one worker, in order
// within that worker's stride.
func (p *prefetch) substream(worker int) Stream {
	// i is the global sequence number of the next element this worker
	// will consume.
	i := worker
	return func(next bool) (v interface{}, j int) {
		if !next {
			// Closing the substream tells the shared producer goroutine
			// to stop. NOTE(review): other workers' substreams only check
			// eos, not stop, in their read loop below — presumably all
			// workers are expected to close together; verify against the
			// drain callers.
			atomic.StoreInt32(&p.stop, 1)
			return EoS, 0
		}
		for {
			k := i % len(p.ring)
			// The producer stored sequence number j+1 into slot
			// j%len(ring); observing i+1 here means slot k now holds
			// element i, and the atomic load orders the subsequent read
			// of val after the producer's write.
			if int64(i+1) == atomic.LoadInt64(&p.ring[k].i) {
				v, j = p.ring[k].val, i
				i += p.width
				// Release the slot back to the producer.
				atomic.StoreInt64(&p.ring[k].i, 0)
				return
			}
			if atomic.LoadInt32(&p.eos) != 0 {
				// Producer reached end of stream; no further data will
				// arrive for this worker.
				return EoS, i
			}
			// switch to another goroutine
			runtime.Gosched()
		}
	}
}
// NoPrefetch is a Prefetch strategy that performs no prefetching at
// all: it ignores the worker index and hands each worker the source
// stream opened directly.
func NoPrefetch(_ int, open func() Stream) Stream {
	s := open()
	return s
}