mirror of
https://github.com/junegunn/fzf.git
synced 2026-02-27 20:12:35 +08:00
All input lines now enter the chunklist with sequential indices, and header lines are excluded from matching via Pattern.startIndex and PassMerger offset. This allows the number of header lines to be changed at runtime with change-header-lines(N), transform-header-lines, and bg-transform-header-lines actions. - Remove EvtHeader event; header items are read directly from chunks - Add startIndex to Pattern and PassMerger for skipping header items - Add targetIndex field to Terminal for cursor repositioning across header-lines changes Close #4659
275 lines
6.2 KiB
Go
275 lines
6.2 KiB
Go
package fzf
|
|
|
|
import (
|
|
"fmt"
|
|
"runtime"
|
|
"sort"
|
|
"sync"
|
|
"time"
|
|
|
|
"github.com/junegunn/fzf/src/util"
|
|
)
|
|
|
|
// MatchRequest represents a search request
//
// NOTE: constructed with a positional literal in Reset; do not reorder fields.
type MatchRequest struct {
	chunks   []*Chunk // chunks to search
	pattern  *Pattern // compiled pattern built by Matcher.patternBuilder
	final    bool     // whether this is the final request for the input (copied onto merger.final)
	sort     bool     // whether results should be sorted by relevance
	revision revision // revision of the chunk list; a mismatch invalidates caches in Loop
}
|
|
|
|
// MatchResult is the outcome of a single scan.
//
// NOTE: constructed with positional literals in scan; do not reorder fields.
type MatchResult struct {
	merger     *Merger // items matching the pattern
	passMerger *Merger // all items passed through unfiltered (used when the pattern is empty)
	cancelled  bool    // true when the scan was interrupted by a reqReset event
}
|
|
|
|
func (mr MatchResult) cacheable() bool {
|
|
return mr.merger != nil && mr.merger.cacheable()
|
|
}
|
|
|
|
func (mr MatchResult) final() bool {
|
|
return mr.merger != nil && mr.merger.final
|
|
}
|
|
|
|
// Matcher is responsible for performing search
type Matcher struct {
	cache          *ChunkCache            // shared per-chunk match cache; cleared on incompatible revision change
	patternBuilder func([]rune) *Pattern  // builds a Pattern from the query runes (see Reset)
	sort           bool                   // current sort setting; updated from each MatchRequest
	tac            bool                   // reverse item order (affects merging and relevance sort direction)
	eventBox       *util.EventBox         // outgoing events: EvtSearchProgress / EvtSearchFin
	reqBox         *util.EventBox         // incoming requests: reqRetry / reqReset / reqQuit
	partitions     int                    // number of parallel workers used by scan
	slab           []*util.Slab           // one lazily-allocated scratch slab per partition
	mergerCache    map[string]MatchResult // pattern string -> cached result; invalidated in Loop
	revision       revision               // revision of the chunk list the caches were built against
}
|
|
|
|
// Event types posted to Matcher.reqBox.
// reqQuit is declared elsewhere in the package — presumably alongside the
// other request event types; verify before renumbering.
const (
	reqRetry util.EventType = iota // re-run the search without cancelling the current one
	reqReset                       // cancel the ongoing scan (Peeked inside scan) and restart
)
|
|
|
|
// NewMatcher returns a new Matcher
|
|
func NewMatcher(cache *ChunkCache, patternBuilder func([]rune) *Pattern,
|
|
sort bool, tac bool, eventBox *util.EventBox, revision revision) *Matcher {
|
|
partitions := min(numPartitionsMultiplier*runtime.NumCPU(), maxPartitions)
|
|
return &Matcher{
|
|
cache: cache,
|
|
patternBuilder: patternBuilder,
|
|
sort: sort,
|
|
tac: tac,
|
|
eventBox: eventBox,
|
|
reqBox: util.NewEventBox(),
|
|
partitions: partitions,
|
|
slab: make([]*util.Slab, partitions),
|
|
mergerCache: make(map[string]MatchResult),
|
|
revision: revision}
|
|
}
|
|
|
|
// Loop puts Matcher in action.
//
// It blocks on reqBox waiting for MatchRequests, serves each one (from the
// merger cache when possible, otherwise via scan), and publishes results on
// eventBox as EvtSearchFin. It returns only when a reqQuit event arrives.
func (m *Matcher) Loop() {
	// Item count seen by the previous request; used to detect input growth
	// between requests so the merger cache can be invalidated.
	prevCount := 0

	for {
		var request MatchRequest

		stop := false
		m.reqBox.Wait(func(events *util.Events) {
			for t, val := range *events {
				if t == reqQuit {
					// Leave immediately; events are intentionally not
					// cleared — the loop breaks right after.
					stop = true
					return
				}
				switch val := val.(type) {
				case MatchRequest:
					// Later requests in the same batch overwrite earlier
					// ones; only the most recent one is served.
					request = val
				default:
					panic(fmt.Sprintf("Unexpected type: %T", val))
				}
			}
			events.Clear()
		})
		if stop {
			break
		}

		// Invalidate caches when the sort setting or the input revision
		// changed since the last request.
		cacheCleared := false
		if request.sort != m.sort || request.revision != m.revision {
			m.sort = request.sort
			m.mergerCache = make(map[string]MatchResult)
			// The chunk cache survives compatible revision bumps; only an
			// incompatible change (e.g. input replaced) clears it.
			if !request.revision.compatible(m.revision) {
				m.cache.Clear()
			}
			m.revision = request.revision
			cacheCleared = true
		}

		// Restart search
		patternString := request.pattern.AsString()
		var result MatchResult
		count := CountItems(request.chunks)

		if !cacheCleared {
			if count == prevCount {
				// Look up mergerCache; a cached result is reusable only if
				// its finality matches the request's.
				if cached, found := m.mergerCache[patternString]; found && cached.final() == request.final {
					result = cached
				}
			} else {
				// Invalidate mergerCache — the item count changed, so every
				// cached merger is stale.
				prevCount = count
				m.mergerCache = make(map[string]MatchResult)
			}
		}

		// Cache miss: perform the actual scan.
		if result.merger == nil {
			result = m.scan(request)
		}

		// A cancelled scan produces no result; the superseding request will
		// deliver one instead.
		if !result.cancelled {
			if result.cacheable() {
				m.mergerCache[patternString] = result
			}
			result.merger.final = request.final
			m.eventBox.Set(EvtSearchFin, result)
		}
	}
}
|
|
|
|
func (m *Matcher) sliceChunks(chunks []*Chunk) [][]*Chunk {
|
|
partitions := m.partitions
|
|
perSlice := len(chunks) / partitions
|
|
|
|
if perSlice == 0 {
|
|
partitions = len(chunks)
|
|
perSlice = 1
|
|
}
|
|
|
|
slices := make([][]*Chunk, partitions)
|
|
for i := 0; i < partitions; i++ {
|
|
start := i * perSlice
|
|
end := start + perSlice
|
|
if i == partitions-1 {
|
|
end = len(chunks)
|
|
}
|
|
slices[i] = chunks[start:end]
|
|
}
|
|
return slices
|
|
}
|
|
|
|
// partialResult carries one worker's matches back to scan, tagged with the
// slice index so results can be reassembled in input order.
type partialResult struct {
	index   int      // index of the slice this worker processed
	matches []Result // matches found in that slice (sorted if sorting is enabled)
}
|
|
|
|
// scan matches the pattern against all chunks in parallel and returns the
// merged result. It partitions the chunks across worker goroutines, tracks
// per-chunk progress through countChan, and aborts early (returning a
// cancelled result) when a reqReset event is observed on reqBox.
func (m *Matcher) scan(request MatchRequest) MatchResult {
	startedAt := time.Now()

	numChunks := len(request.chunks)
	if numChunks == 0 {
		// No input: the same empty merger serves as both merger and passMerger.
		// NOTE(review): this local `m` shadows the receiver for two lines.
		m := EmptyMerger(request.revision)
		return MatchResult{m, m, false}
	}
	pattern := request.pattern
	// passMerger exposes every item regardless of the pattern; startIndex
	// lets it skip leading (header) items.
	passMerger := PassMerger(&request.chunks, m.tac, request.revision, pattern.startIndex)
	if pattern.IsEmpty() {
		// Empty pattern matches everything — no need to spawn workers.
		return MatchResult{passMerger, passMerger, false}
	}

	// Index range of the items in view, used by the merger.
	minIndex := request.chunks[0].items[0].Index()
	maxIndex := request.chunks[numChunks-1].lastIndex(minIndex)
	cancelled := util.NewAtomicBool(false)

	slices := m.sliceChunks(request.chunks)
	numSlices := len(slices)
	resultChan := make(chan partialResult, numSlices)
	// Buffered to numChunks so workers never block on progress reporting.
	countChan := make(chan int, numChunks)
	waitGroup := sync.WaitGroup{}

	for idx, chunks := range slices {
		waitGroup.Add(1)
		// Slabs are allocated lazily, one per partition, and reused across scans.
		if m.slab[idx] == nil {
			m.slab[idx] = util.MakeSlab(slab16Size, slab32Size)
		}
		go func(idx int, slab *util.Slab, chunks []*Chunk) {
			defer func() { waitGroup.Done() }()
			count := 0
			allMatches := make([][]Result, len(chunks))
			for idx, chunk := range chunks { // inner idx shadows the slice index
				matches := request.pattern.Match(chunk, slab)
				allMatches[idx] = matches
				count += len(matches)
				if cancelled.Get() {
					// Abort without sending to countChan; the receiver below
					// exits via the reqReset check rather than the count.
					return
				}
				countChan <- len(matches)
			}
			// Flatten per-chunk matches into one slice for this partition.
			sliceMatches := make([]Result, 0, count)
			for _, matches := range allMatches {
				sliceMatches = append(sliceMatches, matches...)
			}
			if m.sort && request.pattern.sortable {
				if m.tac {
					sort.Sort(ByRelevanceTac(sliceMatches))
				} else {
					sort.Sort(ByRelevance(sliceMatches))
				}
			}
			resultChan <- partialResult{idx, sliceMatches}
		}(idx, m.slab[idx], chunks)
	}

	// wait cancels all workers and blocks until they have exited.
	// Always returns true so it can be used inline as the `cancelled` field.
	wait := func() bool {
		cancelled.Set(true)
		waitGroup.Wait()
		return true
	}

	// Progress loop: consume one count per chunk until all chunks are done,
	// a reset request arrives, or it's time to report progress.
	count := 0
	matchCount := 0
	for matchesInChunk := range countChan {
		count++
		matchCount += matchesInChunk

		if count == numChunks {
			break
		}

		// A pending reqReset supersedes this scan; cancel and bail out.
		if m.reqBox.Peek(reqReset) {
			return MatchResult{nil, nil, wait()}
		}

		if time.Since(startedAt) > progressMinDuration {
			m.eventBox.Set(EvtSearchProgress, float32(count)/float32(numChunks))
		}
	}

	// Collect each partition's matches, restoring slice order via the index.
	partialResults := make([][]Result, numSlices)
	for range slices {
		partialResult := <-resultChan
		partialResults[partialResult.index] = partialResult.matches
	}
	merger := NewMerger(pattern, partialResults, m.sort && request.pattern.sortable, m.tac, request.revision, minIndex, maxIndex)
	return MatchResult{merger, passMerger, false}
}
|
|
|
|
// Reset is called to interrupt/signal the ongoing search
|
|
func (m *Matcher) Reset(chunks []*Chunk, patternRunes []rune, cancel bool, final bool, sort bool, revision revision) {
|
|
pattern := m.patternBuilder(patternRunes)
|
|
|
|
var event util.EventType
|
|
if cancel {
|
|
event = reqReset
|
|
} else {
|
|
event = reqRetry
|
|
}
|
|
m.reqBox.Set(event, MatchRequest{chunks, pattern, final, sort, revision})
|
|
}
|
|
|
|
// Stop signals Loop to terminate by posting a reqQuit event.
func (m *Matcher) Stop() {
	m.reqBox.Set(reqQuit, nil)
}
|