mirror of
https://github.com/junegunn/fzf.git
synced 2025-12-09 06:04:50 +08:00
Compare commits
96 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e89eebb7ba | ||
|
|
fee404399a | ||
|
|
6b4805ca1a | ||
|
|
159699b5d7 | ||
|
|
af809c9661 | ||
|
|
329de8f416 | ||
|
|
e825b07e85 | ||
|
|
71fdb99a07 | ||
|
|
55ee4186aa | ||
|
|
941b0a0ff7 | ||
|
|
6aae12288e | ||
|
|
302cc552ef | ||
|
|
a2a4df0886 | ||
|
|
3399e39968 | ||
|
|
87874bba88 | ||
|
|
c304fc4333 | ||
|
|
6977cf268f | ||
|
|
931c78a70c | ||
|
|
8d23646fe6 | ||
|
|
656963e018 | ||
|
|
644277faf1 | ||
|
|
0558dfee79 | ||
|
|
487c8fe88f | ||
|
|
0d171ba1d8 | ||
|
|
2069bbc8b5 | ||
|
|
053d628b53 | ||
|
|
6bc592e6c9 | ||
|
|
6c76d8cd1c | ||
|
|
a09e411936 | ||
|
|
02a7b96f33 | ||
|
|
e55e029ae8 | ||
|
|
6b18b144cf | ||
|
|
6d53089cc1 | ||
|
|
e85a8a68d0 | ||
|
|
dc55e68524 | ||
|
|
462c68b625 | ||
|
|
999d374f0c | ||
|
|
b208aa675e | ||
|
|
2b98fee136 | ||
|
|
e5e75efebc | ||
|
|
4a4fef2daf | ||
|
|
ecb6b234cc | ||
|
|
39dbc8acdb | ||
|
|
a56489bc7f | ||
|
|
99927c7071 | ||
|
|
3e28403978 | ||
|
|
37370f057f | ||
|
|
f4b46fad27 | ||
|
|
9d2c6a95f4 | ||
|
|
376a76d1d3 | ||
|
|
1fcc07e54e | ||
|
|
8db3345c2f | ||
|
|
69aa2fea68 | ||
|
|
298749bfcd | ||
|
|
f1f31baae1 | ||
|
|
e1c8f19e8f | ||
|
|
5e302c70e9 | ||
|
|
4c5a679066 | ||
|
|
41f0b2c354 | ||
|
|
a0a3c349c9 | ||
|
|
bc3983181d | ||
|
|
980b58ef5a | ||
|
|
a2604c0963 | ||
|
|
6dbc108da2 | ||
|
|
bd98f988f0 | ||
|
|
06301c7847 | ||
|
|
18a1aeaa91 | ||
|
|
c9f16b6430 | ||
|
|
bc9d2abdb6 | ||
|
|
28810c178f | ||
|
|
a9e64efe45 | ||
|
|
6b5886c034 | ||
|
|
7727ad43af | ||
|
|
bbe10f4f77 | ||
|
|
5e72709613 | ||
|
|
9e85cba0d0 | ||
|
|
4b59ced08f | ||
|
|
8dbdd55730 | ||
|
|
6725151a99 | ||
|
|
d4f3d5a164 | ||
|
|
7b5ccc45bc | ||
|
|
940214a1a2 | ||
|
|
68bd410159 | ||
|
|
b13fcfd831 | ||
|
|
07ef2b051c | ||
|
|
3fc795340d | ||
|
|
70cfa6af13 | ||
|
|
dbcaec59ae | ||
|
|
faedae708e | ||
|
|
0c66521b23 | ||
|
|
bf92862459 | ||
|
|
1a68698d76 | ||
|
|
842a73357c | ||
|
|
5efdeccdbb | ||
|
|
050777b8c4 | ||
|
|
a4d78e2200 |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -5,3 +5,4 @@ Gemfile.lock
|
|||||||
.DS_Store
|
.DS_Store
|
||||||
doc/tags
|
doc/tags
|
||||||
vendor
|
vendor
|
||||||
|
gopath
|
||||||
|
|||||||
@@ -1,19 +1,18 @@
|
|||||||
language: ruby
|
language: ruby
|
||||||
|
dist: trusty
|
||||||
|
sudo: required
|
||||||
matrix:
|
matrix:
|
||||||
include:
|
include:
|
||||||
- env: TAGS=
|
- env: TAGS=
|
||||||
rvm: 2.3.3
|
rvm: 2.3.3
|
||||||
# - env: TAGS=tcell
|
# - env: TAGS=tcell
|
||||||
# rvm: 2.2.0
|
# rvm: 2.3.3
|
||||||
|
|
||||||
install:
|
install:
|
||||||
- sudo apt-get update
|
|
||||||
- sudo apt-get install -y libncurses-dev lib32ncurses5-dev libgpm-dev
|
|
||||||
- sudo add-apt-repository -y ppa:pi-rho/dev
|
- sudo add-apt-repository -y ppa:pi-rho/dev
|
||||||
- sudo apt-add-repository -y ppa:fish-shell/release-2
|
- sudo apt-add-repository -y ppa:fish-shell/release-2
|
||||||
- sudo apt-get update
|
- sudo apt-get update
|
||||||
- sudo apt-get install -y tmux=1.9a-1~ppa1~p
|
- sudo apt-get install -y tmux zsh fish
|
||||||
- sudo apt-get install -y zsh fish
|
|
||||||
|
|
||||||
script: |
|
script: |
|
||||||
make test install &&
|
make test install &&
|
||||||
|
|||||||
33
CHANGELOG.md
33
CHANGELOG.md
@@ -1,13 +1,42 @@
|
|||||||
CHANGELOG
|
CHANGELOG
|
||||||
=========
|
=========
|
||||||
|
|
||||||
|
0.17.0
|
||||||
|
------
|
||||||
|
- Performance optimization
|
||||||
|
- One can match literal spaces in extended-search mode with a space prepended
|
||||||
|
by a backslash.
|
||||||
|
- `--expect` is now additive and can be specified multiple times.
|
||||||
|
|
||||||
|
0.16.11
|
||||||
|
-------
|
||||||
|
- Performance optimization
|
||||||
|
- Fixed missing preview update
|
||||||
|
|
||||||
|
0.16.10
|
||||||
|
-------
|
||||||
|
- Fixed invalid handling of ANSI colors in preview window
|
||||||
|
- Further improved `--ansi` performance
|
||||||
|
|
||||||
|
0.16.9
|
||||||
|
------
|
||||||
|
- Memory and performance optimization
|
||||||
|
- Around 20% performance improvement for general use cases
|
||||||
|
- Up to 5x faster processing of `--ansi`
|
||||||
|
- Up to 50% reduction of memory usage
|
||||||
|
- Bug fixes and usability improvements
|
||||||
|
- Fixed handling of bracketed paste mode
|
||||||
|
- [ERROR] on info line when the default command failed
|
||||||
|
- More efficient rendering of preview window
|
||||||
|
- `--no-clear` updated for repetitive relaunching scenarios
|
||||||
|
|
||||||
0.16.8
|
0.16.8
|
||||||
------
|
------
|
||||||
- New `change` event and `top` action for `--bind`
|
- New `change` event and `top` action for `--bind`
|
||||||
- `fzf --bind change:top`
|
- `fzf --bind change:top`
|
||||||
- Move cursor to the top result whenever the query string is changed
|
- Move cursor to the top result whenever the query string is changed
|
||||||
- `fzf --bind ctrl-u:unix-word-rubout+top`
|
- `fzf --bind 'ctrl-w:unix-word-rubout+top,ctrl-u:unix-line-discard+top'`
|
||||||
- `top` combined with `unix-word-rubout`
|
- `top` combined with `unix-word-rubout` and `unix-line-discard`
|
||||||
- Fixed inconsistent tiebreak scores when `--nth` is used
|
- Fixed inconsistent tiebreak scores when `--nth` is used
|
||||||
- Proper display of tab characters in `--prompt`
|
- Proper display of tab characters in `--prompt`
|
||||||
- Fixed not to `--cycle` on page-up/page-down to prevent overshoot
|
- Fixed not to `--cycle` on page-up/page-down to prevent overshoot
|
||||||
|
|||||||
@@ -55,6 +55,19 @@ let g:fzf_action = {
|
|||||||
\ 'ctrl-x': 'split',
|
\ 'ctrl-x': 'split',
|
||||||
\ 'ctrl-v': 'vsplit' }
|
\ 'ctrl-v': 'vsplit' }
|
||||||
|
|
||||||
|
" An action can be a reference to a function that processes selected lines
|
||||||
|
function! s:build_quickfix_list(lines)
|
||||||
|
call setqflist(map(copy(a:lines), '{ "filename": v:val }'))
|
||||||
|
copen
|
||||||
|
cc
|
||||||
|
endfunction
|
||||||
|
|
||||||
|
let g:fzf_action = {
|
||||||
|
\ 'ctrl-q': function('s:build_quickfix_list'),
|
||||||
|
\ 'ctrl-t': 'tab split',
|
||||||
|
\ 'ctrl-x': 'split',
|
||||||
|
\ 'ctrl-v': 'vsplit' }
|
||||||
|
|
||||||
" Default fzf layout
|
" Default fzf layout
|
||||||
" - down / up / left / right
|
" - down / up / left / right
|
||||||
let g:fzf_layout = { 'down': '~40%' }
|
let g:fzf_layout = { 'down': '~40%' }
|
||||||
|
|||||||
114
README.md
114
README.md
@@ -3,15 +3,19 @@
|
|||||||
|
|
||||||
fzf is a general-purpose command-line fuzzy finder.
|
fzf is a general-purpose command-line fuzzy finder.
|
||||||
|
|
||||||

|
<img src="https://raw.githubusercontent.com/junegunn/i/master/fzf-preview.png" width=640>
|
||||||
|
|
||||||
|
It's an interactive Unix filter for command-line that can be used with any
|
||||||
|
list; files, command history, processes, hostnames, bookmarks, git commits,
|
||||||
|
etc.
|
||||||
|
|
||||||
Pros
|
Pros
|
||||||
----
|
----
|
||||||
|
|
||||||
- No dependencies
|
- Portable, no dependencies
|
||||||
- Blazingly fast
|
- Blazingly fast
|
||||||
- The most comprehensive feature set
|
- The most comprehensive feature set
|
||||||
- Flexible layout using tmux panes
|
- Flexible layout
|
||||||
- Batteries included
|
- Batteries included
|
||||||
- Vim/Neovim plugin, key bindings and fuzzy auto-completion
|
- Vim/Neovim plugin, key bindings and fuzzy auto-completion
|
||||||
|
|
||||||
@@ -20,7 +24,7 @@ Table of Contents
|
|||||||
|
|
||||||
* [Installation](#installation)
|
* [Installation](#installation)
|
||||||
* [Using git](#using-git)
|
* [Using git](#using-git)
|
||||||
* [Using Homebrew](#using-homebrew)
|
* [Using Homebrew or Linuxbrew](#using-homebrew-or-linuxbrew)
|
||||||
* [As Vim plugin](#as-vim-plugin)
|
* [As Vim plugin](#as-vim-plugin)
|
||||||
* [Windows](#windows)
|
* [Windows](#windows)
|
||||||
* [Upgrading fzf](#upgrading-fzf)
|
* [Upgrading fzf](#upgrading-fzf)
|
||||||
@@ -42,6 +46,10 @@ Table of Contents
|
|||||||
* [Settings](#settings)
|
* [Settings](#settings)
|
||||||
* [Supported commands](#supported-commands)
|
* [Supported commands](#supported-commands)
|
||||||
* [Vim plugin](#vim-plugin)
|
* [Vim plugin](#vim-plugin)
|
||||||
|
* [Advanced topics](#advanced-topics)
|
||||||
|
* [Performance](#performance)
|
||||||
|
* [Executing external programs](#executing-external-programs)
|
||||||
|
* [Preview window](#preview-window)
|
||||||
* [Tips](#tips)
|
* [Tips](#tips)
|
||||||
* [Respecting .gitignore, <code>.hgignore</code>, and <code>svn:ignore</code>](#respecting-gitignore-hgignore-and-svnignore)
|
* [Respecting .gitignore, <code>.hgignore</code>, and <code>svn:ignore</code>](#respecting-gitignore-hgignore-and-svnignore)
|
||||||
* [git ls-tree for fast traversal](#git-ls-tree-for-fast-traversal)
|
* [git ls-tree for fast traversal](#git-ls-tree-for-fast-traversal)
|
||||||
@@ -75,9 +83,10 @@ git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
|
|||||||
~/.fzf/install
|
~/.fzf/install
|
||||||
```
|
```
|
||||||
|
|
||||||
### Using Homebrew
|
### Using Homebrew or Linuxbrew
|
||||||
|
|
||||||
On OS X, you can use [Homebrew](http://brew.sh/) to install fzf.
|
Alternatively, you can use [Homebrew](http://brew.sh/) or
|
||||||
|
[Linuxbrew](http://linuxbrew.sh/) to install fzf.
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
brew install fzf
|
brew install fzf
|
||||||
@@ -383,13 +392,94 @@ Vim plugin
|
|||||||
|
|
||||||
See [README-VIM.md](README-VIM.md).
|
See [README-VIM.md](README-VIM.md).
|
||||||
|
|
||||||
|
Advanced topics
|
||||||
|
---------------
|
||||||
|
|
||||||
|
### Performance
|
||||||
|
|
||||||
|
fzf is fast, and is [getting even faster][perf]. Performance should not be
|
||||||
|
a problem in most use cases. However, you might want to be aware of the
|
||||||
|
options that affect the performance.
|
||||||
|
|
||||||
|
- `--ansi` tells fzf to extract and parse ANSI color codes in the input and it
|
||||||
|
makes the initial scanning slower. So it's not recommended that you add it
|
||||||
|
to your `$FZF_DEFAULT_OPTS`.
|
||||||
|
- `--nth` makes fzf slower as fzf has to tokenize each line.
|
||||||
|
- `--with-nth` makes fzf slower as fzf has to tokenize and reassemble each
|
||||||
|
line.
|
||||||
|
- If you absolutely need better performance, you can consider using
|
||||||
|
`--algo=v1` (the default being `v2`) to make fzf use faster greedy
|
||||||
|
algorithm. However, this algorithm is not guaranteed to find the optimal
|
||||||
|
ordering of the matches and is not recommended.
|
||||||
|
|
||||||
|
[perf]: https://junegunn.kr/images/fzf-0.16.11.png
|
||||||
|
|
||||||
|
### Executing external programs
|
||||||
|
|
||||||
|
You can set up key bindings for starting external processes without leaving
|
||||||
|
fzf (`execute`, `execute-silent`).
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Press F1 to open the file with less without leaving fzf
|
||||||
|
# Press CTRL-Y to copy the line to clipboard and aborts fzf (requires pbcopy)
|
||||||
|
fzf --bind 'f1:execute(less -f {}),ctrl-y:execute-silent(echo {} | pbcopy)+abort'
|
||||||
|
```
|
||||||
|
|
||||||
|
See *KEY BINDINGS* section of the man page for details.
|
||||||
|
|
||||||
|
### Preview window
|
||||||
|
|
||||||
|
When `--preview` option is set, fzf automatically starts external process with
|
||||||
|
the current line as the argument and shows the result in the split window.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# {} is replaced to the single-quoted string of the focused line
|
||||||
|
fzf --preview 'cat {}'
|
||||||
|
```
|
||||||
|
|
||||||
|
Since preview window is updated only after the process is complete, it's
|
||||||
|
important that the command finishes quickly.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Use head instead of cat so that the command doesn't take too long to finish
|
||||||
|
fzf --preview 'head -100 {}'
|
||||||
|
```
|
||||||
|
|
||||||
|
Preview window supports ANSI colors, so you can use programs that
|
||||||
|
syntax-highlights the content of a file.
|
||||||
|
|
||||||
|
- Highlight: http://www.andre-simon.de/doku/highlight/en/highlight.php
|
||||||
|
- CodeRay: http://coderay.rubychan.de/
|
||||||
|
- Rouge: https://github.com/jneen/rouge
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Try highlight, coderay, rougify in turn, then fall back to cat
|
||||||
|
fzf --preview '[[ $(file --mime {}) =~ binary ]] &&
|
||||||
|
echo {} is a binary file ||
|
||||||
|
(highlight -O ansi -l {} ||
|
||||||
|
coderay {} ||
|
||||||
|
rougify {} ||
|
||||||
|
cat {}) 2> /dev/null | head -500'
|
||||||
|
```
|
||||||
|
|
||||||
|
You can customize the size and position of the preview window using
|
||||||
|
`--preview-window` option. For example,
|
||||||
|
|
||||||
|
```bash
|
||||||
|
fzf --height 40% --reverse --preview 'file {}' --preview-window down:1
|
||||||
|
```
|
||||||
|
|
||||||
|
For more advanced examples, see [Key bindings for git with fzf][fzf-git].
|
||||||
|
|
||||||
|
[fzf-git]: https://junegunn.kr/2016/07/fzf-git/
|
||||||
|
|
||||||
Tips
|
Tips
|
||||||
----
|
----
|
||||||
|
|
||||||
#### Respecting `.gitignore`, `.hgignore`, and `svn:ignore`
|
#### Respecting `.gitignore`, `.hgignore`, and `svn:ignore`
|
||||||
|
|
||||||
[ag](https://github.com/ggreer/the_silver_searcher) or
|
[ag](https://github.com/ggreer/the_silver_searcher) or
|
||||||
[pt](https://github.com/monochromegane/the_platinum_searcher) will do the
|
[rg](https://github.com/BurntSushi/ripgrep) will do the
|
||||||
filtering:
|
filtering:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
@@ -426,10 +516,10 @@ export FZF_DEFAULT_COMMAND='
|
|||||||
|
|
||||||
#### Fish shell
|
#### Fish shell
|
||||||
|
|
||||||
It's [a known bug of fish](https://github.com/fish-shell/fish-shell/issues/1362)
|
Fish shell before version 2.6.0 [doesn't allow](https://github.com/fish-shell/fish-shell/issues/1362)
|
||||||
(will be fixed in 2.6.0) that it doesn't allow reading from STDIN in command
|
reading from STDIN in command substitution, which means simple `vim (fzf)`
|
||||||
substitution, which means simple `vim (fzf)` won't work as expected. The
|
doesn't work as expected. The workaround for fish 2.5.0 and earlier is to use
|
||||||
workaround is to use the `read` fish command:
|
the `read` fish command:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
fzf | read -l result; and vim $result
|
fzf | read -l result; and vim $result
|
||||||
@@ -457,7 +547,7 @@ make use of this feature. `$dir` defaults to `.` when the last token is not a
|
|||||||
valid directory. Example:
|
valid directory. Example:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
set -l FZF_CTRL_T_COMMAND "command find -L \$dir -type f 2> /dev/null | sed '1d; s#^\./##'"
|
set -g FZF_CTRL_T_COMMAND "command find -L \$dir -type f 2> /dev/null | sed '1d; s#^\./##'"
|
||||||
```
|
```
|
||||||
|
|
||||||
[License](LICENSE)
|
[License](LICENSE)
|
||||||
|
|||||||
14
bin/fzf-tmux
14
bin/fzf-tmux
@@ -146,6 +146,7 @@ cleanup() {
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
if [ $# -gt 0 ]; then
|
if [ $# -gt 0 ]; then
|
||||||
|
trap - EXIT
|
||||||
exit 130
|
exit 130
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
@@ -170,21 +171,22 @@ for arg in "${args[@]}"; do
|
|||||||
done
|
done
|
||||||
|
|
||||||
pppid=$$
|
pppid=$$
|
||||||
trap_set="trap 'kill -SIGUSR1 -$pppid' EXIT SIGINT SIGTERM"
|
echo -n "trap 'kill -SIGUSR1 -$pppid' EXIT SIGINT SIGTERM;" > $argsf
|
||||||
trap_unset="trap - EXIT SIGINT SIGTERM"
|
close="; trap - EXIT SIGINT SIGTERM $close"
|
||||||
|
|
||||||
if [[ -n "$term" ]] || [[ -t 0 ]]; then
|
if [[ -n "$term" ]] || [[ -t 0 ]]; then
|
||||||
cat <<< "\"$fzf\" $opts > $fifo2; echo \$? > $fifo3 $close" > $argsf
|
cat <<< "\"$fzf\" $opts > $fifo2; echo \$? > $fifo3 $close" >> $argsf
|
||||||
|
cat $argsf
|
||||||
TMUX=$(echo $TMUX | cut -d , -f 1,2) tmux set-window-option synchronize-panes off \;\
|
TMUX=$(echo $TMUX | cut -d , -f 1,2) tmux set-window-option synchronize-panes off \;\
|
||||||
set-window-option remain-on-exit off \;\
|
set-window-option remain-on-exit off \;\
|
||||||
split-window $opt "$trap_set;cd $(printf %q "$PWD");$envs bash $argsf;$trap_unset" $swap \
|
split-window $opt "cd $(printf %q "$PWD");$envs bash $argsf" $swap \
|
||||||
> /dev/null 2>&1
|
> /dev/null 2>&1
|
||||||
else
|
else
|
||||||
mkfifo $fifo1
|
mkfifo $fifo1
|
||||||
cat <<< "\"$fzf\" $opts < $fifo1 > $fifo2; echo \$? > $fifo3 $close" > $argsf
|
cat <<< "\"$fzf\" $opts < $fifo1 > $fifo2; echo \$? > $fifo3 $close" >> $argsf
|
||||||
TMUX=$(echo $TMUX | cut -d , -f 1,2) tmux set-window-option synchronize-panes off \;\
|
TMUX=$(echo $TMUX | cut -d , -f 1,2) tmux set-window-option synchronize-panes off \;\
|
||||||
set-window-option remain-on-exit off \;\
|
set-window-option remain-on-exit off \;\
|
||||||
split-window $opt "$trap_set;$envs bash $argsf;$trap_unset" $swap \
|
split-window $opt "$envs bash $argsf" $swap \
|
||||||
> /dev/null 2>&1
|
> /dev/null 2>&1
|
||||||
cat <&0 > $fifo1 &
|
cat <&0 > $fifo1 &
|
||||||
fi
|
fi
|
||||||
|
|||||||
15
doc/fzf.txt
15
doc/fzf.txt
@@ -1,4 +1,4 @@
|
|||||||
fzf.txt fzf Last change: April 28 2017
|
fzf.txt fzf Last change: August 14 2017
|
||||||
FZF - TABLE OF CONTENTS *fzf* *fzf-toc*
|
FZF - TABLE OF CONTENTS *fzf* *fzf-toc*
|
||||||
==============================================================================
|
==============================================================================
|
||||||
|
|
||||||
@@ -80,6 +80,19 @@ Examples~
|
|||||||
\ 'ctrl-x': 'split',
|
\ 'ctrl-x': 'split',
|
||||||
\ 'ctrl-v': 'vsplit' }
|
\ 'ctrl-v': 'vsplit' }
|
||||||
|
|
||||||
|
" An action can be a reference to a function that processes selected lines
|
||||||
|
function! s:build_quickfix_list(lines)
|
||||||
|
call setqflist(map(copy(a:lines), '{ "filename": v:val }'))
|
||||||
|
copen
|
||||||
|
cc
|
||||||
|
endfunction
|
||||||
|
|
||||||
|
let g:fzf_action = {
|
||||||
|
\ 'ctrl-q': function('s:build_quickfix_list'),
|
||||||
|
\ 'ctrl-t': 'tab split',
|
||||||
|
\ 'ctrl-x': 'split',
|
||||||
|
\ 'ctrl-v': 'vsplit' }
|
||||||
|
|
||||||
" Default fzf layout
|
" Default fzf layout
|
||||||
" - down / up / left / right
|
" - down / up / left / right
|
||||||
let g:fzf_layout = { 'down': '~40%' }
|
let g:fzf_layout = { 'down': '~40%' }
|
||||||
|
|||||||
4
install
4
install
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
set -u
|
set -u
|
||||||
|
|
||||||
version=0.16.8
|
version=0.17.0
|
||||||
auto_completion=
|
auto_completion=
|
||||||
key_bindings=
|
key_bindings=
|
||||||
update_config=2
|
update_config=2
|
||||||
@@ -171,6 +171,8 @@ case "$archi" in
|
|||||||
OpenBSD\ *64) download fzf-$version-openbsd_${binary_arch:-amd64}.tgz ;;
|
OpenBSD\ *64) download fzf-$version-openbsd_${binary_arch:-amd64}.tgz ;;
|
||||||
OpenBSD\ *86) download fzf-$version-openbsd_${binary_arch:-386}.tgz ;;
|
OpenBSD\ *86) download fzf-$version-openbsd_${binary_arch:-386}.tgz ;;
|
||||||
CYGWIN*\ *64) download fzf-$version-windows_${binary_arch:-amd64}.zip ;;
|
CYGWIN*\ *64) download fzf-$version-windows_${binary_arch:-amd64}.zip ;;
|
||||||
|
MINGW*\ *86) download fzf-$version-windows_${binary_arch:-386}.zip ;;
|
||||||
|
MINGW*\ *64) download fzf-$version-windows_${binary_arch:-amd64}.zip ;;
|
||||||
*) binary_available=0 binary_error=1 ;;
|
*) binary_available=0 binary_error=1 ;;
|
||||||
esac
|
esac
|
||||||
|
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
THE SOFTWARE.
|
THE SOFTWARE.
|
||||||
..
|
..
|
||||||
.TH fzf-tmux 1 "Jun 2017" "fzf 0.16.8" "fzf-tmux - open fzf in tmux split pane"
|
.TH fzf-tmux 1 "Aug 2017" "fzf 0.17.0" "fzf-tmux - open fzf in tmux split pane"
|
||||||
|
|
||||||
.SH NAME
|
.SH NAME
|
||||||
fzf-tmux - open fzf in tmux split pane
|
fzf-tmux - open fzf in tmux split pane
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
THE SOFTWARE.
|
THE SOFTWARE.
|
||||||
..
|
..
|
||||||
.TH fzf 1 "Jun 2017" "fzf 0.16.8" "fzf - a command-line fuzzy finder"
|
.TH fzf 1 "Aug 2017" "fzf 0.17.0" "fzf - a command-line fuzzy finder"
|
||||||
|
|
||||||
.SH NAME
|
.SH NAME
|
||||||
fzf - a command-line fuzzy finder
|
fzf - a command-line fuzzy finder
|
||||||
@@ -111,6 +111,9 @@ Comma-separated list of sort criteria to apply when the scores are tied.
|
|||||||
.B "-m, --multi"
|
.B "-m, --multi"
|
||||||
Enable multi-select with tab/shift-tab
|
Enable multi-select with tab/shift-tab
|
||||||
.TP
|
.TP
|
||||||
|
.B "+m, --no-multi"
|
||||||
|
Disable multi-select
|
||||||
|
.TP
|
||||||
.B "--no-mouse"
|
.B "--no-mouse"
|
||||||
Disable mouse
|
Disable mouse
|
||||||
.TP
|
.TP
|
||||||
@@ -236,6 +239,7 @@ e.g. \fBfzf --color=bg+:24\fR
|
|||||||
\fBbg+ \fRBackground (current line)
|
\fBbg+ \fRBackground (current line)
|
||||||
\fBhl+ \fRHighlighted substrings (current line)
|
\fBhl+ \fRHighlighted substrings (current line)
|
||||||
\fBinfo \fRInfo
|
\fBinfo \fRInfo
|
||||||
|
\fBborder \fRBorder of the preview window and horizontal separators (\fB--border\fR)
|
||||||
\fBprompt \fRPrompt
|
\fBprompt \fRPrompt
|
||||||
\fBpointer \fRPointer to the current line
|
\fBpointer \fRPointer to the current line
|
||||||
\fBmarker \fRMulti-select marker
|
\fBmarker \fRMulti-select marker
|
||||||
@@ -327,10 +331,12 @@ Comma-separated list of keys that can be used to complete fzf in addition to
|
|||||||
the default enter key. When this option is set, fzf will print the name of the
|
the default enter key. When this option is set, fzf will print the name of the
|
||||||
key pressed as the first line of its output (or as the second line if
|
key pressed as the first line of its output (or as the second line if
|
||||||
\fB--print-query\fR is also used). The line will be empty if fzf is completed
|
\fB--print-query\fR is also used). The line will be empty if fzf is completed
|
||||||
with the default enter key.
|
with the default enter key. If \fB--expect\fR option is specified multiple
|
||||||
|
times, fzf will expect the union of the keys. \fB--no-expect\fR will clear the
|
||||||
|
list.
|
||||||
|
|
||||||
.RS
|
.RS
|
||||||
e.g. \fBfzf --expect=ctrl-v,ctrl-t,alt-s,f1,f2,~,@\fR
|
e.g. \fBfzf --expect=ctrl-v,ctrl-t,alt-s --expect=f1,f2,~,@\fR
|
||||||
.RE
|
.RE
|
||||||
.TP
|
.TP
|
||||||
.B "--read0"
|
.B "--read0"
|
||||||
@@ -356,6 +362,9 @@ e.g. \fBfzf --multi | fzf --sync\fR
|
|||||||
.B "--version"
|
.B "--version"
|
||||||
Display version information and exit
|
Display version information and exit
|
||||||
|
|
||||||
|
.TP
|
||||||
|
Note that most options have the opposite versions with \fB--no-\fR prefix.
|
||||||
|
|
||||||
.SH ENVIRONMENT VARIABLES
|
.SH ENVIRONMENT VARIABLES
|
||||||
.TP
|
.TP
|
||||||
.B FZF_DEFAULT_COMMAND
|
.B FZF_DEFAULT_COMMAND
|
||||||
@@ -403,6 +412,9 @@ Unless specified otherwise, fzf will start in "extended-search mode". In this
|
|||||||
mode, you can specify multiple patterns delimited by spaces, such as: \fB'wild
|
mode, you can specify multiple patterns delimited by spaces, such as: \fB'wild
|
||||||
^music .mp3$ sbtrkt !rmx\fR
|
^music .mp3$ sbtrkt !rmx\fR
|
||||||
|
|
||||||
|
You can prepend a backslash to a space (\fB\\ \fR) to match a literal space
|
||||||
|
character.
|
||||||
|
|
||||||
.SS Exact-match (quoted)
|
.SS Exact-match (quoted)
|
||||||
A term that is prefixed by a single-quote character (\fB'\fR) is interpreted as
|
A term that is prefixed by a single-quote character (\fB'\fR) is interpreted as
|
||||||
an "exact-match" (or "non-fuzzy") term. fzf will search for the exact
|
an "exact-match" (or "non-fuzzy") term. fzf will search for the exact
|
||||||
|
|||||||
@@ -66,8 +66,8 @@ function! s:shellesc_cmd(arg)
|
|||||||
let escaped = substitute(a:arg, '[&|<>()@^]', '^&', 'g')
|
let escaped = substitute(a:arg, '[&|<>()@^]', '^&', 'g')
|
||||||
let escaped = substitute(escaped, '%', '%%', 'g')
|
let escaped = substitute(escaped, '%', '%%', 'g')
|
||||||
let escaped = substitute(escaped, '"', '\\^&', 'g')
|
let escaped = substitute(escaped, '"', '\\^&', 'g')
|
||||||
let escaped = substitute(escaped, '\\\+\(\\^\)', '\\\\\1', 'g')
|
let escaped = substitute(escaped, '\(\\\+\)\(\\^\)', '\1\1\2', 'g')
|
||||||
return '^"'.substitute(escaped, '[^\\]\zs\\$', '\\\\', '').'^"'
|
return '^"'.substitute(escaped, '\(\\\+\)$', '\1\1', '').'^"'
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! fzf#shellescape(arg, ...)
|
function! fzf#shellescape(arg, ...)
|
||||||
@@ -149,13 +149,8 @@ function! s:tmux_enabled()
|
|||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! s:escape(path)
|
function! s:escape(path)
|
||||||
let escaped_chars = '$%#''"'
|
let path = fnameescape(a:path)
|
||||||
|
return s:is_win ? escape(path, '$') : path
|
||||||
if has('unix')
|
|
||||||
let escaped_chars .= ' \'
|
|
||||||
endif
|
|
||||||
|
|
||||||
return escape(a:path, escaped_chars)
|
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
" Upgrade legacy options
|
" Upgrade legacy options
|
||||||
@@ -206,7 +201,10 @@ function! s:common_sink(action, lines) abort
|
|||||||
return
|
return
|
||||||
endif
|
endif
|
||||||
let key = remove(a:lines, 0)
|
let key = remove(a:lines, 0)
|
||||||
let cmd = get(a:action, key, 'e')
|
let Cmd = get(a:action, key, 'e')
|
||||||
|
if type(Cmd) == type(function('call'))
|
||||||
|
return Cmd(a:lines)
|
||||||
|
endif
|
||||||
if len(a:lines) > 1
|
if len(a:lines) > 1
|
||||||
augroup fzf_swap
|
augroup fzf_swap
|
||||||
autocmd SwapExists * let v:swapchoice='o'
|
autocmd SwapExists * let v:swapchoice='o'
|
||||||
@@ -222,7 +220,7 @@ function! s:common_sink(action, lines) abort
|
|||||||
execute 'e' s:escape(item)
|
execute 'e' s:escape(item)
|
||||||
let empty = 0
|
let empty = 0
|
||||||
else
|
else
|
||||||
call s:open(cmd, item)
|
call s:open(Cmd, item)
|
||||||
endif
|
endif
|
||||||
if !has('patch-8.0.0177') && !has('nvim-0.2') && exists('#BufEnter')
|
if !has('patch-8.0.0177') && !has('nvim-0.2') && exists('#BufEnter')
|
||||||
\ && isdirectory(item)
|
\ && isdirectory(item)
|
||||||
@@ -342,13 +340,6 @@ try
|
|||||||
set shell=sh
|
set shell=sh
|
||||||
endif
|
endif
|
||||||
|
|
||||||
if has('nvim')
|
|
||||||
let running = filter(range(1, bufnr('$')), "bufname(v:val) =~# ';#FZF'")
|
|
||||||
if len(running)
|
|
||||||
call s:warn('FZF is already running (in buffer '.join(running, ', ').')!')
|
|
||||||
return []
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
let dict = exists('a:1') ? s:upgrade(a:1) : {}
|
let dict = exists('a:1') ? s:upgrade(a:1) : {}
|
||||||
let temps = { 'result': s:fzf_tempname() }
|
let temps = { 'result': s:fzf_tempname() }
|
||||||
let optstr = s:evaluate_opts(get(dict, 'options', ''))
|
let optstr = s:evaluate_opts(get(dict, 'options', ''))
|
||||||
@@ -361,11 +352,14 @@ try
|
|||||||
if has('nvim') && !has_key(dict, 'dir')
|
if has('nvim') && !has_key(dict, 'dir')
|
||||||
let dict.dir = s:fzf_getcwd()
|
let dict.dir = s:fzf_getcwd()
|
||||||
endif
|
endif
|
||||||
|
if has('win32unix') && has_key(dict, 'dir')
|
||||||
|
let dict.dir = fnamemodify(dict.dir, ':p')
|
||||||
|
endif
|
||||||
|
|
||||||
if !has_key(dict, 'source') && !empty($FZF_DEFAULT_COMMAND)
|
if !has_key(dict, 'source') && !empty($FZF_DEFAULT_COMMAND) && !s:is_win
|
||||||
let temps.source = s:fzf_tempname().(s:is_win ? '.bat' : '')
|
let temps.source = s:fzf_tempname()
|
||||||
call writefile(s:wrap_cmds(split($FZF_DEFAULT_COMMAND, "\n")), temps.source)
|
call writefile(s:wrap_cmds(split($FZF_DEFAULT_COMMAND, "\n")), temps.source)
|
||||||
let dict.source = (empty($SHELL) ? &shell : $SHELL) . (s:is_win ? ' /c ' : ' ') . fzf#shellescape(temps.source)
|
let dict.source = (empty($SHELL) ? &shell : $SHELL).' '.fzf#shellescape(temps.source)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
if has_key(dict, 'source')
|
if has_key(dict, 'source')
|
||||||
@@ -468,11 +462,11 @@ augroup fzf_popd
|
|||||||
augroup END
|
augroup END
|
||||||
|
|
||||||
function! s:dopopd()
|
function! s:dopopd()
|
||||||
if !exists('w:fzf_prev_dir') || exists('*haslocaldir') && !haslocaldir()
|
if !exists('w:fzf_dir') || s:fzf_getcwd() != w:fzf_dir[1]
|
||||||
return
|
return
|
||||||
endif
|
endif
|
||||||
execute 'lcd' s:escape(w:fzf_prev_dir)
|
execute 'lcd' s:escape(w:fzf_dir[0])
|
||||||
unlet w:fzf_prev_dir
|
unlet w:fzf_dir
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! s:xterm_launcher()
|
function! s:xterm_launcher()
|
||||||
@@ -526,12 +520,15 @@ function! s:execute(dict, command, use_height, temps) abort
|
|||||||
let command = batchfile
|
let command = batchfile
|
||||||
let a:temps.batchfile = batchfile
|
let a:temps.batchfile = batchfile
|
||||||
if has('nvim')
|
if has('nvim')
|
||||||
let s:dict = a:dict
|
|
||||||
let s:temps = a:temps
|
|
||||||
let fzf = {}
|
let fzf = {}
|
||||||
|
let fzf.dict = a:dict
|
||||||
|
let fzf.temps = a:temps
|
||||||
function! fzf.on_exit(job_id, exit_status, event) dict
|
function! fzf.on_exit(job_id, exit_status, event) dict
|
||||||
let lines = s:collect(s:temps)
|
if s:present(self.dict, 'dir')
|
||||||
call s:callback(s:dict, lines)
|
execute 'lcd' s:escape(self.dict.dir)
|
||||||
|
endif
|
||||||
|
let lines = s:collect(self.temps)
|
||||||
|
call s:callback(self.dict, lines)
|
||||||
endfunction
|
endfunction
|
||||||
let cmd = 'start /wait cmd /c '.command
|
let cmd = 'start /wait cmd /c '.command
|
||||||
call jobstart(cmd, fzf)
|
call jobstart(cmd, fzf)
|
||||||
@@ -687,7 +684,7 @@ function! s:execute_term(dict, command, temps) abort
|
|||||||
lcd -
|
lcd -
|
||||||
endif
|
endif
|
||||||
endtry
|
endtry
|
||||||
setlocal nospell bufhidden=wipe nobuflisted
|
setlocal nospell bufhidden=wipe nobuflisted nonumber
|
||||||
setf fzf
|
setf fzf
|
||||||
startinsert
|
startinsert
|
||||||
return []
|
return []
|
||||||
@@ -718,7 +715,7 @@ function! s:callback(dict, lines) abort
|
|||||||
let popd = has_key(a:dict, 'prev_dir') &&
|
let popd = has_key(a:dict, 'prev_dir') &&
|
||||||
\ (!&autochdir || (empty(a:lines) || len(a:lines) == 1 && empty(a:lines[0])))
|
\ (!&autochdir || (empty(a:lines) || len(a:lines) == 1 && empty(a:lines[0])))
|
||||||
if popd
|
if popd
|
||||||
let w:fzf_prev_dir = a:dict.prev_dir
|
let w:fzf_dir = [a:dict.prev_dir, a:dict.dir]
|
||||||
endif
|
endif
|
||||||
|
|
||||||
try
|
try
|
||||||
@@ -742,7 +739,7 @@ function! s:callback(dict, lines) abort
|
|||||||
|
|
||||||
" We may have opened a new window or tab
|
" We may have opened a new window or tab
|
||||||
if popd
|
if popd
|
||||||
let w:fzf_prev_dir = a:dict.prev_dir
|
let w:fzf_dir = [a:dict.prev_dir, a:dict.dir]
|
||||||
call s:dopopd()
|
call s:dopopd()
|
||||||
endif
|
endif
|
||||||
endfunction
|
endfunction
|
||||||
@@ -755,7 +752,7 @@ let s:default_action = {
|
|||||||
function! s:shortpath()
|
function! s:shortpath()
|
||||||
let short = pathshorten(fnamemodify(getcwd(), ':~:.'))
|
let short = pathshorten(fnamemodify(getcwd(), ':~:.'))
|
||||||
let slash = (s:is_win && !&shellslash) ? '\' : '/'
|
let slash = (s:is_win && !&shellslash) ? '\' : '/'
|
||||||
return empty(short) ? '~'.slash : short . (short =~ slash.'$' ? '' : slash)
|
return empty(short) ? '~'.slash : short . (short =~ escape(slash, '\').'$' ? '' : slash)
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! s:cmd(bang, ...) abort
|
function! s:cmd(bang, ...) abort
|
||||||
@@ -765,8 +762,6 @@ function! s:cmd(bang, ...) abort
|
|||||||
let opts.dir = substitute(substitute(remove(args, -1), '\\\(["'']\)', '\1', 'g'), '[/\\]*$', '/', '')
|
let opts.dir = substitute(substitute(remove(args, -1), '\\\(["'']\)', '\1', 'g'), '[/\\]*$', '/', '')
|
||||||
if s:is_win && !&shellslash
|
if s:is_win && !&shellslash
|
||||||
let opts.dir = substitute(opts.dir, '/', '\\', 'g')
|
let opts.dir = substitute(opts.dir, '/', '\\', 'g')
|
||||||
elseif has('win32unix')
|
|
||||||
let opts.dir = fnamemodify(opts.dir, ':p')
|
|
||||||
endif
|
endif
|
||||||
let prompt = opts.dir
|
let prompt = opts.dir
|
||||||
else
|
else
|
||||||
|
|||||||
@@ -4,14 +4,9 @@ function fzf_key_bindings
|
|||||||
|
|
||||||
# Store current token in $dir as root for the 'find' command
|
# Store current token in $dir as root for the 'find' command
|
||||||
function fzf-file-widget -d "List files and folders"
|
function fzf-file-widget -d "List files and folders"
|
||||||
set -l dir (commandline -t)
|
set -l commandline (__fzf_parse_commandline)
|
||||||
# The commandline token might be escaped, we need to unescape it.
|
set -l dir $commandline[1]
|
||||||
set dir (eval "printf '%s' $dir")
|
set -l fzf_query $commandline[2]
|
||||||
if [ ! -d "$dir" ]
|
|
||||||
set dir .
|
|
||||||
end
|
|
||||||
# Some 'find' versions print undesired duplicated slashes if the path ends with slashes.
|
|
||||||
set dir (string replace --regex '(.)/+$' '$1' "$dir")
|
|
||||||
|
|
||||||
# "-path \$dir'*/\\.*'" matches hidden files/folders inside $dir but not
|
# "-path \$dir'*/\\.*'" matches hidden files/folders inside $dir but not
|
||||||
# $dir itself, even if hidden.
|
# $dir itself, even if hidden.
|
||||||
@@ -19,19 +14,17 @@ function fzf_key_bindings
|
|||||||
command find -L \$dir -mindepth 1 \\( -path \$dir'*/\\.*' -o -fstype 'sysfs' -o -fstype 'devfs' -o -fstype 'devtmpfs' \\) -prune \
|
command find -L \$dir -mindepth 1 \\( -path \$dir'*/\\.*' -o -fstype 'sysfs' -o -fstype 'devfs' -o -fstype 'devtmpfs' \\) -prune \
|
||||||
-o -type f -print \
|
-o -type f -print \
|
||||||
-o -type d -print \
|
-o -type d -print \
|
||||||
-o -type l -print 2> /dev/null | cut -b3-"
|
-o -type l -print 2> /dev/null | sed 's@^\./@@'"
|
||||||
|
|
||||||
set -q FZF_TMUX_HEIGHT; or set FZF_TMUX_HEIGHT 40%
|
set -q FZF_TMUX_HEIGHT; or set FZF_TMUX_HEIGHT 40%
|
||||||
begin
|
begin
|
||||||
set -lx FZF_DEFAULT_OPTS "--height $FZF_TMUX_HEIGHT --reverse $FZF_DEFAULT_OPTS $FZF_CTRL_T_OPTS"
|
set -lx FZF_DEFAULT_OPTS "--height $FZF_TMUX_HEIGHT --reverse $FZF_DEFAULT_OPTS $FZF_CTRL_T_OPTS"
|
||||||
eval "$FZF_CTRL_T_COMMAND | "(__fzfcmd)" -m" | while read -l r; set result $result $r; end
|
eval "$FZF_CTRL_T_COMMAND | "(__fzfcmd)' -m --query "'$fzf_query'"' | while read -l r; set result $result $r; end
|
||||||
end
|
end
|
||||||
if [ -z "$result" ]
|
if [ -z "$result" ]
|
||||||
commandline -f repaint
|
commandline -f repaint
|
||||||
return
|
return
|
||||||
end
|
else
|
||||||
|
|
||||||
if [ "$dir" != . ]
|
|
||||||
# Remove last token from commandline.
|
# Remove last token from commandline.
|
||||||
commandline -t ""
|
commandline -t ""
|
||||||
end
|
end
|
||||||
@@ -46,22 +39,45 @@ function fzf_key_bindings
|
|||||||
set -q FZF_TMUX_HEIGHT; or set FZF_TMUX_HEIGHT 40%
|
set -q FZF_TMUX_HEIGHT; or set FZF_TMUX_HEIGHT 40%
|
||||||
begin
|
begin
|
||||||
set -lx FZF_DEFAULT_OPTS "--height $FZF_TMUX_HEIGHT $FZF_DEFAULT_OPTS --tiebreak=index --bind=ctrl-r:toggle-sort $FZF_CTRL_R_OPTS +m"
|
set -lx FZF_DEFAULT_OPTS "--height $FZF_TMUX_HEIGHT $FZF_DEFAULT_OPTS --tiebreak=index --bind=ctrl-r:toggle-sort $FZF_CTRL_R_OPTS +m"
|
||||||
|
|
||||||
|
set -l FISH_MAJOR (echo $FISH_VERSION | cut -f1 -d.)
|
||||||
|
set -l FISH_MINOR (echo $FISH_VERSION | cut -f2 -d.)
|
||||||
|
|
||||||
|
# history's -z flag is needed for multi-line support.
|
||||||
|
# history's -z flag was added in fish 2.4.0, so don't use it for versions
|
||||||
|
# before 2.4.0.
|
||||||
|
if [ "$FISH_MAJOR" -gt 2 -o \( "$FISH_MAJOR" -eq 2 -a "$FISH_MINOR" -ge 4 \) ];
|
||||||
|
history -z | eval (__fzfcmd) --read0 -q '(commandline)' | perl -pe 'chomp if eof' | read -lz result
|
||||||
|
and commandline -- $result
|
||||||
|
else
|
||||||
history | eval (__fzfcmd) -q '(commandline)' | read -l result
|
history | eval (__fzfcmd) -q '(commandline)' | read -l result
|
||||||
and commandline -- $result
|
and commandline -- $result
|
||||||
end
|
end
|
||||||
|
end
|
||||||
commandline -f repaint
|
commandline -f repaint
|
||||||
end
|
end
|
||||||
|
|
||||||
function fzf-cd-widget -d "Change directory"
|
function fzf-cd-widget -d "Change directory"
|
||||||
|
set -l commandline (__fzf_parse_commandline)
|
||||||
|
set -l dir $commandline[1]
|
||||||
|
set -l fzf_query $commandline[2]
|
||||||
|
|
||||||
set -q FZF_ALT_C_COMMAND; or set -l FZF_ALT_C_COMMAND "
|
set -q FZF_ALT_C_COMMAND; or set -l FZF_ALT_C_COMMAND "
|
||||||
command find -L . -mindepth 1 \\( -path '*/\\.*' -o -fstype 'sysfs' -o -fstype 'devfs' -o -fstype 'devtmpfs' \\) -prune \
|
command find -L \$dir -mindepth 1 \\( -path \$dir'*/\\.*' -o -fstype 'sysfs' -o -fstype 'devfs' -o -fstype 'devtmpfs' \\) -prune \
|
||||||
-o -type d -print 2> /dev/null | cut -b3-"
|
-o -type d -print 2> /dev/null | sed 's@^\./@@'"
|
||||||
set -q FZF_TMUX_HEIGHT; or set FZF_TMUX_HEIGHT 40%
|
set -q FZF_TMUX_HEIGHT; or set FZF_TMUX_HEIGHT 40%
|
||||||
begin
|
begin
|
||||||
set -lx FZF_DEFAULT_OPTS "--height $FZF_TMUX_HEIGHT --reverse $FZF_DEFAULT_OPTS $FZF_ALT_C_OPTS"
|
set -lx FZF_DEFAULT_OPTS "--height $FZF_TMUX_HEIGHT --reverse $FZF_DEFAULT_OPTS $FZF_ALT_C_OPTS"
|
||||||
eval "$FZF_ALT_C_COMMAND | "(__fzfcmd)" +m" | read -l result
|
eval "$FZF_ALT_C_COMMAND | "(__fzfcmd)' +m --query "'$fzf_query'"' | read -l result
|
||||||
[ "$result" ]; and cd $result
|
|
||||||
|
if [ -n "$result" ]
|
||||||
|
cd $result
|
||||||
|
|
||||||
|
# Remove last token from commandline.
|
||||||
|
commandline -t ""
|
||||||
end
|
end
|
||||||
|
end
|
||||||
|
|
||||||
commandline -f repaint
|
commandline -f repaint
|
||||||
end
|
end
|
||||||
|
|
||||||
@@ -84,4 +100,47 @@ function fzf_key_bindings
|
|||||||
bind -M insert \cr fzf-history-widget
|
bind -M insert \cr fzf-history-widget
|
||||||
bind -M insert \ec fzf-cd-widget
|
bind -M insert \ec fzf-cd-widget
|
||||||
end
|
end
|
||||||
|
|
||||||
|
function __fzf_parse_commandline -d 'Parse the current command line token and return split of existing filepath and rest of token'
|
||||||
|
# eval is used to do shell expansion on paths
|
||||||
|
set -l commandline (eval "printf '%s' "(commandline -t))
|
||||||
|
|
||||||
|
if [ -z $commandline ]
|
||||||
|
# Default to current directory with no --query
|
||||||
|
set dir '.'
|
||||||
|
set fzf_query ''
|
||||||
|
else
|
||||||
|
set dir (__fzf_get_dir $commandline)
|
||||||
|
|
||||||
|
if [ "$dir" = "." -a (string sub -l 1 $commandline) != '.' ]
|
||||||
|
# if $dir is "." but commandline is not a relative path, this means no file path found
|
||||||
|
set fzf_query $commandline
|
||||||
|
else
|
||||||
|
# Also remove trailing slash after dir, to "split" input properly
|
||||||
|
set fzf_query (string replace -r "^$dir/?" '' "$commandline")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
echo $dir
|
||||||
|
echo $fzf_query
|
||||||
|
end
|
||||||
|
|
||||||
|
function __fzf_get_dir -d 'Find the longest existing filepath from input string'
|
||||||
|
set dir $argv
|
||||||
|
|
||||||
|
# Strip all trailing slashes. Ignore if $dir is root dir (/)
|
||||||
|
if [ (string length $dir) -gt 1 ]
|
||||||
|
set dir (string replace -r '/*$' '' $dir)
|
||||||
|
end
|
||||||
|
|
||||||
|
# Iteratively check if dir exists and strip tail end of path
|
||||||
|
while [ ! -d "$dir" ]
|
||||||
|
# If path is absolute, this can keep going until ends up at /
|
||||||
|
# If path is relative, this can keep going until entire input is consumed, dirname returns "."
|
||||||
|
set dir (dirname "$dir")
|
||||||
|
end
|
||||||
|
|
||||||
|
echo $dir
|
||||||
|
end
|
||||||
|
|
||||||
end
|
end
|
||||||
|
|||||||
406
src/algo/algo.go
406
src/algo/algo.go
@@ -78,9 +78,11 @@ Scoring criteria
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
"unicode"
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
"github.com/junegunn/fzf/src/util"
|
"github.com/junegunn/fzf/src/util"
|
||||||
)
|
)
|
||||||
@@ -156,27 +158,17 @@ func posArray(withPos bool, len int) *[]int {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func alloc16(offset int, slab *util.Slab, size int, clear bool) (int, []int16) {
|
func alloc16(offset int, slab *util.Slab, size int) (int, []int16) {
|
||||||
if slab != nil && cap(slab.I16) > offset+size {
|
if slab != nil && cap(slab.I16) > offset+size {
|
||||||
slice := slab.I16[offset : offset+size]
|
slice := slab.I16[offset : offset+size]
|
||||||
if clear {
|
|
||||||
for idx := range slice {
|
|
||||||
slice[idx] = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return offset + size, slice
|
return offset + size, slice
|
||||||
}
|
}
|
||||||
return offset, make([]int16, size)
|
return offset, make([]int16, size)
|
||||||
}
|
}
|
||||||
|
|
||||||
func alloc32(offset int, slab *util.Slab, size int, clear bool) (int, []int32) {
|
func alloc32(offset int, slab *util.Slab, size int) (int, []int32) {
|
||||||
if slab != nil && cap(slab.I32) > offset+size {
|
if slab != nil && cap(slab.I32) > offset+size {
|
||||||
slice := slab.I32[offset : offset+size]
|
slice := slab.I32[offset : offset+size]
|
||||||
if clear {
|
|
||||||
for idx := range slice {
|
|
||||||
slice[idx] = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return offset + size, slice
|
return offset + size, slice
|
||||||
}
|
}
|
||||||
return offset, make([]int32, size)
|
return offset, make([]int32, size)
|
||||||
@@ -227,7 +219,7 @@ func bonusFor(prevClass charClass, class charClass) int16 {
|
|||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func bonusAt(input util.Chars, idx int) int16 {
|
func bonusAt(input *util.Chars, idx int) int16 {
|
||||||
if idx == 0 {
|
if idx == 0 {
|
||||||
return bonusBoundary
|
return bonusBoundary
|
||||||
}
|
}
|
||||||
@@ -249,155 +241,84 @@ func normalizeRune(r rune) rune {
|
|||||||
// Algo functions make two assumptions
|
// Algo functions make two assumptions
|
||||||
// 1. "pattern" is given in lowercase if "caseSensitive" is false
|
// 1. "pattern" is given in lowercase if "caseSensitive" is false
|
||||||
// 2. "pattern" is already normalized if "normalize" is true
|
// 2. "pattern" is already normalized if "normalize" is true
|
||||||
type Algo func(caseSensitive bool, normalize bool, forward bool, input util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int)
|
type Algo func(caseSensitive bool, normalize bool, forward bool, input *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int)
|
||||||
|
|
||||||
func FuzzyMatchV2(caseSensitive bool, normalize bool, forward bool, input util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
func trySkip(input *util.Chars, caseSensitive bool, b byte, from int) int {
|
||||||
// Assume that pattern is given in lowercase if case-insensitive.
|
byteArray := input.Bytes()[from:]
|
||||||
// First check if there's a match and calculate bonus for each position.
|
idx := bytes.IndexByte(byteArray, b)
|
||||||
// If the input string is too long, consider finding the matching chars in
|
if idx == 0 {
|
||||||
// this phase as well (non-optimal alignment).
|
// Can't skip any further
|
||||||
N := input.Length()
|
return from
|
||||||
M := len(pattern)
|
}
|
||||||
switch M {
|
// We may need to search for the uppercase letter again. We don't have to
|
||||||
case 0:
|
// consider normalization as we can be sure that this is an ASCII string.
|
||||||
return Result{0, 0, 0}, posArray(withPos, M)
|
if !caseSensitive && b >= 'a' && b <= 'z' {
|
||||||
case 1:
|
if idx > 0 {
|
||||||
return ExactMatchNaive(caseSensitive, normalize, forward, input, pattern[0:1], withPos, slab)
|
byteArray = byteArray[:idx]
|
||||||
|
}
|
||||||
|
uidx := bytes.IndexByte(byteArray, b-32)
|
||||||
|
if uidx >= 0 {
|
||||||
|
idx = uidx
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if idx < 0 {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return from + idx
|
||||||
}
|
}
|
||||||
|
|
||||||
// Since O(nm) algorithm can be prohibitively expensive for large input,
|
func isAscii(runes []rune) bool {
|
||||||
// we fall back to the greedy algorithm.
|
for _, r := range runes {
|
||||||
if slab != nil && N*M > cap(slab.I16) {
|
if r >= utf8.RuneSelf {
|
||||||
return FuzzyMatchV1(caseSensitive, normalize, forward, input, pattern, withPos, slab)
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reuse pre-allocated integer slice to avoid unnecessary sweeping of garbages
|
func asciiFuzzyIndex(input *util.Chars, pattern []rune, caseSensitive bool) int {
|
||||||
offset16 := 0
|
// Can't determine
|
||||||
offset32 := 0
|
if !input.IsBytes() {
|
||||||
// Bonus point for each position
|
return 0
|
||||||
offset16, B := alloc16(offset16, slab, N, false)
|
|
||||||
// The first occurrence of each character in the pattern
|
|
||||||
offset32, F := alloc32(offset32, slab, M, false)
|
|
||||||
// Rune array
|
|
||||||
offset32, T := alloc32(offset32, slab, N, false)
|
|
||||||
|
|
||||||
// Phase 1. Check if there's a match and calculate bonus for each point
|
|
||||||
pidx, lastIdx, prevClass := 0, 0, charNonWord
|
|
||||||
for idx := 0; idx < N; idx++ {
|
|
||||||
char := input.Get(idx)
|
|
||||||
var class charClass
|
|
||||||
if char <= unicode.MaxASCII {
|
|
||||||
class = charClassOfAscii(char)
|
|
||||||
} else {
|
|
||||||
class = charClassOfNonAscii(char)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if !caseSensitive && class == charUpper {
|
// Not possible
|
||||||
if char <= unicode.MaxASCII {
|
if !isAscii(pattern) {
|
||||||
char += 32
|
return -1
|
||||||
} else {
|
|
||||||
char = unicode.To(unicode.LowerCase, char)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if normalize {
|
firstIdx, idx := 0, 0
|
||||||
char = normalizeRune(char)
|
for pidx := 0; pidx < len(pattern); pidx++ {
|
||||||
|
idx = trySkip(input, caseSensitive, byte(pattern[pidx]), idx)
|
||||||
|
if idx < 0 {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if pidx == 0 && idx > 0 {
|
||||||
|
// Step back to find the right bonus point
|
||||||
|
firstIdx = idx - 1
|
||||||
|
}
|
||||||
|
idx++
|
||||||
|
}
|
||||||
|
return firstIdx
|
||||||
}
|
}
|
||||||
|
|
||||||
T[idx] = char
|
func debugV2(T []rune, pattern []rune, F []int32, lastIdx int, H []int16, C []int16) {
|
||||||
B[idx] = bonusFor(prevClass, class)
|
|
||||||
prevClass = class
|
|
||||||
|
|
||||||
if pidx < M {
|
|
||||||
if char == pattern[pidx] {
|
|
||||||
lastIdx = idx
|
|
||||||
F[pidx] = int32(idx)
|
|
||||||
pidx++
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if char == pattern[M-1] {
|
|
||||||
lastIdx = idx
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if pidx != M {
|
|
||||||
return Result{-1, -1, 0}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 2. Fill in score matrix (H)
|
|
||||||
// Unlike the original algorithm, we do not allow omission.
|
|
||||||
width := lastIdx - int(F[0]) + 1
|
width := lastIdx - int(F[0]) + 1
|
||||||
offset16, H := alloc16(offset16, slab, width*M, false)
|
|
||||||
|
|
||||||
// Possible length of consecutive chunk at each position.
|
for i, f := range F {
|
||||||
offset16, C := alloc16(offset16, slab, width*M, false)
|
|
||||||
|
|
||||||
maxScore, maxScorePos := int16(0), 0
|
|
||||||
for i := 0; i < M; i++ {
|
|
||||||
I := i * width
|
I := i * width
|
||||||
inGap := false
|
|
||||||
for j := int(F[i]); j <= lastIdx; j++ {
|
|
||||||
j0 := j - int(F[0])
|
|
||||||
var s1, s2, consecutive int16
|
|
||||||
|
|
||||||
if j > int(F[i]) {
|
|
||||||
if inGap {
|
|
||||||
s2 = H[I+j0-1] + scoreGapExtention
|
|
||||||
} else {
|
|
||||||
s2 = H[I+j0-1] + scoreGapStart
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if pattern[i] == T[j] {
|
|
||||||
var diag int16
|
|
||||||
if i > 0 && j0 > 0 {
|
|
||||||
diag = H[I-width+j0-1]
|
|
||||||
}
|
|
||||||
s1 = diag + scoreMatch
|
|
||||||
b := B[j]
|
|
||||||
if i > 0 {
|
|
||||||
// j > 0 if i > 0
|
|
||||||
consecutive = C[I-width+j0-1] + 1
|
|
||||||
// Break consecutive chunk
|
|
||||||
if b == bonusBoundary {
|
|
||||||
consecutive = 1
|
|
||||||
} else if consecutive > 1 {
|
|
||||||
b = util.Max16(b, util.Max16(bonusConsecutive, B[j-int(consecutive)+1]))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
consecutive = 1
|
|
||||||
b *= bonusFirstCharMultiplier
|
|
||||||
}
|
|
||||||
if s1+b < s2 {
|
|
||||||
s1 += B[j]
|
|
||||||
consecutive = 0
|
|
||||||
} else {
|
|
||||||
s1 += b
|
|
||||||
}
|
|
||||||
}
|
|
||||||
C[I+j0] = consecutive
|
|
||||||
|
|
||||||
inGap = s1 < s2
|
|
||||||
score := util.Max16(util.Max16(s1, s2), 0)
|
|
||||||
if i == M-1 && (forward && score > maxScore || !forward && score >= maxScore) {
|
|
||||||
maxScore, maxScorePos = score, j
|
|
||||||
}
|
|
||||||
H[I+j0] = score
|
|
||||||
}
|
|
||||||
|
|
||||||
if DEBUG {
|
|
||||||
if i == 0 {
|
if i == 0 {
|
||||||
fmt.Print(" ")
|
fmt.Print(" ")
|
||||||
for j := int(F[i]); j <= lastIdx; j++ {
|
for j := int(f); j <= lastIdx; j++ {
|
||||||
fmt.Printf(" " + string(input.Get(j)) + " ")
|
fmt.Printf(" " + string(T[j]) + " ")
|
||||||
}
|
}
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
}
|
}
|
||||||
fmt.Print(string(pattern[i]) + " ")
|
fmt.Print(string(pattern[i]) + " ")
|
||||||
for idx := int(F[0]); idx < int(F[i]); idx++ {
|
for idx := int(F[0]); idx < int(f); idx++ {
|
||||||
fmt.Print(" 0 ")
|
fmt.Print(" 0 ")
|
||||||
}
|
}
|
||||||
for idx := int(F[i]); idx <= lastIdx; idx++ {
|
for idx := int(f); idx <= lastIdx; idx++ {
|
||||||
fmt.Printf("%2d ", H[i*width+idx-int(F[0])])
|
fmt.Printf("%2d ", H[i*width+idx-int(F[0])])
|
||||||
}
|
}
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
@@ -407,22 +328,202 @@ func FuzzyMatchV2(caseSensitive bool, normalize bool, forward bool, input util.C
|
|||||||
if idx+int(F[0]) < int(F[i]) {
|
if idx+int(F[0]) < int(F[i]) {
|
||||||
p = 0
|
p = 0
|
||||||
}
|
}
|
||||||
|
if p > 0 {
|
||||||
fmt.Printf("%2d ", p)
|
fmt.Printf("%2d ", p)
|
||||||
|
} else {
|
||||||
|
fmt.Print(" ")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Phase 3. (Optional) Backtrace to find character positions
|
func FuzzyMatchV2(caseSensitive bool, normalize bool, forward bool, input *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
|
// Assume that pattern is given in lowercase if case-insensitive.
|
||||||
|
// First check if there's a match and calculate bonus for each position.
|
||||||
|
// If the input string is too long, consider finding the matching chars in
|
||||||
|
// this phase as well (non-optimal alignment).
|
||||||
|
M := len(pattern)
|
||||||
|
if M == 0 {
|
||||||
|
return Result{0, 0, 0}, posArray(withPos, M)
|
||||||
|
}
|
||||||
|
N := input.Length()
|
||||||
|
|
||||||
|
// Since O(nm) algorithm can be prohibitively expensive for large input,
|
||||||
|
// we fall back to the greedy algorithm.
|
||||||
|
if slab != nil && N*M > cap(slab.I16) {
|
||||||
|
return FuzzyMatchV1(caseSensitive, normalize, forward, input, pattern, withPos, slab)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 1. Optimized search for ASCII string
|
||||||
|
idx := asciiFuzzyIndex(input, pattern, caseSensitive)
|
||||||
|
if idx < 0 {
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reuse pre-allocated integer slice to avoid unnecessary sweeping of garbages
|
||||||
|
offset16 := 0
|
||||||
|
offset32 := 0
|
||||||
|
offset16, H0 := alloc16(offset16, slab, N)
|
||||||
|
offset16, C0 := alloc16(offset16, slab, N)
|
||||||
|
// Bonus point for each position
|
||||||
|
offset16, B := alloc16(offset16, slab, N)
|
||||||
|
// The first occurrence of each character in the pattern
|
||||||
|
offset32, F := alloc32(offset32, slab, M)
|
||||||
|
// Rune array
|
||||||
|
offset32, T := alloc32(offset32, slab, N)
|
||||||
|
input.CopyRunes(T)
|
||||||
|
|
||||||
|
// Phase 2. Calculate bonus for each point
|
||||||
|
maxScore, maxScorePos := int16(0), 0
|
||||||
|
pidx, lastIdx := 0, 0
|
||||||
|
pchar0, pchar, prevH0, prevClass, inGap := pattern[0], pattern[0], int16(0), charNonWord, false
|
||||||
|
Tsub := T[idx:]
|
||||||
|
H0sub, C0sub, Bsub := H0[idx:][:len(Tsub)], C0[idx:][:len(Tsub)], B[idx:][:len(Tsub)]
|
||||||
|
for off, char := range Tsub {
|
||||||
|
var class charClass
|
||||||
|
if char <= unicode.MaxASCII {
|
||||||
|
class = charClassOfAscii(char)
|
||||||
|
if !caseSensitive && class == charUpper {
|
||||||
|
char += 32
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
class = charClassOfNonAscii(char)
|
||||||
|
if !caseSensitive && class == charUpper {
|
||||||
|
char = unicode.To(unicode.LowerCase, char)
|
||||||
|
}
|
||||||
|
if normalize {
|
||||||
|
char = normalizeRune(char)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Tsub[off] = char
|
||||||
|
bonus := bonusFor(prevClass, class)
|
||||||
|
Bsub[off] = bonus
|
||||||
|
prevClass = class
|
||||||
|
|
||||||
|
if char == pchar {
|
||||||
|
if pidx < M {
|
||||||
|
F[pidx] = int32(idx + off)
|
||||||
|
pidx++
|
||||||
|
pchar = pattern[util.Min(pidx, M-1)]
|
||||||
|
}
|
||||||
|
lastIdx = idx + off
|
||||||
|
}
|
||||||
|
|
||||||
|
if char == pchar0 {
|
||||||
|
score := scoreMatch + bonus*bonusFirstCharMultiplier
|
||||||
|
H0sub[off] = score
|
||||||
|
C0sub[off] = 1
|
||||||
|
if M == 1 && (forward && score > maxScore || !forward && score >= maxScore) {
|
||||||
|
maxScore, maxScorePos = score, idx+off
|
||||||
|
if forward && bonus == bonusBoundary {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
inGap = false
|
||||||
|
} else {
|
||||||
|
if inGap {
|
||||||
|
H0sub[off] = util.Max16(prevH0+scoreGapExtention, 0)
|
||||||
|
} else {
|
||||||
|
H0sub[off] = util.Max16(prevH0+scoreGapStart, 0)
|
||||||
|
}
|
||||||
|
C0sub[off] = 0
|
||||||
|
inGap = true
|
||||||
|
}
|
||||||
|
prevH0 = H0sub[off]
|
||||||
|
}
|
||||||
|
if pidx != M {
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
|
}
|
||||||
|
if M == 1 {
|
||||||
|
result := Result{maxScorePos, maxScorePos + 1, int(maxScore)}
|
||||||
|
if !withPos {
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
pos := []int{maxScorePos}
|
||||||
|
return result, &pos
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 3. Fill in score matrix (H)
|
||||||
|
// Unlike the original algorithm, we do not allow omission.
|
||||||
|
f0 := int(F[0])
|
||||||
|
width := lastIdx - f0 + 1
|
||||||
|
offset16, H := alloc16(offset16, slab, width*M)
|
||||||
|
copy(H, H0[f0:lastIdx+1])
|
||||||
|
|
||||||
|
// Possible length of consecutive chunk at each position.
|
||||||
|
offset16, C := alloc16(offset16, slab, width*M)
|
||||||
|
copy(C, C0[f0:lastIdx+1])
|
||||||
|
|
||||||
|
Fsub := F[1:]
|
||||||
|
Psub := pattern[1:][:len(Fsub)]
|
||||||
|
for off, f := range Fsub {
|
||||||
|
f := int(f)
|
||||||
|
pchar := Psub[off]
|
||||||
|
pidx := off + 1
|
||||||
|
row := pidx * width
|
||||||
|
inGap := false
|
||||||
|
Tsub := T[f : lastIdx+1]
|
||||||
|
Bsub := B[f:][:len(Tsub)]
|
||||||
|
Csub := C[row+f-f0:][:len(Tsub)]
|
||||||
|
Cdiag := C[row+f-f0-1-width:][:len(Tsub)]
|
||||||
|
Hsub := H[row+f-f0:][:len(Tsub)]
|
||||||
|
Hdiag := H[row+f-f0-1-width:][:len(Tsub)]
|
||||||
|
Hleft := H[row+f-f0-1:][:len(Tsub)]
|
||||||
|
Hleft[0] = 0
|
||||||
|
for off, char := range Tsub {
|
||||||
|
col := off + f
|
||||||
|
var s1, s2, consecutive int16
|
||||||
|
|
||||||
|
if inGap {
|
||||||
|
s2 = Hleft[off] + scoreGapExtention
|
||||||
|
} else {
|
||||||
|
s2 = Hleft[off] + scoreGapStart
|
||||||
|
}
|
||||||
|
|
||||||
|
if pchar == char {
|
||||||
|
s1 = Hdiag[off] + scoreMatch
|
||||||
|
b := Bsub[off]
|
||||||
|
consecutive = Cdiag[off] + 1
|
||||||
|
// Break consecutive chunk
|
||||||
|
if b == bonusBoundary {
|
||||||
|
consecutive = 1
|
||||||
|
} else if consecutive > 1 {
|
||||||
|
b = util.Max16(b, util.Max16(bonusConsecutive, B[col-int(consecutive)+1]))
|
||||||
|
}
|
||||||
|
if s1+b < s2 {
|
||||||
|
s1 += Bsub[off]
|
||||||
|
consecutive = 0
|
||||||
|
} else {
|
||||||
|
s1 += b
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Csub[off] = consecutive
|
||||||
|
|
||||||
|
inGap = s1 < s2
|
||||||
|
score := util.Max16(util.Max16(s1, s2), 0)
|
||||||
|
if pidx == M-1 && (forward && score > maxScore || !forward && score >= maxScore) {
|
||||||
|
maxScore, maxScorePos = score, col
|
||||||
|
}
|
||||||
|
Hsub[off] = score
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if DEBUG {
|
||||||
|
debugV2(T, pattern, F, lastIdx, H, C)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 4. (Optional) Backtrace to find character positions
|
||||||
pos := posArray(withPos, M)
|
pos := posArray(withPos, M)
|
||||||
j := int(F[0])
|
j := f0
|
||||||
if withPos {
|
if withPos {
|
||||||
i := M - 1
|
i := M - 1
|
||||||
j = maxScorePos
|
j = maxScorePos
|
||||||
preferMatch := true
|
preferMatch := true
|
||||||
for {
|
for {
|
||||||
I := i * width
|
I := i * width
|
||||||
j0 := j - int(F[0])
|
j0 := j - f0
|
||||||
s := H[I+j0]
|
s := H[I+j0]
|
||||||
|
|
||||||
var s1, s2 int16
|
var s1, s2 int16
|
||||||
@@ -451,7 +552,7 @@ func FuzzyMatchV2(caseSensitive bool, normalize bool, forward bool, input util.C
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Implement the same sorting criteria as V2
|
// Implement the same sorting criteria as V2
|
||||||
func calculateScore(caseSensitive bool, normalize bool, text util.Chars, pattern []rune, sidx int, eidx int, withPos bool) (int, *[]int) {
|
func calculateScore(caseSensitive bool, normalize bool, text *util.Chars, pattern []rune, sidx int, eidx int, withPos bool) (int, *[]int) {
|
||||||
pidx, score, inGap, consecutive, firstBonus := 0, 0, false, 0, int16(0)
|
pidx, score, inGap, consecutive, firstBonus := 0, 0, false, 0, int16(0)
|
||||||
pos := posArray(withPos, len(pattern))
|
pos := posArray(withPos, len(pattern))
|
||||||
prevClass := charNonWord
|
prevClass := charNonWord
|
||||||
@@ -511,10 +612,13 @@ func calculateScore(caseSensitive bool, normalize bool, text util.Chars, pattern
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FuzzyMatchV1 performs fuzzy-match
|
// FuzzyMatchV1 performs fuzzy-match
|
||||||
func FuzzyMatchV1(caseSensitive bool, normalize bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
func FuzzyMatchV1(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
if len(pattern) == 0 {
|
if len(pattern) == 0 {
|
||||||
return Result{0, 0, 0}, nil
|
return Result{0, 0, 0}, nil
|
||||||
}
|
}
|
||||||
|
if asciiFuzzyIndex(text, pattern, caseSensitive) < 0 {
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
|
}
|
||||||
|
|
||||||
pidx := 0
|
pidx := 0
|
||||||
sidx := -1
|
sidx := -1
|
||||||
@@ -594,7 +698,7 @@ func FuzzyMatchV1(caseSensitive bool, normalize bool, forward bool, text util.Ch
|
|||||||
// bonus point, instead of stopping immediately after finding the first match.
|
// bonus point, instead of stopping immediately after finding the first match.
|
||||||
// The solution is much cheaper since there is only one possible alignment of
|
// The solution is much cheaper since there is only one possible alignment of
|
||||||
// the pattern.
|
// the pattern.
|
||||||
func ExactMatchNaive(caseSensitive bool, normalize bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
func ExactMatchNaive(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
if len(pattern) == 0 {
|
if len(pattern) == 0 {
|
||||||
return Result{0, 0, 0}, nil
|
return Result{0, 0, 0}, nil
|
||||||
}
|
}
|
||||||
@@ -606,6 +710,10 @@ func ExactMatchNaive(caseSensitive bool, normalize bool, forward bool, text util
|
|||||||
return Result{-1, -1, 0}, nil
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if asciiFuzzyIndex(text, pattern, caseSensitive) < 0 {
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
|
}
|
||||||
|
|
||||||
// For simplicity, only look at the bonus at the first character position
|
// For simplicity, only look at the bonus at the first character position
|
||||||
pidx := 0
|
pidx := 0
|
||||||
bestPos, bonus, bestBonus := -1, int16(0), int16(-1)
|
bestPos, bonus, bestBonus := -1, int16(0), int16(-1)
|
||||||
@@ -660,7 +768,7 @@ func ExactMatchNaive(caseSensitive bool, normalize bool, forward bool, text util
|
|||||||
}
|
}
|
||||||
|
|
||||||
// PrefixMatch performs prefix-match
|
// PrefixMatch performs prefix-match
|
||||||
func PrefixMatch(caseSensitive bool, normalize bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
func PrefixMatch(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
if len(pattern) == 0 {
|
if len(pattern) == 0 {
|
||||||
return Result{0, 0, 0}, nil
|
return Result{0, 0, 0}, nil
|
||||||
}
|
}
|
||||||
@@ -687,7 +795,7 @@ func PrefixMatch(caseSensitive bool, normalize bool, forward bool, text util.Cha
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SuffixMatch performs suffix-match
|
// SuffixMatch performs suffix-match
|
||||||
func SuffixMatch(caseSensitive bool, normalize bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
func SuffixMatch(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
lenRunes := text.Length()
|
lenRunes := text.Length()
|
||||||
trimmedLen := lenRunes - text.TrailingWhitespaces()
|
trimmedLen := lenRunes - text.TrailingWhitespaces()
|
||||||
if len(pattern) == 0 {
|
if len(pattern) == 0 {
|
||||||
@@ -718,7 +826,7 @@ func SuffixMatch(caseSensitive bool, normalize bool, forward bool, text util.Cha
|
|||||||
}
|
}
|
||||||
|
|
||||||
// EqualMatch performs equal-match
|
// EqualMatch performs equal-match
|
||||||
func EqualMatch(caseSensitive bool, normalize bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
func EqualMatch(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
lenPattern := len(pattern)
|
lenPattern := len(pattern)
|
||||||
if text.Length() != lenPattern {
|
if text.Length() != lenPattern {
|
||||||
return Result{-1, -1, 0}, nil
|
return Result{-1, -1, 0}, nil
|
||||||
|
|||||||
@@ -17,7 +17,8 @@ func assertMatch2(t *testing.T, fun Algo, caseSensitive, normalize, forward bool
|
|||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
pattern = strings.ToLower(pattern)
|
pattern = strings.ToLower(pattern)
|
||||||
}
|
}
|
||||||
res, pos := fun(caseSensitive, normalize, forward, util.RunesToChars([]rune(input)), []rune(pattern), true, nil)
|
chars := util.ToChars([]byte(input))
|
||||||
|
res, pos := fun(caseSensitive, normalize, forward, &chars, []rune(pattern), true, nil)
|
||||||
var start, end int
|
var start, end int
|
||||||
if pos == nil || len(*pos) == 0 {
|
if pos == nil || len(*pos) == 0 {
|
||||||
start = res.Start
|
start = res.Start
|
||||||
|
|||||||
76
src/ansi.go
76
src/ansi.go
@@ -44,7 +44,21 @@ func init() {
|
|||||||
*/
|
*/
|
||||||
// The following regular expression will include not all but most of the
|
// The following regular expression will include not all but most of the
|
||||||
// frequently used ANSI sequences
|
// frequently used ANSI sequences
|
||||||
ansiRegex = regexp.MustCompile("\x1b[\\[()][0-9;]*[a-zA-Z@]|\x1b.|[\x0e\x0f]|.\x08")
|
ansiRegex = regexp.MustCompile("(?:\x1b[\\[()][0-9;]*[a-zA-Z@]|\x1b.|[\x0e\x0f]|.\x08)")
|
||||||
|
}
|
||||||
|
|
||||||
|
func findAnsiStart(str string) int {
|
||||||
|
idx := 0
|
||||||
|
for ; idx < len(str); idx++ {
|
||||||
|
b := str[idx]
|
||||||
|
if b == 0x1b || b == 0x0e || b == 0x0f {
|
||||||
|
return idx
|
||||||
|
}
|
||||||
|
if b == 0x08 && idx > 0 {
|
||||||
|
return idx - 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return idx
|
||||||
}
|
}
|
||||||
|
|
||||||
func extractColor(str string, state *ansiState, proc func(string, *ansiState) bool) (string, *[]ansiOffset, *ansiState) {
|
func extractColor(str string, state *ansiState, proc func(string, *ansiState) bool) (string, *[]ansiOffset, *ansiState) {
|
||||||
@@ -55,50 +69,76 @@ func extractColor(str string, state *ansiState, proc func(string, *ansiState) bo
|
|||||||
offsets = append(offsets, ansiOffset{[2]int32{0, 0}, *state})
|
offsets = append(offsets, ansiOffset{[2]int32{0, 0}, *state})
|
||||||
}
|
}
|
||||||
|
|
||||||
idx := 0
|
prevIdx := 0
|
||||||
for _, offset := range ansiRegex.FindAllStringIndex(str, -1) {
|
runeCount := 0
|
||||||
prev := str[idx:offset[0]]
|
for idx := 0; idx < len(str); {
|
||||||
output.WriteString(prev)
|
idx += findAnsiStart(str[idx:])
|
||||||
|
if idx == len(str) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure that we found an ANSI code
|
||||||
|
offset := ansiRegex.FindStringIndex(str[idx:])
|
||||||
|
if len(offset) < 2 {
|
||||||
|
idx++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
offset[0] += idx
|
||||||
|
offset[1] += idx
|
||||||
|
idx = offset[1]
|
||||||
|
|
||||||
|
// Check if we should continue
|
||||||
|
prev := str[prevIdx:offset[0]]
|
||||||
if proc != nil && !proc(prev, state) {
|
if proc != nil && !proc(prev, state) {
|
||||||
return "", nil, nil
|
return "", nil, nil
|
||||||
}
|
}
|
||||||
newState := interpretCode(str[offset[0]:offset[1]], state)
|
|
||||||
|
|
||||||
|
prevIdx = offset[1]
|
||||||
|
runeCount += utf8.RuneCountInString(prev)
|
||||||
|
output.WriteString(prev)
|
||||||
|
|
||||||
|
newState := interpretCode(str[offset[0]:offset[1]], state)
|
||||||
if !newState.equals(state) {
|
if !newState.equals(state) {
|
||||||
if state != nil {
|
if state != nil {
|
||||||
// Update last offset
|
// Update last offset
|
||||||
(&offsets[len(offsets)-1]).offset[1] = int32(utf8.RuneCount(output.Bytes()))
|
(&offsets[len(offsets)-1]).offset[1] = int32(runeCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
if newState.colored() {
|
if newState.colored() {
|
||||||
// Append new offset
|
// Append new offset
|
||||||
state = newState
|
state = newState
|
||||||
newLen := int32(utf8.RuneCount(output.Bytes()))
|
offsets = append(offsets, ansiOffset{[2]int32{int32(runeCount), int32(runeCount)}, *state})
|
||||||
offsets = append(offsets, ansiOffset{[2]int32{newLen, newLen}, *state})
|
|
||||||
} else {
|
} else {
|
||||||
// Discard state
|
// Discard state
|
||||||
state = nil
|
state = nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
idx = offset[1]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
rest := str[idx:]
|
var rest string
|
||||||
if len(rest) > 0 {
|
var trimmed string
|
||||||
|
|
||||||
|
if prevIdx == 0 {
|
||||||
|
// No ANSI code found
|
||||||
|
rest = str
|
||||||
|
trimmed = str
|
||||||
|
} else {
|
||||||
|
rest = str[prevIdx:]
|
||||||
output.WriteString(rest)
|
output.WriteString(rest)
|
||||||
if state != nil {
|
trimmed = output.String()
|
||||||
// Update last offset
|
|
||||||
(&offsets[len(offsets)-1]).offset[1] = int32(utf8.RuneCount(output.Bytes()))
|
|
||||||
}
|
}
|
||||||
|
if len(rest) > 0 && state != nil {
|
||||||
|
// Update last offset
|
||||||
|
runeCount += utf8.RuneCountInString(rest)
|
||||||
|
(&offsets[len(offsets)-1]).offset[1] = int32(runeCount)
|
||||||
}
|
}
|
||||||
if proc != nil {
|
if proc != nil {
|
||||||
proc(rest, state)
|
proc(rest, state)
|
||||||
}
|
}
|
||||||
if len(offsets) == 0 {
|
if len(offsets) == 0 {
|
||||||
return output.String(), nil, state
|
return trimmed, nil, state
|
||||||
}
|
}
|
||||||
return output.String(), &offsets, state
|
return trimmed, &offsets, state
|
||||||
}
|
}
|
||||||
|
|
||||||
func interpretCode(ansiCode string, prevState *ansiState) *ansiState {
|
func interpretCode(ansiCode string, prevState *ansiState) *ansiState {
|
||||||
|
|||||||
42
src/cache.go
42
src/cache.go
@@ -3,7 +3,7 @@ package fzf
|
|||||||
import "sync"
|
import "sync"
|
||||||
|
|
||||||
// queryCache associates strings to lists of items
|
// queryCache associates strings to lists of items
|
||||||
type queryCache map[string][]*Result
|
type queryCache map[string][]Result
|
||||||
|
|
||||||
// ChunkCache associates Chunk and query string to lists of items
|
// ChunkCache associates Chunk and query string to lists of items
|
||||||
type ChunkCache struct {
|
type ChunkCache struct {
|
||||||
@@ -17,7 +17,7 @@ func NewChunkCache() ChunkCache {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Add adds the list to the cache
|
// Add adds the list to the cache
|
||||||
func (cc *ChunkCache) Add(chunk *Chunk, key string, list []*Result) {
|
func (cc *ChunkCache) Add(chunk *Chunk, key string, list []Result) {
|
||||||
if len(key) == 0 || !chunk.IsFull() || len(list) > queryCacheMax {
|
if len(key) == 0 || !chunk.IsFull() || len(list) > queryCacheMax {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -33,10 +33,10 @@ func (cc *ChunkCache) Add(chunk *Chunk, key string, list []*Result) {
|
|||||||
(*qc)[key] = list
|
(*qc)[key] = list
|
||||||
}
|
}
|
||||||
|
|
||||||
// Find is called to lookup ChunkCache
|
// Lookup is called to lookup ChunkCache
|
||||||
func (cc *ChunkCache) Find(chunk *Chunk, key string) ([]*Result, bool) {
|
func (cc *ChunkCache) Lookup(chunk *Chunk, key string) []Result {
|
||||||
if len(key) == 0 || !chunk.IsFull() {
|
if len(key) == 0 || !chunk.IsFull() {
|
||||||
return nil, false
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
cc.mutex.Lock()
|
cc.mutex.Lock()
|
||||||
@@ -46,8 +46,36 @@ func (cc *ChunkCache) Find(chunk *Chunk, key string) ([]*Result, bool) {
|
|||||||
if ok {
|
if ok {
|
||||||
list, ok := (*qc)[key]
|
list, ok := (*qc)[key]
|
||||||
if ok {
|
if ok {
|
||||||
return list, true
|
return list
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil, false
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cc *ChunkCache) Search(chunk *Chunk, key string) []Result {
|
||||||
|
if len(key) == 0 || !chunk.IsFull() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
cc.mutex.Lock()
|
||||||
|
defer cc.mutex.Unlock()
|
||||||
|
|
||||||
|
qc, ok := cc.cache[chunk]
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for idx := 1; idx < len(key); idx++ {
|
||||||
|
// [---------| ] | [ |---------]
|
||||||
|
// [--------| ] | [ |--------]
|
||||||
|
// [-------| ] | [ |-------]
|
||||||
|
prefix := key[:len(key)-idx]
|
||||||
|
suffix := key[idx:]
|
||||||
|
for _, substr := range [2]string{prefix, suffix} {
|
||||||
|
if cached, found := (*qc)[substr]; found {
|
||||||
|
return cached
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,37 +4,36 @@ import "testing"
|
|||||||
|
|
||||||
func TestChunkCache(t *testing.T) {
|
func TestChunkCache(t *testing.T) {
|
||||||
cache := NewChunkCache()
|
cache := NewChunkCache()
|
||||||
chunk2 := make(Chunk, chunkSize)
|
|
||||||
chunk1p := &Chunk{}
|
chunk1p := &Chunk{}
|
||||||
chunk2p := &chunk2
|
chunk2p := &Chunk{count: chunkSize}
|
||||||
items1 := []*Result{&Result{}}
|
items1 := []Result{Result{}}
|
||||||
items2 := []*Result{&Result{}, &Result{}}
|
items2 := []Result{Result{}, Result{}}
|
||||||
cache.Add(chunk1p, "foo", items1)
|
cache.Add(chunk1p, "foo", items1)
|
||||||
cache.Add(chunk2p, "foo", items1)
|
cache.Add(chunk2p, "foo", items1)
|
||||||
cache.Add(chunk2p, "bar", items2)
|
cache.Add(chunk2p, "bar", items2)
|
||||||
|
|
||||||
{ // chunk1 is not full
|
{ // chunk1 is not full
|
||||||
cached, found := cache.Find(chunk1p, "foo")
|
cached := cache.Lookup(chunk1p, "foo")
|
||||||
if found {
|
if cached != nil {
|
||||||
t.Error("Cached disabled for non-empty chunks", found, cached)
|
t.Error("Cached disabled for non-empty chunks", cached)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
cached, found := cache.Find(chunk2p, "foo")
|
cached := cache.Lookup(chunk2p, "foo")
|
||||||
if !found || len(cached) != 1 {
|
if cached == nil || len(cached) != 1 {
|
||||||
t.Error("Expected 1 item cached", found, cached)
|
t.Error("Expected 1 item cached", cached)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
cached, found := cache.Find(chunk2p, "bar")
|
cached := cache.Lookup(chunk2p, "bar")
|
||||||
if !found || len(cached) != 2 {
|
if cached == nil || len(cached) != 2 {
|
||||||
t.Error("Expected 2 items cached", found, cached)
|
t.Error("Expected 2 items cached", cached)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
cached, found := cache.Find(chunk1p, "foobar")
|
cached := cache.Lookup(chunk1p, "foobar")
|
||||||
if found {
|
if cached != nil {
|
||||||
t.Error("Expected 0 item cached", found, cached)
|
t.Error("Expected 0 item cached", cached)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,17 +2,18 @@ package fzf
|
|||||||
|
|
||||||
import "sync"
|
import "sync"
|
||||||
|
|
||||||
// Chunk is a list of Item pointers whose size has the upper limit of chunkSize
|
// Chunk is a list of Items whose size has the upper limit of chunkSize
|
||||||
type Chunk []*Item // >>> []Item
|
type Chunk struct {
|
||||||
|
items [chunkSize]Item
|
||||||
|
count int
|
||||||
|
}
|
||||||
|
|
||||||
// ItemBuilder is a closure type that builds Item object from a pointer to a
|
// ItemBuilder is a closure type that builds Item object from byte array
|
||||||
// string and an integer
|
type ItemBuilder func(*Item, []byte) bool
|
||||||
type ItemBuilder func([]byte, int) *Item
|
|
||||||
|
|
||||||
// ChunkList is a list of Chunks
|
// ChunkList is a list of Chunks
|
||||||
type ChunkList struct {
|
type ChunkList struct {
|
||||||
chunks []*Chunk
|
chunks []*Chunk
|
||||||
count int
|
|
||||||
mutex sync.Mutex
|
mutex sync.Mutex
|
||||||
trans ItemBuilder
|
trans ItemBuilder
|
||||||
}
|
}
|
||||||
@@ -21,15 +22,13 @@ type ChunkList struct {
|
|||||||
func NewChunkList(trans ItemBuilder) *ChunkList {
|
func NewChunkList(trans ItemBuilder) *ChunkList {
|
||||||
return &ChunkList{
|
return &ChunkList{
|
||||||
chunks: []*Chunk{},
|
chunks: []*Chunk{},
|
||||||
count: 0,
|
|
||||||
mutex: sync.Mutex{},
|
mutex: sync.Mutex{},
|
||||||
trans: trans}
|
trans: trans}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Chunk) push(trans ItemBuilder, data []byte, index int) bool {
|
func (c *Chunk) push(trans ItemBuilder, data []byte) bool {
|
||||||
item := trans(data, index)
|
if trans(&c.items[c.count], data) {
|
||||||
if item != nil {
|
c.count++
|
||||||
*c = append(*c, item)
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
@@ -37,7 +36,7 @@ func (c *Chunk) push(trans ItemBuilder, data []byte, index int) bool {
|
|||||||
|
|
||||||
// IsFull returns true if the Chunk is full
|
// IsFull returns true if the Chunk is full
|
||||||
func (c *Chunk) IsFull() bool {
|
func (c *Chunk) IsFull() bool {
|
||||||
return len(*c) == chunkSize
|
return c.count == chunkSize
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cl *ChunkList) lastChunk() *Chunk {
|
func (cl *ChunkList) lastChunk() *Chunk {
|
||||||
@@ -49,45 +48,35 @@ func CountItems(cs []*Chunk) int {
|
|||||||
if len(cs) == 0 {
|
if len(cs) == 0 {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
return chunkSize*(len(cs)-1) + len(*(cs[len(cs)-1]))
|
return chunkSize*(len(cs)-1) + cs[len(cs)-1].count
|
||||||
}
|
}
|
||||||
|
|
||||||
// Push adds the item to the list
|
// Push adds the item to the list
|
||||||
func (cl *ChunkList) Push(data []byte) bool {
|
func (cl *ChunkList) Push(data []byte) bool {
|
||||||
cl.mutex.Lock()
|
cl.mutex.Lock()
|
||||||
defer cl.mutex.Unlock()
|
|
||||||
|
|
||||||
if len(cl.chunks) == 0 || cl.lastChunk().IsFull() {
|
if len(cl.chunks) == 0 || cl.lastChunk().IsFull() {
|
||||||
newChunk := Chunk(make([]*Item, 0, chunkSize))
|
cl.chunks = append(cl.chunks, &Chunk{})
|
||||||
cl.chunks = append(cl.chunks, &newChunk)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if cl.lastChunk().push(cl.trans, data, cl.count) {
|
ret := cl.lastChunk().push(cl.trans, data)
|
||||||
cl.count++
|
cl.mutex.Unlock()
|
||||||
return true
|
return ret
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Snapshot returns immutable snapshot of the ChunkList
|
// Snapshot returns immutable snapshot of the ChunkList
|
||||||
func (cl *ChunkList) Snapshot() ([]*Chunk, int) {
|
func (cl *ChunkList) Snapshot() ([]*Chunk, int) {
|
||||||
cl.mutex.Lock()
|
cl.mutex.Lock()
|
||||||
defer cl.mutex.Unlock()
|
|
||||||
|
|
||||||
ret := make([]*Chunk, len(cl.chunks))
|
ret := make([]*Chunk, len(cl.chunks))
|
||||||
copy(ret, cl.chunks)
|
copy(ret, cl.chunks)
|
||||||
|
|
||||||
// Duplicate the last chunk
|
// Duplicate the last chunk
|
||||||
if cnt := len(ret); cnt > 0 {
|
if cnt := len(ret); cnt > 0 {
|
||||||
ret[cnt-1] = ret[cnt-1].dupe()
|
newChunk := *ret[cnt-1]
|
||||||
}
|
ret[cnt-1] = &newChunk
|
||||||
return ret, cl.count
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Chunk) dupe() *Chunk {
|
cl.mutex.Unlock()
|
||||||
newChunk := make(Chunk, len(*c))
|
return ret, CountItems(ret)
|
||||||
for idx, ptr := range *c {
|
|
||||||
newChunk[idx] = ptr
|
|
||||||
}
|
|
||||||
return &newChunk
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,8 +11,9 @@ func TestChunkList(t *testing.T) {
|
|||||||
// FIXME global
|
// FIXME global
|
||||||
sortCriteria = []criterion{byScore, byLength}
|
sortCriteria = []criterion{byScore, byLength}
|
||||||
|
|
||||||
cl := NewChunkList(func(s []byte, i int) *Item {
|
cl := NewChunkList(func(item *Item, s []byte) bool {
|
||||||
return &Item{text: util.ToChars(s), index: int32(i * 2)}
|
item.text = util.ToChars(s)
|
||||||
|
return true
|
||||||
})
|
})
|
||||||
|
|
||||||
// Snapshot
|
// Snapshot
|
||||||
@@ -38,11 +39,11 @@ func TestChunkList(t *testing.T) {
|
|||||||
|
|
||||||
// Check the content of the ChunkList
|
// Check the content of the ChunkList
|
||||||
chunk1 := snapshot[0]
|
chunk1 := snapshot[0]
|
||||||
if len(*chunk1) != 2 {
|
if chunk1.count != 2 {
|
||||||
t.Error("Snapshot should contain only two items")
|
t.Error("Snapshot should contain only two items")
|
||||||
}
|
}
|
||||||
if (*chunk1)[0].text.ToString() != "hello" || (*chunk1)[0].index != 0 ||
|
if chunk1.items[0].text.ToString() != "hello" ||
|
||||||
(*chunk1)[1].text.ToString() != "world" || (*chunk1)[1].index != 2 {
|
chunk1.items[1].text.ToString() != "world" {
|
||||||
t.Error("Invalid data")
|
t.Error("Invalid data")
|
||||||
}
|
}
|
||||||
if chunk1.IsFull() {
|
if chunk1.IsFull() {
|
||||||
@@ -65,14 +66,14 @@ func TestChunkList(t *testing.T) {
|
|||||||
!snapshot[1].IsFull() || snapshot[2].IsFull() || count != chunkSize*2+2 {
|
!snapshot[1].IsFull() || snapshot[2].IsFull() || count != chunkSize*2+2 {
|
||||||
t.Error("Expected two full chunks and one more chunk")
|
t.Error("Expected two full chunks and one more chunk")
|
||||||
}
|
}
|
||||||
if len(*snapshot[2]) != 2 {
|
if snapshot[2].count != 2 {
|
||||||
t.Error("Unexpected number of items")
|
t.Error("Unexpected number of items")
|
||||||
}
|
}
|
||||||
|
|
||||||
cl.Push([]byte("hello"))
|
cl.Push([]byte("hello"))
|
||||||
cl.Push([]byte("world"))
|
cl.Push([]byte("world"))
|
||||||
|
|
||||||
lastChunkCount := len(*snapshot[len(snapshot)-1])
|
lastChunkCount := snapshot[len(snapshot)-1].count
|
||||||
if lastChunkCount != 2 {
|
if lastChunkCount != 2 {
|
||||||
t.Error("Unexpected number of items:", lastChunkCount)
|
t.Error("Unexpected number of items:", lastChunkCount)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ import (
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
// Current version
|
// Current version
|
||||||
version = "0.16.8"
|
version = "0.17.0"
|
||||||
|
|
||||||
// Core
|
// Core
|
||||||
coordinatorDelayMax time.Duration = 100 * time.Millisecond
|
coordinatorDelayMax time.Duration = 100 * time.Millisecond
|
||||||
@@ -17,6 +17,9 @@ const (
|
|||||||
|
|
||||||
// Reader
|
// Reader
|
||||||
readerBufferSize = 64 * 1024
|
readerBufferSize = 64 * 1024
|
||||||
|
readerPollIntervalMin = 10 * time.Millisecond
|
||||||
|
readerPollIntervalStep = 5 * time.Millisecond
|
||||||
|
readerPollIntervalMax = 50 * time.Millisecond
|
||||||
|
|
||||||
// Terminal
|
// Terminal
|
||||||
initialDelay = 20 * time.Millisecond
|
initialDelay = 20 * time.Millisecond
|
||||||
@@ -68,7 +71,7 @@ const (
|
|||||||
EvtSearchProgress
|
EvtSearchProgress
|
||||||
EvtSearchFin
|
EvtSearchFin
|
||||||
EvtHeader
|
EvtHeader
|
||||||
EvtClose
|
EvtReady
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
74
src/core.go
74
src/core.go
@@ -63,76 +63,64 @@ func Run(opts *Options, revision string) {
|
|||||||
ansiProcessor := func(data []byte) (util.Chars, *[]ansiOffset) {
|
ansiProcessor := func(data []byte) (util.Chars, *[]ansiOffset) {
|
||||||
return util.ToChars(data), nil
|
return util.ToChars(data), nil
|
||||||
}
|
}
|
||||||
ansiProcessorRunes := func(data []rune) (util.Chars, *[]ansiOffset) {
|
|
||||||
return util.RunesToChars(data), nil
|
|
||||||
}
|
|
||||||
if opts.Ansi {
|
if opts.Ansi {
|
||||||
if opts.Theme != nil {
|
if opts.Theme != nil {
|
||||||
var state *ansiState
|
var state *ansiState
|
||||||
ansiProcessor = func(data []byte) (util.Chars, *[]ansiOffset) {
|
ansiProcessor = func(data []byte) (util.Chars, *[]ansiOffset) {
|
||||||
trimmed, offsets, newState := extractColor(string(data), state, nil)
|
trimmed, offsets, newState := extractColor(string(data), state, nil)
|
||||||
state = newState
|
state = newState
|
||||||
return util.RunesToChars([]rune(trimmed)), offsets
|
return util.ToChars([]byte(trimmed)), offsets
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// When color is disabled but ansi option is given,
|
// When color is disabled but ansi option is given,
|
||||||
// we simply strip out ANSI codes from the input
|
// we simply strip out ANSI codes from the input
|
||||||
ansiProcessor = func(data []byte) (util.Chars, *[]ansiOffset) {
|
ansiProcessor = func(data []byte) (util.Chars, *[]ansiOffset) {
|
||||||
trimmed, _, _ := extractColor(string(data), nil, nil)
|
trimmed, _, _ := extractColor(string(data), nil, nil)
|
||||||
return util.RunesToChars([]rune(trimmed)), nil
|
return util.ToChars([]byte(trimmed)), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ansiProcessorRunes = func(data []rune) (util.Chars, *[]ansiOffset) {
|
|
||||||
return ansiProcessor([]byte(string(data)))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Chunk list
|
// Chunk list
|
||||||
var chunkList *ChunkList
|
var chunkList *ChunkList
|
||||||
|
var itemIndex int32
|
||||||
header := make([]string, 0, opts.HeaderLines)
|
header := make([]string, 0, opts.HeaderLines)
|
||||||
if len(opts.WithNth) == 0 {
|
if len(opts.WithNth) == 0 {
|
||||||
chunkList = NewChunkList(func(data []byte, index int) *Item {
|
chunkList = NewChunkList(func(item *Item, data []byte) bool {
|
||||||
if len(header) < opts.HeaderLines {
|
if len(header) < opts.HeaderLines {
|
||||||
header = append(header, string(data))
|
header = append(header, string(data))
|
||||||
eventBox.Set(EvtHeader, header)
|
eventBox.Set(EvtHeader, header)
|
||||||
return nil
|
return false
|
||||||
}
|
}
|
||||||
chars, colors := ansiProcessor(data)
|
item.text, item.colors = ansiProcessor(data)
|
||||||
return &Item{
|
item.text.Index = itemIndex
|
||||||
index: int32(index),
|
itemIndex++
|
||||||
trimLength: -1,
|
return true
|
||||||
text: chars,
|
|
||||||
colors: colors}
|
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
chunkList = NewChunkList(func(data []byte, index int) *Item {
|
chunkList = NewChunkList(func(item *Item, data []byte) bool {
|
||||||
tokens := Tokenize(util.ToChars(data), opts.Delimiter)
|
tokens := Tokenize(string(data), opts.Delimiter)
|
||||||
trans := Transform(tokens, opts.WithNth)
|
trans := Transform(tokens, opts.WithNth)
|
||||||
|
transformed := joinTokens(trans)
|
||||||
if len(header) < opts.HeaderLines {
|
if len(header) < opts.HeaderLines {
|
||||||
header = append(header, string(joinTokens(trans)))
|
header = append(header, transformed)
|
||||||
eventBox.Set(EvtHeader, header)
|
eventBox.Set(EvtHeader, header)
|
||||||
return nil
|
return false
|
||||||
}
|
}
|
||||||
textRunes := joinTokens(trans)
|
item.text, item.colors = ansiProcessor([]byte(transformed))
|
||||||
item := Item{
|
item.text.Index = itemIndex
|
||||||
index: int32(index),
|
item.origText = &data
|
||||||
trimLength: -1,
|
itemIndex++
|
||||||
origText: &data,
|
return true
|
||||||
colors: nil}
|
|
||||||
|
|
||||||
trimmed, colors := ansiProcessorRunes(textRunes)
|
|
||||||
item.text = trimmed
|
|
||||||
item.colors = colors
|
|
||||||
return &item
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reader
|
// Reader
|
||||||
streamingFilter := opts.Filter != nil && !sort && !opts.Tac && !opts.Sync
|
streamingFilter := opts.Filter != nil && !sort && !opts.Tac && !opts.Sync
|
||||||
if !streamingFilter {
|
if !streamingFilter {
|
||||||
reader := Reader{func(data []byte) bool {
|
reader := NewReader(func(data []byte) bool {
|
||||||
return chunkList.Push(data)
|
return chunkList.Push(data)
|
||||||
}, eventBox, opts.ReadZero}
|
}, eventBox, opts.ReadZero)
|
||||||
go reader.ReadSource()
|
go reader.ReadSource()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -165,17 +153,17 @@ func Run(opts *Options, revision string) {
|
|||||||
found := false
|
found := false
|
||||||
if streamingFilter {
|
if streamingFilter {
|
||||||
slab := util.MakeSlab(slab16Size, slab32Size)
|
slab := util.MakeSlab(slab16Size, slab32Size)
|
||||||
reader := Reader{
|
reader := NewReader(
|
||||||
func(runes []byte) bool {
|
func(runes []byte) bool {
|
||||||
item := chunkList.trans(runes, 0)
|
item := Item{}
|
||||||
if item != nil {
|
if chunkList.trans(&item, runes) {
|
||||||
if result, _, _ := pattern.MatchItem(item, false, slab); result != nil {
|
if result, _, _ := pattern.MatchItem(&item, false, slab); result != nil {
|
||||||
opts.Printer(item.text.ToString())
|
opts.Printer(item.text.ToString())
|
||||||
found = true
|
found = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}, eventBox, opts.ReadZero}
|
}, eventBox, opts.ReadZero)
|
||||||
reader.ReadSource()
|
reader.ReadSource()
|
||||||
} else {
|
} else {
|
||||||
eventBox.Unwatch(EvtReadNew)
|
eventBox.Unwatch(EvtReadNew)
|
||||||
@@ -221,14 +209,19 @@ func Run(opts *Options, revision string) {
|
|||||||
delay := true
|
delay := true
|
||||||
ticks++
|
ticks++
|
||||||
eventBox.Wait(func(events *util.Events) {
|
eventBox.Wait(func(events *util.Events) {
|
||||||
defer events.Clear()
|
if _, fin := (*events)[EvtReadFin]; fin {
|
||||||
|
delete(*events, EvtReadNew)
|
||||||
|
}
|
||||||
for evt, value := range *events {
|
for evt, value := range *events {
|
||||||
switch evt {
|
switch evt {
|
||||||
|
|
||||||
case EvtReadNew, EvtReadFin:
|
case EvtReadNew, EvtReadFin:
|
||||||
reading = reading && evt == EvtReadNew
|
reading = reading && evt == EvtReadNew
|
||||||
snapshot, count := chunkList.Snapshot()
|
snapshot, count := chunkList.Snapshot()
|
||||||
terminal.UpdateCount(count, !reading)
|
terminal.UpdateCount(count, !reading, value.(bool))
|
||||||
|
if opts.Sync {
|
||||||
|
terminal.UpdateList(PassMerger(&snapshot, opts.Tac))
|
||||||
|
}
|
||||||
matcher.Reset(snapshot, terminal.Input(), false, !reading, sort)
|
matcher.Reset(snapshot, terminal.Input(), false, !reading, sort)
|
||||||
|
|
||||||
case EvtSearchNew:
|
case EvtSearchNew:
|
||||||
@@ -281,6 +274,7 @@ func Run(opts *Options, revision string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
events.Clear()
|
||||||
})
|
})
|
||||||
if delay && reading {
|
if delay && reading {
|
||||||
dur := util.DurWithin(
|
dur := util.DurWithin(
|
||||||
|
|||||||
24
src/item.go
24
src/item.go
@@ -4,27 +4,23 @@ import (
|
|||||||
"github.com/junegunn/fzf/src/util"
|
"github.com/junegunn/fzf/src/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Item represents each input line
|
// Item represents each input line. 56 bytes.
|
||||||
type Item struct {
|
type Item struct {
|
||||||
index int32
|
text util.Chars // 32 = 24 + 1 + 1 + 2 + 4
|
||||||
trimLength int32
|
transformed *[]Token // 8
|
||||||
text util.Chars
|
origText *[]byte // 8
|
||||||
origText *[]byte
|
colors *[]ansiOffset // 8
|
||||||
colors *[]ansiOffset
|
|
||||||
transformed []Token
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Index returns ordinal index of the Item
|
// Index returns ordinal index of the Item
|
||||||
func (item *Item) Index() int32 {
|
func (item *Item) Index() int32 {
|
||||||
return item.index
|
return item.text.Index
|
||||||
}
|
}
|
||||||
|
|
||||||
func (item *Item) TrimLength() int32 {
|
var minItem = Item{text: util.Chars{Index: -1}}
|
||||||
if item.trimLength >= 0 {
|
|
||||||
return item.trimLength
|
func (item *Item) TrimLength() uint16 {
|
||||||
}
|
return item.text.TrimLength()
|
||||||
item.trimLength = int32(item.text.TrimLength())
|
|
||||||
return item.trimLength
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Colors returns ansiOffsets of the Item
|
// Colors returns ansiOffsets of the Item
|
||||||
|
|||||||
@@ -131,7 +131,7 @@ func (m *Matcher) sliceChunks(chunks []*Chunk) [][]*Chunk {
|
|||||||
|
|
||||||
type partialResult struct {
|
type partialResult struct {
|
||||||
index int
|
index int
|
||||||
matches []*Result
|
matches []Result
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {
|
func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {
|
||||||
@@ -162,7 +162,7 @@ func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {
|
|||||||
go func(idx int, slab *util.Slab, chunks []*Chunk) {
|
go func(idx int, slab *util.Slab, chunks []*Chunk) {
|
||||||
defer func() { waitGroup.Done() }()
|
defer func() { waitGroup.Done() }()
|
||||||
count := 0
|
count := 0
|
||||||
allMatches := make([][]*Result, len(chunks))
|
allMatches := make([][]Result, len(chunks))
|
||||||
for idx, chunk := range chunks {
|
for idx, chunk := range chunks {
|
||||||
matches := request.pattern.Match(chunk, slab)
|
matches := request.pattern.Match(chunk, slab)
|
||||||
allMatches[idx] = matches
|
allMatches[idx] = matches
|
||||||
@@ -172,7 +172,7 @@ func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {
|
|||||||
}
|
}
|
||||||
countChan <- len(matches)
|
countChan <- len(matches)
|
||||||
}
|
}
|
||||||
sliceMatches := make([]*Result, 0, count)
|
sliceMatches := make([]Result, 0, count)
|
||||||
for _, matches := range allMatches {
|
for _, matches := range allMatches {
|
||||||
sliceMatches = append(sliceMatches, matches...)
|
sliceMatches = append(sliceMatches, matches...)
|
||||||
}
|
}
|
||||||
@@ -212,7 +212,7 @@ func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
partialResults := make([][]*Result, numSlices)
|
partialResults := make([][]Result, numSlices)
|
||||||
for _ = range slices {
|
for _ = range slices {
|
||||||
partialResult := <-resultChan
|
partialResult := <-resultChan
|
||||||
partialResults[partialResult.index] = partialResult.matches
|
partialResults[partialResult.index] = partialResult.matches
|
||||||
|
|||||||
@@ -3,14 +3,14 @@ package fzf
|
|||||||
import "fmt"
|
import "fmt"
|
||||||
|
|
||||||
// EmptyMerger is a Merger with no data
|
// EmptyMerger is a Merger with no data
|
||||||
var EmptyMerger = NewMerger(nil, [][]*Result{}, false, false)
|
var EmptyMerger = NewMerger(nil, [][]Result{}, false, false)
|
||||||
|
|
||||||
// Merger holds a set of locally sorted lists of items and provides the view of
|
// Merger holds a set of locally sorted lists of items and provides the view of
|
||||||
// a single, globally-sorted list
|
// a single, globally-sorted list
|
||||||
type Merger struct {
|
type Merger struct {
|
||||||
pattern *Pattern
|
pattern *Pattern
|
||||||
lists [][]*Result
|
lists [][]Result
|
||||||
merged []*Result
|
merged []Result
|
||||||
chunks *[]*Chunk
|
chunks *[]*Chunk
|
||||||
cursors []int
|
cursors []int
|
||||||
sorted bool
|
sorted bool
|
||||||
@@ -29,17 +29,17 @@ func PassMerger(chunks *[]*Chunk, tac bool) *Merger {
|
|||||||
count: 0}
|
count: 0}
|
||||||
|
|
||||||
for _, chunk := range *mg.chunks {
|
for _, chunk := range *mg.chunks {
|
||||||
mg.count += len(*chunk)
|
mg.count += chunk.count
|
||||||
}
|
}
|
||||||
return &mg
|
return &mg
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMerger returns a new Merger
|
// NewMerger returns a new Merger
|
||||||
func NewMerger(pattern *Pattern, lists [][]*Result, sorted bool, tac bool) *Merger {
|
func NewMerger(pattern *Pattern, lists [][]Result, sorted bool, tac bool) *Merger {
|
||||||
mg := Merger{
|
mg := Merger{
|
||||||
pattern: pattern,
|
pattern: pattern,
|
||||||
lists: lists,
|
lists: lists,
|
||||||
merged: []*Result{},
|
merged: []Result{},
|
||||||
chunks: nil,
|
chunks: nil,
|
||||||
cursors: make([]int, len(lists)),
|
cursors: make([]int, len(lists)),
|
||||||
sorted: sorted,
|
sorted: sorted,
|
||||||
@@ -59,13 +59,13 @@ func (mg *Merger) Length() int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get returns the pointer to the Result object indexed by the given integer
|
// Get returns the pointer to the Result object indexed by the given integer
|
||||||
func (mg *Merger) Get(idx int) *Result {
|
func (mg *Merger) Get(idx int) Result {
|
||||||
if mg.chunks != nil {
|
if mg.chunks != nil {
|
||||||
if mg.tac {
|
if mg.tac {
|
||||||
idx = mg.count - idx - 1
|
idx = mg.count - idx - 1
|
||||||
}
|
}
|
||||||
chunk := (*mg.chunks)[idx/chunkSize]
|
chunk := (*mg.chunks)[idx/chunkSize]
|
||||||
return &Result{item: (*chunk)[idx%chunkSize]}
|
return Result{item: &chunk.items[idx%chunkSize]}
|
||||||
}
|
}
|
||||||
|
|
||||||
if mg.sorted {
|
if mg.sorted {
|
||||||
@@ -89,7 +89,7 @@ func (mg *Merger) cacheable() bool {
|
|||||||
return mg.count < mergerCacheMax
|
return mg.count < mergerCacheMax
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mg *Merger) mergedGet(idx int) *Result {
|
func (mg *Merger) mergedGet(idx int) Result {
|
||||||
for i := len(mg.merged); i <= idx; i++ {
|
for i := len(mg.merged); i <= idx; i++ {
|
||||||
minRank := minRank()
|
minRank := minRank()
|
||||||
minIdx := -1
|
minIdx := -1
|
||||||
@@ -100,7 +100,7 @@ func (mg *Merger) mergedGet(idx int) *Result {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if cursor >= 0 {
|
if cursor >= 0 {
|
||||||
rank := list[cursor].rank
|
rank := list[cursor]
|
||||||
if minIdx < 0 || compareRanks(rank, minRank, mg.tac) {
|
if minIdx < 0 || compareRanks(rank, minRank, mg.tac) {
|
||||||
minRank = rank
|
minRank = rank
|
||||||
minIdx = listIdx
|
minIdx = listIdx
|
||||||
|
|||||||
@@ -15,11 +15,11 @@ func assert(t *testing.T, cond bool, msg ...string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func randResult() *Result {
|
func randResult() Result {
|
||||||
str := fmt.Sprintf("%d", rand.Uint32())
|
str := fmt.Sprintf("%d", rand.Uint32())
|
||||||
return &Result{
|
chars := util.ToChars([]byte(str))
|
||||||
item: &Item{text: util.RunesToChars([]rune(str))},
|
chars.Index = rand.Int31()
|
||||||
rank: rank{index: rand.Int31()}}
|
return Result{item: &Item{text: chars}}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEmptyMerger(t *testing.T) {
|
func TestEmptyMerger(t *testing.T) {
|
||||||
@@ -29,14 +29,14 @@ func TestEmptyMerger(t *testing.T) {
|
|||||||
assert(t, len(EmptyMerger.merged) == 0, "Invalid merged list")
|
assert(t, len(EmptyMerger.merged) == 0, "Invalid merged list")
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildLists(partiallySorted bool) ([][]*Result, []*Result) {
|
func buildLists(partiallySorted bool) ([][]Result, []Result) {
|
||||||
numLists := 4
|
numLists := 4
|
||||||
lists := make([][]*Result, numLists)
|
lists := make([][]Result, numLists)
|
||||||
cnt := 0
|
cnt := 0
|
||||||
for i := 0; i < numLists; i++ {
|
for i := 0; i < numLists; i++ {
|
||||||
numResults := rand.Int() % 20
|
numResults := rand.Int() % 20
|
||||||
cnt += numResults
|
cnt += numResults
|
||||||
lists[i] = make([]*Result, numResults)
|
lists[i] = make([]Result, numResults)
|
||||||
for j := 0; j < numResults; j++ {
|
for j := 0; j < numResults; j++ {
|
||||||
item := randResult()
|
item := randResult()
|
||||||
lists[i][j] = item
|
lists[i][j] = item
|
||||||
@@ -45,7 +45,7 @@ func buildLists(partiallySorted bool) ([][]*Result, []*Result) {
|
|||||||
sort.Sort(ByRelevance(lists[i]))
|
sort.Sort(ByRelevance(lists[i]))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
items := []*Result{}
|
items := []Result{}
|
||||||
for _, list := range lists {
|
for _, list := range lists {
|
||||||
items = append(items, list...)
|
items = append(items, list...)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -962,7 +962,11 @@ func parseOptions(opts *Options, allArgs []string) {
|
|||||||
case "--algo":
|
case "--algo":
|
||||||
opts.FuzzyAlgo = parseAlgo(nextString(allArgs, &i, "algorithm required (v1|v2)"))
|
opts.FuzzyAlgo = parseAlgo(nextString(allArgs, &i, "algorithm required (v1|v2)"))
|
||||||
case "--expect":
|
case "--expect":
|
||||||
opts.Expect = parseKeyChords(nextString(allArgs, &i, "key names required"), "key names required")
|
for k, v := range parseKeyChords(nextString(allArgs, &i, "key names required"), "key names required") {
|
||||||
|
opts.Expect[k] = v
|
||||||
|
}
|
||||||
|
case "--no-expect":
|
||||||
|
opts.Expect = make(map[int]string)
|
||||||
case "--tiebreak":
|
case "--tiebreak":
|
||||||
opts.Criteria = parseTiebreak(nextString(allArgs, &i, "sort criterion required"))
|
opts.Criteria = parseTiebreak(nextString(allArgs, &i, "sort criterion required"))
|
||||||
case "--bind":
|
case "--bind":
|
||||||
@@ -1138,7 +1142,9 @@ func parseOptions(opts *Options, allArgs []string) {
|
|||||||
} else if match, value := optString(arg, "--toggle-sort="); match {
|
} else if match, value := optString(arg, "--toggle-sort="); match {
|
||||||
parseToggleSort(opts.Keymap, value)
|
parseToggleSort(opts.Keymap, value)
|
||||||
} else if match, value := optString(arg, "--expect="); match {
|
} else if match, value := optString(arg, "--expect="); match {
|
||||||
opts.Expect = parseKeyChords(value, "key names required")
|
for k, v := range parseKeyChords(value, "key names required") {
|
||||||
|
opts.Expect[k] = v
|
||||||
|
}
|
||||||
} else if match, value := optString(arg, "--tiebreak="); match {
|
} else if match, value := optString(arg, "--tiebreak="); match {
|
||||||
opts.Criteria = parseTiebreak(value)
|
opts.Criteria = parseTiebreak(value)
|
||||||
} else if match, value := optString(arg, "--color="); match {
|
} else if match, value := optString(arg, "--color="); match {
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/junegunn/fzf/src/tui"
|
"github.com/junegunn/fzf/src/tui"
|
||||||
"github.com/junegunn/fzf/src/util"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestDelimiterRegex(t *testing.T) {
|
func TestDelimiterRegex(t *testing.T) {
|
||||||
@@ -44,7 +43,7 @@ func TestDelimiterRegex(t *testing.T) {
|
|||||||
|
|
||||||
func TestDelimiterRegexString(t *testing.T) {
|
func TestDelimiterRegexString(t *testing.T) {
|
||||||
delim := delimiterRegexp("*")
|
delim := delimiterRegexp("*")
|
||||||
tokens := Tokenize(util.RunesToChars([]rune("-*--*---**---")), delim)
|
tokens := Tokenize("-*--*---**---", delim)
|
||||||
if delim.regex != nil ||
|
if delim.regex != nil ||
|
||||||
tokens[0].text.ToString() != "-*" ||
|
tokens[0].text.ToString() != "-*" ||
|
||||||
tokens[1].text.ToString() != "--*" ||
|
tokens[1].text.ToString() != "--*" ||
|
||||||
@@ -57,7 +56,7 @@ func TestDelimiterRegexString(t *testing.T) {
|
|||||||
|
|
||||||
func TestDelimiterRegexRegex(t *testing.T) {
|
func TestDelimiterRegexRegex(t *testing.T) {
|
||||||
delim := delimiterRegexp("--\\*")
|
delim := delimiterRegexp("--\\*")
|
||||||
tokens := Tokenize(util.RunesToChars([]rune("-*--*---**---")), delim)
|
tokens := Tokenize("-*--*---**---", delim)
|
||||||
if delim.str != nil ||
|
if delim.str != nil ||
|
||||||
tokens[0].text.ToString() != "-*--*" ||
|
tokens[0].text.ToString() != "-*--*" ||
|
||||||
tokens[1].text.ToString() != "---*" ||
|
tokens[1].text.ToString() != "---*" ||
|
||||||
@@ -415,3 +414,10 @@ func TestPreviewOpts(t *testing.T) {
|
|||||||
t.Error(opts.Preview)
|
t.Error(opts.Preview)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAdditiveExpect(t *testing.T) {
|
||||||
|
opts := optsFor("--expect=a", "--expect", "b", "--expect=c")
|
||||||
|
if len(opts.Expect) != 3 {
|
||||||
|
t.Error(opts.Expect)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
130
src/pattern.go
130
src/pattern.go
@@ -10,12 +10,12 @@ import (
|
|||||||
|
|
||||||
// fuzzy
|
// fuzzy
|
||||||
// 'exact
|
// 'exact
|
||||||
// ^exact-prefix
|
// ^prefix-exact
|
||||||
// exact-suffix$
|
// suffix-exact$
|
||||||
// !not-fuzzy
|
// !inverse-exact
|
||||||
// !'not-exact
|
// !'inverse-fuzzy
|
||||||
// !^not-exact-prefix
|
// !^inverse-prefix-exact
|
||||||
// !not-exact-suffix$
|
// !inverse-suffix-exact$
|
||||||
|
|
||||||
type termType int
|
type termType int
|
||||||
|
|
||||||
@@ -32,7 +32,6 @@ type term struct {
|
|||||||
inv bool
|
inv bool
|
||||||
text []rune
|
text []rune
|
||||||
caseSensitive bool
|
caseSensitive bool
|
||||||
origText []rune
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type termSet []term
|
type termSet []term
|
||||||
@@ -48,6 +47,7 @@ type Pattern struct {
|
|||||||
text []rune
|
text []rune
|
||||||
termSets []termSet
|
termSets []termSet
|
||||||
cacheable bool
|
cacheable bool
|
||||||
|
cacheKey string
|
||||||
delimiter Delimiter
|
delimiter Delimiter
|
||||||
nth []Range
|
nth []Range
|
||||||
procFun map[termType]algo.Algo
|
procFun map[termType]algo.Algo
|
||||||
@@ -60,7 +60,7 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
_splitRegex = regexp.MustCompile("\\s+")
|
_splitRegex = regexp.MustCompile(" +")
|
||||||
clearPatternCache()
|
clearPatternCache()
|
||||||
clearChunkCache()
|
clearChunkCache()
|
||||||
}
|
}
|
||||||
@@ -81,7 +81,10 @@ func BuildPattern(fuzzy bool, fuzzyAlgo algo.Algo, extended bool, caseMode Case,
|
|||||||
|
|
||||||
var asString string
|
var asString string
|
||||||
if extended {
|
if extended {
|
||||||
asString = strings.Trim(string(runes), " ")
|
asString = strings.TrimLeft(string(runes), " ")
|
||||||
|
for strings.HasSuffix(asString, " ") && !strings.HasSuffix(asString, "\\ ") {
|
||||||
|
asString = asString[:len(asString)-1]
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
asString = string(runes)
|
asString = string(runes)
|
||||||
}
|
}
|
||||||
@@ -101,7 +104,7 @@ func BuildPattern(fuzzy bool, fuzzyAlgo algo.Algo, extended bool, caseMode Case,
|
|||||||
for idx, term := range termSet {
|
for idx, term := range termSet {
|
||||||
// If the query contains inverse search terms or OR operators,
|
// If the query contains inverse search terms or OR operators,
|
||||||
// we cannot cache the search scope
|
// we cannot cache the search scope
|
||||||
if !cacheable || idx > 0 || term.inv || !fuzzy && term.typ != termExact {
|
if !cacheable || idx > 0 || term.inv || fuzzy && term.typ != termFuzzy || !fuzzy && term.typ != termExact {
|
||||||
cacheable = false
|
cacheable = false
|
||||||
break Loop
|
break Loop
|
||||||
}
|
}
|
||||||
@@ -130,6 +133,7 @@ func BuildPattern(fuzzy bool, fuzzyAlgo algo.Algo, extended bool, caseMode Case,
|
|||||||
delimiter: delimiter,
|
delimiter: delimiter,
|
||||||
procFun: make(map[termType]algo.Algo)}
|
procFun: make(map[termType]algo.Algo)}
|
||||||
|
|
||||||
|
ptr.cacheKey = ptr.buildCacheKey()
|
||||||
ptr.procFun[termFuzzy] = fuzzyAlgo
|
ptr.procFun[termFuzzy] = fuzzyAlgo
|
||||||
ptr.procFun[termEqual] = algo.EqualMatch
|
ptr.procFun[termEqual] = algo.EqualMatch
|
||||||
ptr.procFun[termExact] = algo.ExactMatchNaive
|
ptr.procFun[termExact] = algo.ExactMatchNaive
|
||||||
@@ -141,27 +145,30 @@ func BuildPattern(fuzzy bool, fuzzyAlgo algo.Algo, extended bool, caseMode Case,
|
|||||||
}
|
}
|
||||||
|
|
||||||
func parseTerms(fuzzy bool, caseMode Case, normalize bool, str string) []termSet {
|
func parseTerms(fuzzy bool, caseMode Case, normalize bool, str string) []termSet {
|
||||||
|
str = strings.Replace(str, "\\ ", "\t", -1)
|
||||||
tokens := _splitRegex.Split(str, -1)
|
tokens := _splitRegex.Split(str, -1)
|
||||||
sets := []termSet{}
|
sets := []termSet{}
|
||||||
set := termSet{}
|
set := termSet{}
|
||||||
switchSet := false
|
switchSet := false
|
||||||
|
afterBar := false
|
||||||
for _, token := range tokens {
|
for _, token := range tokens {
|
||||||
typ, inv, text := termFuzzy, false, token
|
typ, inv, text := termFuzzy, false, strings.Replace(token, "\t", " ", -1)
|
||||||
lowerText := strings.ToLower(text)
|
lowerText := strings.ToLower(text)
|
||||||
caseSensitive := caseMode == CaseRespect ||
|
caseSensitive := caseMode == CaseRespect ||
|
||||||
caseMode == CaseSmart && text != lowerText
|
caseMode == CaseSmart && text != lowerText
|
||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
text = lowerText
|
text = lowerText
|
||||||
}
|
}
|
||||||
origText := []rune(text)
|
|
||||||
if !fuzzy {
|
if !fuzzy {
|
||||||
typ = termExact
|
typ = termExact
|
||||||
}
|
}
|
||||||
|
|
||||||
if text == "|" {
|
if len(set) > 0 && !afterBar && text == "|" {
|
||||||
switchSet = false
|
switchSet = false
|
||||||
|
afterBar = true
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
afterBar = false
|
||||||
|
|
||||||
if strings.HasPrefix(text, "!") {
|
if strings.HasPrefix(text, "!") {
|
||||||
inv = true
|
inv = true
|
||||||
@@ -169,6 +176,11 @@ func parseTerms(fuzzy bool, caseMode Case, normalize bool, str string) []termSet
|
|||||||
text = text[1:]
|
text = text[1:]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if text != "$" && strings.HasSuffix(text, "$") {
|
||||||
|
typ = termSuffix
|
||||||
|
text = text[:len(text)-1]
|
||||||
|
}
|
||||||
|
|
||||||
if strings.HasPrefix(text, "'") {
|
if strings.HasPrefix(text, "'") {
|
||||||
// Flip exactness
|
// Flip exactness
|
||||||
if fuzzy && !inv {
|
if fuzzy && !inv {
|
||||||
@@ -179,16 +191,12 @@ func parseTerms(fuzzy bool, caseMode Case, normalize bool, str string) []termSet
|
|||||||
text = text[1:]
|
text = text[1:]
|
||||||
}
|
}
|
||||||
} else if strings.HasPrefix(text, "^") {
|
} else if strings.HasPrefix(text, "^") {
|
||||||
if strings.HasSuffix(text, "$") {
|
if typ == termSuffix {
|
||||||
typ = termEqual
|
typ = termEqual
|
||||||
text = text[1 : len(text)-1]
|
|
||||||
} else {
|
} else {
|
||||||
typ = termPrefix
|
typ = termPrefix
|
||||||
text = text[1:]
|
|
||||||
}
|
}
|
||||||
} else if strings.HasSuffix(text, "$") {
|
text = text[1:]
|
||||||
typ = termSuffix
|
|
||||||
text = text[:len(text)-1]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(text) > 0 {
|
if len(text) > 0 {
|
||||||
@@ -204,8 +212,7 @@ func parseTerms(fuzzy bool, caseMode Case, normalize bool, str string) []termSet
|
|||||||
typ: typ,
|
typ: typ,
|
||||||
inv: inv,
|
inv: inv,
|
||||||
text: textRunes,
|
text: textRunes,
|
||||||
caseSensitive: caseSensitive,
|
caseSensitive: caseSensitive})
|
||||||
origText: origText})
|
|
||||||
switchSet = true
|
switchSet = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -228,46 +235,36 @@ func (p *Pattern) AsString() string {
|
|||||||
return string(p.text)
|
return string(p.text)
|
||||||
}
|
}
|
||||||
|
|
||||||
// CacheKey is used to build string to be used as the key of result cache
|
func (p *Pattern) buildCacheKey() string {
|
||||||
func (p *Pattern) CacheKey() string {
|
|
||||||
if !p.extended {
|
if !p.extended {
|
||||||
return p.AsString()
|
return p.AsString()
|
||||||
}
|
}
|
||||||
cacheableTerms := []string{}
|
cacheableTerms := []string{}
|
||||||
for _, termSet := range p.termSets {
|
for _, termSet := range p.termSets {
|
||||||
if len(termSet) == 1 && !termSet[0].inv && (p.fuzzy || termSet[0].typ == termExact) {
|
if len(termSet) == 1 && !termSet[0].inv && (p.fuzzy || termSet[0].typ == termExact) {
|
||||||
cacheableTerms = append(cacheableTerms, string(termSet[0].origText))
|
cacheableTerms = append(cacheableTerms, string(termSet[0].text))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return strings.Join(cacheableTerms, " ")
|
return strings.Join(cacheableTerms, "\t")
|
||||||
|
}
|
||||||
|
|
||||||
|
// CacheKey is used to build string to be used as the key of result cache
|
||||||
|
func (p *Pattern) CacheKey() string {
|
||||||
|
return p.cacheKey
|
||||||
}
|
}
|
||||||
|
|
||||||
// Match returns the list of matches Items in the given Chunk
|
// Match returns the list of matches Items in the given Chunk
|
||||||
func (p *Pattern) Match(chunk *Chunk, slab *util.Slab) []*Result {
|
func (p *Pattern) Match(chunk *Chunk, slab *util.Slab) []Result {
|
||||||
// ChunkCache: Exact match
|
// ChunkCache: Exact match
|
||||||
cacheKey := p.CacheKey()
|
cacheKey := p.CacheKey()
|
||||||
if p.cacheable {
|
if p.cacheable {
|
||||||
if cached, found := _cache.Find(chunk, cacheKey); found {
|
if cached := _cache.Lookup(chunk, cacheKey); cached != nil {
|
||||||
return cached
|
return cached
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Prefix/suffix cache
|
// Prefix/suffix cache
|
||||||
var space []*Result
|
space := _cache.Search(chunk, cacheKey)
|
||||||
Loop:
|
|
||||||
for idx := 1; idx < len(cacheKey); idx++ {
|
|
||||||
// [---------| ] | [ |---------]
|
|
||||||
// [--------| ] | [ |--------]
|
|
||||||
// [-------| ] | [ |-------]
|
|
||||||
prefix := cacheKey[:len(cacheKey)-idx]
|
|
||||||
suffix := cacheKey[idx:]
|
|
||||||
for _, substr := range [2]*string{&prefix, &suffix} {
|
|
||||||
if cached, found := _cache.Find(chunk, *substr); found {
|
|
||||||
space = cached
|
|
||||||
break Loop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
matches := p.matchChunk(chunk, space, slab)
|
matches := p.matchChunk(chunk, space, slab)
|
||||||
|
|
||||||
@@ -277,19 +274,19 @@ Loop:
|
|||||||
return matches
|
return matches
|
||||||
}
|
}
|
||||||
|
|
||||||
-func (p *Pattern) matchChunk(chunk *Chunk, space []*Result, slab *util.Slab) []*Result {
-matches := []*Result{}
+func (p *Pattern) matchChunk(chunk *Chunk, space []Result, slab *util.Slab) []Result {
+matches := []Result{}

 if space == nil {
-for _, item := range *chunk {
-if match, _, _ := p.MatchItem(item, false, slab); match != nil {
-matches = append(matches, match)
+for idx := 0; idx < chunk.count; idx++ {
+if match, _, _ := p.MatchItem(&chunk.items[idx], false, slab); match != nil {
+matches = append(matches, *match)
 }
 }
 } else {
 for _, result := range space {
 if match, _, _ := p.MatchItem(result.item, false, slab); match != nil {
-matches = append(matches, match)
+matches = append(matches, *match)
 }
 }
 }
@@ -300,20 +297,27 @@ func (p *Pattern) matchChunk(chunk *Chunk, space []*Result, slab *util.Slab) []*
 func (p *Pattern) MatchItem(item *Item, withPos bool, slab *util.Slab) (*Result, []Offset, *[]int) {
 if p.extended {
 if offsets, bonus, pos := p.extendedMatch(item, withPos, slab); len(offsets) == len(p.termSets) {
-return buildResult(item, offsets, bonus), offsets, pos
+result := buildResult(item, offsets, bonus)
+return &result, offsets, pos
 }
 return nil, nil, nil
 }
 offset, bonus, pos := p.basicMatch(item, withPos, slab)
 if sidx := offset[0]; sidx >= 0 {
 offsets := []Offset{offset}
-return buildResult(item, offsets, bonus), offsets, pos
+result := buildResult(item, offsets, bonus)
+return &result, offsets, pos
 }
 return nil, nil, nil
 }

 func (p *Pattern) basicMatch(item *Item, withPos bool, slab *util.Slab) (Offset, int, *[]int) {
-input := p.prepareInput(item)
+var input []Token
+if len(p.nth) == 0 {
+input = []Token{Token{text: &item.text, prefixLength: 0}}
+} else {
+input = p.transformInput(item)
+}
 if p.fuzzy {
 return p.iter(p.fuzzyAlgo, input, p.caseSensitive, p.normalize, p.forward, p.text, withPos, slab)
 }
@@ -321,7 +325,12 @@ func (p *Pattern) basicMatch(item *Item, withPos bool, slab *util.Slab) (Offset,
 }

 func (p *Pattern) extendedMatch(item *Item, withPos bool, slab *util.Slab) ([]Offset, int, *[]int) {
-input := p.prepareInput(item)
+var input []Token
+if len(p.nth) == 0 {
+input = []Token{Token{text: &item.text, prefixLength: 0}}
+} else {
+input = p.transformInput(item)
+}
 offsets := []Offset{}
 var totalScore int
 var allPos *[]int
@@ -365,25 +374,20 @@ func (p *Pattern) extendedMatch(item *Item, withPos bool, slab *util.Slab) ([]Of
 return offsets, totalScore, allPos
 }

-func (p *Pattern) prepareInput(item *Item) []Token {
+func (p *Pattern) transformInput(item *Item) []Token {
 if item.transformed != nil {
-return item.transformed
+return *item.transformed
 }

-var ret []Token
-if len(p.nth) == 0 {
-ret = []Token{Token{text: &item.text, prefixLength: 0}}
-} else {
-tokens := Tokenize(item.text, p.delimiter)
-ret = Transform(tokens, p.nth)
-}
-item.transformed = ret
+tokens := Tokenize(item.text.ToString(), p.delimiter)
+ret := Transform(tokens, p.nth)
+item.transformed = &ret
 return ret
 }

 func (p *Pattern) iter(pfun algo.Algo, tokens []Token, caseSensitive bool, normalize bool, forward bool, pattern []rune, withPos bool, slab *util.Slab) (Offset, int, *[]int) {
 for _, part := range tokens {
-if res, pos := pfun(caseSensitive, normalize, forward, *part.text, pattern, withPos, slab); res.Start >= 0 {
+if res, pos := pfun(caseSensitive, normalize, forward, part.text, pattern, withPos, slab); res.Start >= 0 {
 sidx := int32(res.Start) + part.prefixLength
 eidx := int32(res.End) + part.prefixLength
 if pos != nil {
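Note on the pattern.go changes above: `prepareInput` becomes `transformInput`, which now only handles the `--nth` case and memoizes the transformed tokens on the Item itself (`item.transformed` is a pointer, so the cached slice is reused on later matches instead of being recomputed). A minimal, self-contained sketch of that memoization pattern, with illustrative names rather than the actual fzf types:

```go
package main

import (
	"fmt"
	"strings"
)

// item caches the result of an expensive transformation the first time it is
// requested; subsequent matches against the same item reuse the cached slice.
type item struct {
	text        string
	transformed *[]string // nil until computed
}

func (it *item) tokens() []string {
	if it.transformed != nil {
		return *it.transformed
	}
	toks := strings.Fields(it.text) // stand-in for Tokenize + Transform
	it.transformed = &toks
	return toks
}

func main() {
	it := &item{text: "usr local bin fzf"}
	fmt.Println(it.tokens()) // computed on first use
	fmt.Println(it.tokens()) // served from the cache
}
```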
@@ -16,7 +16,7 @@ func init() {

 func TestParseTermsExtended(t *testing.T) {
 terms := parseTerms(true, CaseSmart, false,
-"| aaa 'bbb ^ccc ddd$ !eee !'fff !^ggg !hhh$ | ^iii$ ^xxx | 'yyy | | zzz$ | !ZZZ |")
+"aaa 'bbb ^ccc ddd$ !eee !'fff !^ggg !hhh$ | ^iii$ ^xxx | 'yyy | zzz$ | !ZZZ |")
 if len(terms) != 9 ||
 terms[0][0].typ != termFuzzy || terms[0][0].inv ||
 terms[1][0].typ != termExact || terms[1][0].inv ||
@@ -33,19 +33,11 @@ func TestParseTermsExtended(t *testing.T) {
 terms[8][3].typ != termExact || !terms[8][3].inv {
 t.Errorf("%s", terms)
 }
-for idx, termSet := range terms[:8] {
+for _, termSet := range terms[:8] {
 term := termSet[0]
 if len(term.text) != 3 {
 t.Errorf("%s", term)
 }
-if idx > 0 && len(term.origText) != 4+idx/5 {
-t.Errorf("%s", term)
-}
-}
-for _, term := range terms[8] {
-if len(term.origText) != 4 {
-t.Errorf("%s", term)
-}
 }
 }

@@ -66,7 +58,7 @@ func TestParseTermsExtendedExact(t *testing.T) {
 }

 func TestParseTermsEmpty(t *testing.T) {
-terms := parseTerms(true, CaseSmart, false, "' $ ^ !' !^ !$")
+terms := parseTerms(true, CaseSmart, false, "' ^ !' !^")
 if len(terms) != 0 {
 t.Errorf("%s", terms)
 }
@@ -77,8 +69,9 @@ func TestExact(t *testing.T) {
 clearPatternCache()
 pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, false, true, true,
 []Range{}, Delimiter{}, []rune("'abc"))
+chars := util.ToChars([]byte("aabbcc abc"))
 res, pos := algo.ExactMatchNaive(
-pattern.caseSensitive, pattern.normalize, pattern.forward, util.RunesToChars([]rune("aabbcc abc")), pattern.termSets[0][0].text, true, nil)
+pattern.caseSensitive, pattern.normalize, pattern.forward, &chars, pattern.termSets[0][0].text, true, nil)
 if res.Start != 7 || res.End != 10 {
 t.Errorf("%s / %d / %d", pattern.termSets, res.Start, res.End)
 }
@@ -93,8 +86,9 @@ func TestEqual(t *testing.T) {
 pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, false, true, true, []Range{}, Delimiter{}, []rune("^AbC$"))

 match := func(str string, sidxExpected int, eidxExpected int) {
+chars := util.ToChars([]byte(str))
 res, pos := algo.EqualMatch(
-pattern.caseSensitive, pattern.normalize, pattern.forward, util.RunesToChars([]rune(str)), pattern.termSets[0][0].text, true, nil)
+pattern.caseSensitive, pattern.normalize, pattern.forward, &chars, pattern.termSets[0][0].text, true, nil)
 if res.Start != sidxExpected || res.End != eidxExpected {
 t.Errorf("%s / %d / %d", pattern.termSets, res.Start, res.End)
 }
@@ -133,30 +127,29 @@ func TestCaseSensitivity(t *testing.T) {

 func TestOrigTextAndTransformed(t *testing.T) {
 pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, false, true, true, []Range{}, Delimiter{}, []rune("jg"))
-tokens := Tokenize(util.RunesToChars([]rune("junegunn")), Delimiter{})
+tokens := Tokenize("junegunn", Delimiter{})
 trans := Transform(tokens, []Range{Range{1, 1}})

 origBytes := []byte("junegunn.choi")
 for _, extended := range []bool{false, true} {
-chunk := Chunk{
-&Item{
-text: util.RunesToChars([]rune("junegunn")),
+chunk := Chunk{count: 1}
+chunk.items[0] = Item{
+text: util.ToChars([]byte("junegunn")),
 origText: &origBytes,
-transformed: trans},
-}
+transformed: &trans}
 pattern.extended = extended
 matches := pattern.matchChunk(&chunk, nil, slab) // No cache
 if !(matches[0].item.text.ToString() == "junegunn" &&
 string(*matches[0].item.origText) == "junegunn.choi" &&
-reflect.DeepEqual(matches[0].item.transformed, trans)) {
+reflect.DeepEqual(*matches[0].item.transformed, trans)) {
 t.Error("Invalid match result", matches)
 }

-match, offsets, pos := pattern.MatchItem(chunk[0], true, slab)
+match, offsets, pos := pattern.MatchItem(&chunk.items[0], true, slab)
 if !(match.item.text.ToString() == "junegunn" &&
 string(*match.item.origText) == "junegunn.choi" &&
 offsets[0][0] == 0 && offsets[0][1] == 5 &&
-reflect.DeepEqual(match.item.transformed, trans)) {
+reflect.DeepEqual(*match.item.transformed, trans)) {
 t.Error("Invalid match result", match, offsets, extended)
 }
 if !((*pos)[0] == 4 && (*pos)[1] == 0) {
@@ -167,40 +160,47 @@ func TestOrigTextAndTransformed(t *testing.T) {

 func TestCacheKey(t *testing.T) {
 test := func(extended bool, patStr string, expected string, cacheable bool) {
+clearPatternCache()
 pat := BuildPattern(true, algo.FuzzyMatchV2, extended, CaseSmart, false, true, true, []Range{}, Delimiter{}, []rune(patStr))
 if pat.CacheKey() != expected {
 t.Errorf("Expected: %s, actual: %s", expected, pat.CacheKey())
 }
 if pat.cacheable != cacheable {
-t.Errorf("Expected: %s, actual: %s (%s)", cacheable, pat.cacheable, patStr)
+t.Errorf("Expected: %t, actual: %t (%s)", cacheable, pat.cacheable, patStr)
 }
 clearPatternCache()
 }
 test(false, "foo !bar", "foo !bar", true)
 test(false, "foo | bar !baz", "foo | bar !baz", true)
-test(true, "foo bar baz", "foo bar baz", true)
+test(true, "foo bar baz", "foo\tbar\tbaz", true)
 test(true, "foo !bar", "foo", false)
-test(true, "foo !bar baz", "foo baz", false)
+test(true, "foo !bar baz", "foo\tbaz", false)
 test(true, "foo | bar baz", "baz", false)
 test(true, "foo | bar | baz", "", false)
 test(true, "foo | bar !baz", "", false)
-test(true, "| | | foo", "foo", true)
+test(true, "| | foo", "", false)
+test(true, "| | | foo", "foo", false)
 }

 func TestCacheable(t *testing.T) {
-test := func(fuzzy bool, str string, cacheable bool) {
+test := func(fuzzy bool, str string, expected string, cacheable bool) {
 clearPatternCache()
 pat := BuildPattern(fuzzy, algo.FuzzyMatchV2, true, CaseSmart, true, true, true, []Range{}, Delimiter{}, []rune(str))
+if pat.CacheKey() != expected {
+t.Errorf("Expected: %s, actual: %s", expected, pat.CacheKey())
+}
 if cacheable != pat.cacheable {
 t.Errorf("Invalid Pattern.cacheable for \"%s\": %v (expected: %v)", str, pat.cacheable, cacheable)
 }
+clearPatternCache()
 }
-test(true, "foo bar", true)
-test(true, "foo 'bar", true)
-test(true, "foo !bar", false)
+test(true, "foo bar", "foo\tbar", true)
+test(true, "foo 'bar", "foo\tbar", false)
+test(true, "foo !bar", "foo", false)

-test(false, "foo bar", true)
-test(false, "foo '", true)
-test(false, "foo 'bar", false)
-test(false, "foo !bar", false)
+test(false, "foo bar", "foo\tbar", true)
+test(false, "foo 'bar", "foo", false)
+test(false, "foo '", "foo", true)
+test(false, "foo 'bar", "foo", false)
+test(false, "foo !bar", "foo", false)
 }
@@ -4,6 +4,8 @@ import (
 "bufio"
 "io"
 "os"
+"sync/atomic"
+"time"

 "github.com/junegunn/fzf/src/util"
 )
@@ -13,20 +15,54 @@ type Reader struct {
 pusher func([]byte) bool
 eventBox *util.EventBox
 delimNil bool
+event int32
+}
+
+// NewReader returns new Reader object
+func NewReader(pusher func([]byte) bool, eventBox *util.EventBox, delimNil bool) *Reader {
+return &Reader{pusher, eventBox, delimNil, int32(EvtReady)}
+}
+
+func (r *Reader) startEventPoller() {
+go func() {
+ptr := &r.event
+pollInterval := readerPollIntervalMin
+for {
+if atomic.CompareAndSwapInt32(ptr, int32(EvtReadNew), int32(EvtReady)) {
+r.eventBox.Set(EvtReadNew, true)
+pollInterval = readerPollIntervalMin
+} else if atomic.LoadInt32(ptr) == int32(EvtReadFin) {
+return
+} else {
+pollInterval += readerPollIntervalStep
+if pollInterval > readerPollIntervalMax {
+pollInterval = readerPollIntervalMax
+}
+}
+time.Sleep(pollInterval)
+}
+}()
+}
+
+func (r *Reader) fin(success bool) {
+atomic.StoreInt32(&r.event, int32(EvtReadFin))
+r.eventBox.Set(EvtReadFin, success)
 }

 // ReadSource reads data from the default command or from standard input
 func (r *Reader) ReadSource() {
+r.startEventPoller()
+var success bool
 if util.IsTty() {
 cmd := os.Getenv("FZF_DEFAULT_COMMAND")
 if len(cmd) == 0 {
 cmd = defaultCommand
 }
-r.readFromCommand(cmd)
+success = r.readFromCommand(cmd)
 } else {
-r.readFromStdin()
+success = r.readFromStdin()
 }
-r.eventBox.Set(EvtReadFin, nil)
+r.fin(success)
 }

 func (r *Reader) feed(src io.Reader) {
@@ -40,7 +76,7 @@ func (r *Reader) feed(src io.Reader) {
 // end in delim.
 bytea, err := reader.ReadBytes(delim)
 byteaLen := len(bytea)
-if len(bytea) > 0 {
+if byteaLen > 0 {
 if err == nil {
 // get rid of carriage return if under Windows:
 if util.IsWindows() && byteaLen >= 2 && bytea[byteaLen-2] == byte('\r') {
@@ -50,7 +86,7 @@ func (r *Reader) feed(src io.Reader) {
 }
 }
 if r.pusher(bytea) {
-r.eventBox.Set(EvtReadNew, nil)
+atomic.StoreInt32(&r.event, int32(EvtReadNew))
 }
 }
 if err != nil {
@@ -59,20 +95,21 @@ func (r *Reader) feed(src io.Reader) {
 }
 }

-func (r *Reader) readFromStdin() {
+func (r *Reader) readFromStdin() bool {
 r.feed(os.Stdin)
+return true
 }

-func (r *Reader) readFromCommand(cmd string) {
+func (r *Reader) readFromCommand(cmd string) bool {
 listCommand := util.ExecCommand(cmd)
 out, err := listCommand.StdoutPipe()
 if err != nil {
-return
+return false
 }
 err = listCommand.Start()
 if err != nil {
-return
+return false
 }
-defer listCommand.Wait()
 r.feed(out)
+return listCommand.Wait() == nil
 }
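The reader no longer sets `EvtReadNew` on the EventBox for every line it pushes; it only flips an atomic flag, and the new `startEventPoller` goroutine converts that flag into at most one notification per poll interval, backing the interval off while nothing arrives. A rough standalone sketch of the same coalescing idea (constants, durations, and the print statement are made up for illustration):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

const (
	evtReady int32 = iota
	evtReadNew
	evtReadFin
)

func main() {
	var event int32 = evtReady
	done := make(chan struct{})

	// Poller: translate the atomic flag into at most one notification per tick.
	go func() {
		defer close(done)
		interval := 10 * time.Millisecond
		for {
			if atomic.CompareAndSwapInt32(&event, evtReadNew, evtReady) {
				fmt.Println("notify: new data") // stand-in for eventBox.Set(EvtReadNew, true)
				interval = 10 * time.Millisecond
			} else if atomic.LoadInt32(&event) == evtReadFin {
				return
			} else {
				if interval += 5 * time.Millisecond; interval > 50*time.Millisecond {
					interval = 50 * time.Millisecond
				}
			}
			time.Sleep(interval)
		}
	}()

	// Producer: setting the flag is cheap and can happen on every line read.
	for i := 0; i < 1000; i++ {
		atomic.StoreInt32(&event, evtReadNew)
	}
	time.Sleep(30 * time.Millisecond)
	atomic.StoreInt32(&event, evtReadFin)
	<-done
}
```

`readFromCommand` also now reports success by checking the exit status returned by `Wait()` instead of deferring it, which is what feeds the `[ERROR]` indicator added to terminal.go further down.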
@@ -2,6 +2,7 @@ package fzf

 import (
 "testing"
+"time"

 "github.com/junegunn/fzf/src/util"
 )
@@ -11,7 +12,10 @@ func TestReadFromCommand(t *testing.T) {
 eb := util.NewEventBox()
 reader := Reader{
 pusher: func(s []byte) bool { strs = append(strs, string(s)); return true },
-eventBox: eb}
+eventBox: eb,
+event: int32(EvtReady)}

+reader.startEventPoller()

 // Check EventBox
 if eb.Peek(EvtReadNew) {
@@ -19,21 +23,16 @@ func TestReadFromCommand(t *testing.T) {
 }

 // Normal command
-reader.readFromCommand(`echo abc && echo def`)
+reader.fin(reader.readFromCommand(`echo abc && echo def`))
 if len(strs) != 2 || strs[0] != "abc" || strs[1] != "def" {
 t.Errorf("%s", strs)
 }

 // Check EventBox again
-if !eb.Peek(EvtReadNew) {
-t.Error("EvtReadNew should be set yet")
-}
+eb.WaitFor(EvtReadFin)

 // Wait should return immediately
 eb.Wait(func(events *util.Events) {
-if _, found := (*events)[EvtReadNew]; !found {
-t.Errorf("%s", events)
-}
 events.Clear()
 })

@@ -42,8 +41,14 @@ func TestReadFromCommand(t *testing.T) {
 t.Error("EvtReadNew should not be set yet")
 }

+// Make sure that event poller is finished
+time.Sleep(readerPollIntervalMax)
+
+// Restart event poller
+reader.startEventPoller()
+
 // Failing command
-reader.readFromCommand(`no-such-command`)
+reader.fin(reader.readFromCommand(`no-such-command`))
 strs = []string{}
 if len(strs) > 0 {
 t.Errorf("%s", strs)
@@ -51,6 +56,9 @@ func TestReadFromCommand(t *testing.T) {

 // Check EventBox again
 if eb.Peek(EvtReadNew) {
-t.Error("Command failed. EvtReadNew should be set")
+t.Error("Command failed. EvtReadNew should not be set")
+}
+if !eb.Peek(EvtReadFin) {
+t.Error("EvtReadFin should be set")
 }
 }
@@ -19,22 +19,17 @@ type colorOffset struct {
 index int32
 }

-type rank struct {
-points [4]uint16
-index int32
-}
-
 type Result struct {
 item *Item
-rank rank
+points [4]uint16
 }

-func buildResult(item *Item, offsets []Offset, score int) *Result {
+func buildResult(item *Item, offsets []Offset, score int) Result {
 if len(offsets) > 1 {
 sort.Sort(ByOrder(offsets))
 }

-result := Result{item: item, rank: rank{index: item.index}}
+result := Result{item: item}
 numChars := item.text.Length()
 minBegin := math.MaxUint16
 minEnd := math.MaxUint16
@@ -57,7 +52,7 @@ func buildResult(item *Item, offsets []Offset, score int) *Result {
 // Higher is better
 val = math.MaxUint16 - util.AsUint16(score)
 case byLength:
-val = util.AsUint16(int(item.TrimLength()))
+val = item.TrimLength()
 case byBegin, byEnd:
 if validOffsetFound {
 whitePrefixLen := 0
@@ -75,10 +70,10 @@ func buildResult(item *Item, offsets []Offset, score int) *Result {
 }
 }
 }
-result.rank.points[idx] = val
+result.points[3-idx] = val
 }

-return &result
+return result
 }

 // Sort criteria to use. Never changes once fzf is started.
@@ -86,11 +81,11 @@ var sortCriteria []criterion

 // Index returns ordinal index of the Item
 func (result *Result) Index() int32 {
-return result.item.index
+return result.item.Index()
 }

-func minRank() rank {
-return rank{index: 0, points: [4]uint16{math.MaxUint16, 0, 0, 0}}
+func minRank() Result {
+return Result{item: &minItem, points: [4]uint16{math.MaxUint16, 0, 0, 0}}
 }

 func (result *Result) colorOffsets(matchOffsets []Offset, theme *tui.ColorTheme, color tui.ColorPair, attr tui.Attr, current bool) []colorOffset {
@@ -201,7 +196,7 @@ func (a ByOrder) Less(i, j int) bool {
 }

 // ByRelevance is for sorting Items
-type ByRelevance []*Result
+type ByRelevance []Result

 func (a ByRelevance) Len() int {
 return len(a)
@@ -212,11 +207,11 @@ func (a ByRelevance) Swap(i, j int) {
 }

 func (a ByRelevance) Less(i, j int) bool {
-return compareRanks((*a[i]).rank, (*a[j]).rank, false)
+return compareRanks(a[i], a[j], false)
 }

 // ByRelevanceTac is for sorting Items
-type ByRelevanceTac []*Result
+type ByRelevanceTac []Result

 func (a ByRelevanceTac) Len() int {
 return len(a)
@@ -227,18 +222,5 @@ func (a ByRelevanceTac) Swap(i, j int) {
 }

 func (a ByRelevanceTac) Less(i, j int) bool {
-return compareRanks((*a[i]).rank, (*a[j]).rank, true)
-}
-
-func compareRanks(irank rank, jrank rank, tac bool) bool {
-for idx := 0; idx < 4; idx++ {
-left := irank.points[idx]
-right := jrank.points[idx]
-if left < right {
-return true
-} else if left > right {
-return false
-}
-}
-return (irank.index <= jrank.index) != tac
+return compareRanks(a[i], a[j], true)
 }
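The separate `rank` struct is folded into `Result`, and note that `buildResult` now writes criterion number `idx` into `points[3-idx]`: the primary sort criterion lands in the highest slot of the array. The architecture-specific `compareRanks` implementations that follow depend on that layout. A small hedged sketch of why the slot order matters, with made-up values:

```go
package main

import "fmt"

// less compares two point vectors from the most significant slot (index 3,
// the first sort criterion) down to the least significant one (index 0).
func less(a, b [4]uint16) bool {
	for idx := 3; idx >= 0; idx-- {
		if a[idx] != b[idx] {
			return a[idx] < b[idx]
		}
	}
	return false
}

func main() {
	a := [4]uint16{0, 0, 3, 100} // points[3] holds the primary criterion
	b := [4]uint16{0, 0, 9, 99}  // better primary score, worse secondary value
	fmt.Println(less(b, a))      // true: points[3] decides before points[2]
}
```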
src/result_others.go: new file, 16 lines
@@ -0,0 +1,16 @@
+// +build !386,!amd64
+
+package fzf
+
+func compareRanks(irank Result, jrank Result, tac bool) bool {
+for idx := 3; idx >= 0; idx-- {
+left := irank.points[idx]
+right := jrank.points[idx]
+if left < right {
+return true
+} else if left > right {
+return false
+}
+}
+return (irank.item.Index() <= jrank.item.Index()) != tac
+}
@@ -11,6 +11,11 @@ import (
 "github.com/junegunn/fzf/src/util"
 )

+func withIndex(i *Item, index int) *Item {
+(*i).text.Index = int32(index)
+return i
+}
+
 func TestOffsetSort(t *testing.T) {
 offsets := []Offset{
 Offset{3, 5}, Offset{2, 7},
@@ -26,10 +31,10 @@ func TestOffsetSort(t *testing.T) {
 }

 func TestRankComparison(t *testing.T) {
-rank := func(vals ...uint16) rank {
-return rank{
+rank := func(vals ...uint16) Result {
+return Result{
 points: [4]uint16{vals[0], vals[1], vals[2], vals[3]},
-index: int32(vals[4])}
+item: &Item{text: util.Chars{Index: int32(vals[4])}}}
 }
 if compareRanks(rank(3, 0, 0, 0, 5), rank(2, 0, 0, 0, 7), false) ||
 !compareRanks(rank(3, 0, 0, 0, 5), rank(3, 0, 0, 0, 6), false) ||
@@ -52,36 +57,41 @@ func TestResultRank(t *testing.T) {
 sortCriteria = []criterion{byScore, byLength}

 strs := [][]rune{[]rune("foo"), []rune("foobar"), []rune("bar"), []rune("baz")}
-item1 := buildResult(&Item{text: util.RunesToChars(strs[0]), index: 1, trimLength: -1}, []Offset{}, 2)
-if item1.rank.points[0] != math.MaxUint16-2 || // Bonus
-item1.rank.points[1] != 3 || // Length
-item1.rank.points[2] != 0 || // Unused
-item1.rank.points[3] != 0 || // Unused
-item1.item.index != 1 {
-t.Error(item1.rank)
+item1 := buildResult(
+withIndex(&Item{text: util.RunesToChars(strs[0])}, 1), []Offset{}, 2)
+if item1.points[3] != math.MaxUint16-2 || // Bonus
+item1.points[2] != 3 || // Length
+item1.points[1] != 0 || // Unused
+item1.points[0] != 0 || // Unused
+item1.item.Index() != 1 {
+t.Error(item1)
 }
 // Only differ in index
 item2 := buildResult(&Item{text: util.RunesToChars(strs[0])}, []Offset{}, 2)

-items := []*Result{item1, item2}
+items := []Result{item1, item2}
 sort.Sort(ByRelevance(items))
 if items[0] != item2 || items[1] != item1 {
 t.Error(items)
 }

-items = []*Result{item2, item1, item1, item2}
+items = []Result{item2, item1, item1, item2}
 sort.Sort(ByRelevance(items))
 if items[0] != item2 || items[1] != item2 ||
 items[2] != item1 || items[3] != item1 {
-t.Error(items, item1, item1.item.index, item2, item2.item.index)
+t.Error(items, item1, item1.item.Index(), item2, item2.item.Index())
 }

 // Sort by relevance
-item3 := buildResult(&Item{index: 2}, []Offset{Offset{1, 3}, Offset{5, 7}}, 3)
-item4 := buildResult(&Item{index: 2}, []Offset{Offset{1, 2}, Offset{6, 7}}, 4)
-item5 := buildResult(&Item{index: 2}, []Offset{Offset{1, 3}, Offset{5, 7}}, 5)
-item6 := buildResult(&Item{index: 2}, []Offset{Offset{1, 2}, Offset{6, 7}}, 6)
-items = []*Result{item1, item2, item3, item4, item5, item6}
+item3 := buildResult(
+withIndex(&Item{}, 2), []Offset{Offset{1, 3}, Offset{5, 7}}, 3)
+item4 := buildResult(
+withIndex(&Item{}, 2), []Offset{Offset{1, 2}, Offset{6, 7}}, 4)
+item5 := buildResult(
+withIndex(&Item{}, 2), []Offset{Offset{1, 3}, Offset{5, 7}}, 5)
+item6 := buildResult(
+withIndex(&Item{}, 2), []Offset{Offset{1, 2}, Offset{6, 7}}, 6)
+items = []Result{item1, item2, item3, item4, item5, item6}
 sort.Sort(ByRelevance(items))
 if !(items[0] == item6 && items[1] == item5 &&
 items[2] == item4 && items[3] == item3 &&
src/result_x86.go: new file, 16 lines
@@ -0,0 +1,16 @@
+// +build 386 amd64
+
+package fzf
+
+import "unsafe"
+
+func compareRanks(irank Result, jrank Result, tac bool) bool {
+left := *(*uint64)(unsafe.Pointer(&irank.points[0]))
+right := *(*uint64)(unsafe.Pointer(&jrank.points[0]))
+if left < right {
+return true
+} else if left > right {
+return false
+}
+return (irank.item.Index() <= jrank.item.Index()) != tac
+}
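On 386 and amd64 the four uint16 criteria are compared in a single integer comparison by reinterpreting the array as a uint64. Because those targets are little-endian, `points[0]` becomes the low 16 bits and `points[3]` the high 16 bits, which is exactly why `buildResult` stores the primary criterion in `points[3]`. A hedged, portable illustration of the same equivalence without `unsafe` (values are invented):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// pack mirrors what the unsafe cast does on a little-endian machine:
// points[0] ends up in the low 16 bits, points[3] in the high 16 bits.
func pack(points [4]uint16) uint64 {
	var buf [8]byte
	for i, p := range points {
		binary.LittleEndian.PutUint16(buf[2*i:], p)
	}
	return binary.LittleEndian.Uint64(buf[:])
}

func main() {
	a := [4]uint16{1, 0, 0, 2} // primary criterion (index 3) = 2
	b := [4]uint16{9, 9, 9, 1} // primary criterion = 1
	// One comparison orders by points[3], then [2], [1], [0].
	fmt.Println(pack(b) < pack(a)) // true
}
```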
src/terminal.go: 110 lines changed
@@ -87,6 +87,7 @@ type Terminal struct {
 margin [4]sizeSpec
 strong tui.Attr
 bordered bool
+cleanExit bool
 border tui.Window
 window tui.Window
 pborder tui.Window
@@ -94,11 +95,13 @@ type Terminal struct {
 count int
 progress int
 reading bool
+success bool
 jumping jumpMode
 jumpLabels string
 printer func(string)
 merger *Merger
 selected map[int32]selectedItem
+version int64
 reqBox *util.EventBox
 preview previewOpts
 previewer previewer
@@ -278,9 +281,13 @@ func defaultKeymap() map[int][]action {
 return keymap
 }

+func trimQuery(query string) []rune {
+return []rune(strings.Replace(query, "\t", " ", -1))
+}
+
 // NewTerminal returns new Terminal object
 func NewTerminal(opts *Options, eventBox *util.EventBox) *Terminal {
-input := []rune(opts.Query)
+input := trimQuery(opts.Query)
 var header []string
 if opts.Reverse {
 header = opts.Header
@@ -365,6 +372,7 @@ func NewTerminal(opts *Options, eventBox *util.EventBox) *Terminal {
 history: opts.History,
 margin: opts.Margin,
 bordered: opts.Bordered,
+cleanExit: opts.ClearOnExit,
 strong: strongAttr,
 cycle: opts.Cycle,
 header: header,
@@ -372,6 +380,7 @@ func NewTerminal(opts *Options, eventBox *util.EventBox) *Terminal {
 ansi: opts.Ansi,
 tabstop: opts.Tabstop,
 reading: true,
+success: true,
 jumping: jumpDisabled,
 jumpLabels: opts.JumpLabels,
 printer: opts.Printer,
@@ -401,10 +410,11 @@ func (t *Terminal) Input() []rune {
 }

 // UpdateCount updates the count information
-func (t *Terminal) UpdateCount(cnt int, final bool) {
+func (t *Terminal) UpdateCount(cnt int, final bool, success bool) {
 t.mutex.Lock()
 t.count = cnt
 t.reading = !final
+t.success = success
 t.mutex.Unlock()
 t.reqBox.Set(reqInfo, nil)
 if final {
@@ -682,6 +692,9 @@ func (t *Terminal) printInfo() {
 if t.progress > 0 && t.progress < 100 {
 output += fmt.Sprintf(" (%d%%)", t.progress)
 }
+if !t.success && t.count == 0 {
+output += " [ERROR]"
+}
 if pos+len(output) <= t.window.Width() {
 t.window.CPrint(tui.ColInfo, 0, output)
 }
@@ -704,11 +717,11 @@ func (t *Terminal) printHeader() {
 trimmed, colors, newState := extractColor(lineStr, state, nil)
 state = newState
 item := &Item{
-text: util.RunesToChars([]rune(trimmed)),
+text: util.ToChars([]byte(trimmed)),
 colors: colors}

 t.move(line, 2, true)
-t.printHighlighted(&Result{item: item},
+t.printHighlighted(Result{item: item},
 tui.AttrRegular, tui.ColHeader, tui.ColDefault, false, false)
 }
 }
@@ -736,7 +749,7 @@ func (t *Terminal) printList() {
 }
 }

-func (t *Terminal) printItem(result *Result, line int, i int, current bool) {
+func (t *Terminal) printItem(result Result, line int, i int, current bool) {
 item := result.item
 _, selected := t.selected[item.Index()]
 label := " "
@@ -752,7 +765,7 @@ func (t *Terminal) printItem(result *Result, line int, i int, current bool) {

 // Avoid unnecessary redraw
 newLine := itemLine{current: current, selected: selected, label: label,
-result: *result, queryLen: len(t.input), width: 0}
+result: result, queryLen: len(t.input), width: 0}
 prevLine := t.prevLines[i]
 if prevLine.current == newLine.current &&
 prevLine.selected == newLine.selected &&
@@ -834,7 +847,7 @@ func (t *Terminal) overflow(runes []rune, max int) bool {
 return t.displayWidthWithLimit(runes, 0, max) > max
 }

-func (t *Terminal) printHighlighted(result *Result, attr tui.Attr, col1 tui.ColorPair, col2 tui.ColorPair, current bool, match bool) int {
+func (t *Terminal) printHighlighted(result Result, attr tui.Attr, col1 tui.ColorPair, col2 tui.ColorPair, current bool, match bool) int {
 item := result.item

 // Overflow
@@ -954,6 +967,7 @@ func (t *Terminal) printPreview() {
 }
 reader := bufio.NewReader(strings.NewReader(t.previewer.text))
 lineNo := -t.previewer.offset
+height := t.pwindow.Height()
 var ansi *ansiState
 for {
 line, err := reader.ReadString('\n')
@@ -962,7 +976,8 @@ func (t *Terminal) printPreview() {
 line = line[:len(line)-1]
 }
 lineNo++
-if lineNo > t.pwindow.Height() {
+if lineNo > height ||
+t.pwindow.Y() == height-1 && t.pwindow.X() > 0 {
 break
 } else if lineNo > 0 {
 var fillRet tui.FillReturn
@@ -992,7 +1007,7 @@ func (t *Terminal) printPreview() {
 }
 }
 t.pwindow.FinishFill()
-if t.previewer.lines > t.pwindow.Height() {
+if t.previewer.lines > height {
 offset := fmt.Sprintf("%d/%d", t.previewer.offset+1, t.previewer.lines)
 pos := t.pwindow.Width() - len(offset)
 if t.tui.DoesAutoWrap() {
@@ -1163,8 +1178,7 @@ func replacePlaceholder(template string, stripAnsi bool, delimiter Delimiter, fo
 }

 for idx, item := range items {
-chars := util.RunesToChars([]rune(item.AsString(stripAnsi)))
-tokens := Tokenize(chars, delimiter)
+tokens := Tokenize(item.AsString(stripAnsi), delimiter)
 trans := Transform(tokens, ranges)
 str := string(joinTokens(trans))
 if delimiter.str != nil {
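The new `trimQuery` helper normalizes tab characters in the initial query (and, further down, in history entries) to spaces. The diff does not state the motivation, but a plausible reading is that it keeps a literal tab in user input from colliding with the tab-separated pattern cache keys seen in pattern_test.go above; treat that as an inference. The helper itself is tiny:

```go
package main

import (
	"fmt"
	"strings"
)

// trimQuery-style normalization: tabs in user input become plain spaces.
func trimQuery(query string) []rune {
	return []rune(strings.Replace(query, "\t", " ", -1))
}

func main() {
	fmt.Printf("%q\n", string(trimQuery("foo\tbar"))) // "foo bar"
}
```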
@@ -1248,6 +1262,24 @@ func (t *Terminal) truncateQuery() {
 t.cx = util.Constrain(t.cx, 0, len(t.input))
 }

+func (t *Terminal) selectItem(item *Item) {
+t.selected[item.Index()] = selectedItem{time.Now(), item}
+t.version++
+}
+
+func (t *Terminal) deselectItem(item *Item) {
+delete(t.selected, item.Index())
+t.version++
+}
+
+func (t *Terminal) toggleItem(item *Item) {
+if _, found := t.selected[item.Index()]; !found {
+t.selectItem(item)
+} else {
+t.deselectItem(item)
+}
+}
+
 // Loop is called to start Terminal I/O
 func (t *Terminal) Loop() {
 // prof := profile.Start(profile.ProfilePath("/tmp/"))
@@ -1335,7 +1367,12 @@ func (t *Terminal) Loop() {
 }()
 }

-exit := func(code int) {
+exit := func(getCode func() int) {
+if !t.cleanExit && t.fullscreen && t.inlineInfo {
+t.placeCursor()
+}
+t.tui.Close()
+code := getCode()
 if code <= exitNoMatch && t.history != nil {
 t.history.append(string(t.input))
 }
@@ -1345,6 +1382,7 @@ func (t *Terminal) Loop() {

 go func() {
 var focused *Item
+var version int64
 for {
 t.reqBox.Wait(func(events *util.Events) {
 defer events.Clear()
@@ -1361,7 +1399,8 @@ func (t *Terminal) Loop() {
 case reqList:
 t.printList()
 currentFocus := t.currentItem()
-if currentFocus != focused {
+if currentFocus != focused || version != t.version {
+version = t.version
 focused = currentFocus
 if t.isPreviewEnabled() {
 _, list := t.buildPlusList(t.preview.command, false)
@@ -1383,11 +1422,12 @@ func (t *Terminal) Loop() {
 case reqRedraw:
 t.redraw()
 case reqClose:
-t.tui.Close()
+exit(func() int {
 if t.output() {
-exit(exitOk)
+return exitOk
 }
-exit(exitNoMatch)
+return exitNoMatch
+})
 case reqPreviewDisplay:
 t.previewer.text = value.(string)
 t.previewer.lines = strings.Count(t.previewer.text, "\n")
@@ -1396,12 +1436,12 @@ func (t *Terminal) Loop() {
 case reqPreviewRefresh:
 t.printPreview()
 case reqPrintQuery:
-t.tui.Close()
+exit(func() int {
 t.printer(string(t.input))
-exit(exitOk)
+return exitOk
+})
 case reqQuit:
-t.tui.Close()
-exit(exitInterrupt)
+exit(func() int { return exitInterrupt })
 }
 }
 t.placeCursor()
@@ -1426,22 +1466,9 @@ func (t *Terminal) Loop() {
 }
 }
 }
-selectItem := func(item *Item) bool {
-if _, found := t.selected[item.Index()]; !found {
-t.selected[item.Index()] = selectedItem{time.Now(), item}
-return true
-}
-return false
-}
-toggleY := func(y int) {
-item := t.merger.Get(y).item
-if !selectItem(item) {
-delete(t.selected, item.Index())
-}
-}
 toggle := func() {
 if t.cy < t.merger.Length() {
-toggleY(t.cy)
+t.toggleItem(t.merger.Get(t.cy).item)
 req(reqInfo)
 }
 }
@@ -1555,17 +1582,14 @@ func (t *Terminal) Loop() {
 case actSelectAll:
 if t.multi {
 for i := 0; i < t.merger.Length(); i++ {
-item := t.merger.Get(i).item
-selectItem(item)
+t.selectItem(t.merger.Get(i).item)
 }
 req(reqList, reqInfo)
 }
 case actDeselectAll:
 if t.multi {
-for i := 0; i < t.merger.Length(); i++ {
-item := t.merger.Get(i)
-delete(t.selected, item.Index())
-}
+t.selected = make(map[int32]selectedItem)
+t.version++
 req(reqList, reqInfo)
 }
 case actToggle:
@@ -1576,7 +1600,7 @@ func (t *Terminal) Loop() {
 case actToggleAll:
 if t.multi {
 for i := 0; i < t.merger.Length(); i++ {
-toggleY(i)
+t.toggleItem(t.merger.Get(i).item)
 }
 req(reqList, reqInfo)
 }
@@ -1674,13 +1698,13 @@ func (t *Terminal) Loop() {
 case actPreviousHistory:
 if t.history != nil {
 t.history.override(string(t.input))
-t.input = []rune(t.history.previous())
+t.input = trimQuery(t.history.previous())
 t.cx = len(t.input)
 }
 case actNextHistory:
 if t.history != nil {
 t.history.override(string(t.input))
-t.input = []rune(t.history.next())
+t.input = trimQuery(t.history.next())
 t.cx = len(t.input)
 }
 case actSigStop:
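Selection changes now go through the new `selectItem`, `deselectItem`, and `toggleItem` methods, each of which bumps `t.version`; the render loop compares that counter so the preview command is re-run when the selection changes even if the focused item stays the same. A minimal sketch of the pattern with simplified types:

```go
package main

import "fmt"

type terminal struct {
	selected map[int]bool
	version  int64
}

func (t *terminal) toggleItem(idx int) {
	if t.selected[idx] {
		delete(t.selected, idx)
	} else {
		t.selected[idx] = true
	}
	t.version++ // any selection change invalidates what the preview shows
}

func main() {
	t := &terminal{selected: map[int]bool{}}
	var rendered int64 // version the preview was last built from
	t.toggleItem(3)
	if rendered != t.version {
		fmt.Println("refresh preview")
		rendered = t.version
	}
}
```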
@@ -10,7 +10,7 @@ import (
 func newItem(str string) *Item {
 bytes := []byte(str)
 trimmed, _, _ := extractColor(str, nil, nil)
-return &Item{origText: &bytes, text: util.RunesToChars([]rune(trimmed))}
+return &Item{origText: &bytes, text: util.ToChars([]byte(trimmed))}
 }

 func TestReplacePlaceholder(t *testing.T) {
@@ -1,6 +1,7 @@
 package fzf

 import (
+"bytes"
 "regexp"
 "strconv"
 "strings"
@@ -74,14 +75,14 @@ func ParseRange(str *string) (Range, bool) {
 return newRange(n, n), true
 }

-func withPrefixLengths(tokens []util.Chars, begin int) []Token {
+func withPrefixLengths(tokens []string, begin int) []Token {
 ret := make([]Token, len(tokens))

 prefixLength := begin
-for idx, token := range tokens {
-// NOTE: &tokens[idx] instead of &tokens
-ret[idx] = Token{&tokens[idx], int32(prefixLength)}
-prefixLength += token.Length()
+for idx := range tokens {
+chars := util.ToChars([]byte(tokens[idx]))
+ret[idx] = Token{&chars, int32(prefixLength)}
+prefixLength += chars.Length()
 }
 return ret
 }
@@ -92,16 +93,15 @@ const (
 awkWhite
 )

-func awkTokenizer(input util.Chars) ([]util.Chars, int) {
+func awkTokenizer(input string) ([]string, int) {
 // 9, 32
-ret := []util.Chars{}
+ret := []string{}
 prefixLength := 0
 state := awkNil
-numChars := input.Length()
 begin := 0
 end := 0
-for idx := 0; idx < numChars; idx++ {
-r := input.Get(idx)
+for idx := 0; idx < len(input); idx++ {
+r := input[idx]
 white := r == 9 || r == 32
 switch state {
 case awkNil:
@@ -119,19 +119,19 @@ func awkTokenizer(input util.Chars) ([]util.Chars, int) {
 if white {
 end = idx + 1
 } else {
-ret = append(ret, input.Slice(begin, end))
+ret = append(ret, input[begin:end])
 state, begin, end = awkBlack, idx, idx+1
 }
 }
 }
 if begin < end {
-ret = append(ret, input.Slice(begin, end))
+ret = append(ret, input[begin:end])
 }
 return ret, prefixLength
 }

 // Tokenize tokenizes the given string with the delimiter
-func Tokenize(text util.Chars, delimiter Delimiter) []Token {
+func Tokenize(text string, delimiter Delimiter) []Token {
 if delimiter.str == nil && delimiter.regex == nil {
 // AWK-style (\S+\s*)
 tokens, prefixLength := awkTokenizer(text)
@@ -139,36 +139,31 @@ func Tokenize(text util.Chars, delimiter Delimiter) []Token {
 }

 if delimiter.str != nil {
-return withPrefixLengths(text.Split(*delimiter.str), 0)
+return withPrefixLengths(strings.SplitAfter(text, *delimiter.str), 0)
 }

 // FIXME performance
 var tokens []string
 if delimiter.regex != nil {
-str := text.ToString()
-for len(str) > 0 {
-loc := delimiter.regex.FindStringIndex(str)
-if loc == nil {
-loc = []int{0, len(str)}
+for len(text) > 0 {
+loc := delimiter.regex.FindStringIndex(text)
+if len(loc) < 2 {
+loc = []int{0, len(text)}
 }
 last := util.Max(loc[1], 1)
-tokens = append(tokens, str[:last])
-str = str[last:]
+tokens = append(tokens, text[:last])
+text = text[last:]
 }
 }
-asRunes := make([]util.Chars, len(tokens))
-for i, token := range tokens {
-asRunes[i] = util.RunesToChars([]rune(token))
-}
-return withPrefixLengths(asRunes, 0)
+return withPrefixLengths(tokens, 0)
 }

-func joinTokens(tokens []Token) []rune {
-ret := []rune{}
+func joinTokens(tokens []Token) string {
+var output bytes.Buffer
 for _, token := range tokens {
-ret = append(ret, token.text.ToRunes()...)
+output.WriteString(token.text.ToString())
 }
-return ret
+return output.String()
 }

 // Transform is used to transform the input when --with-nth option is given
@@ -181,7 +176,7 @@ func Transform(tokens []Token, withNth []Range) []Token {
 if r.begin == r.end {
 idx := r.begin
 if idx == rangeEllipsis {
-chars := util.RunesToChars(joinTokens(tokens))
+chars := util.ToChars([]byte(joinTokens(tokens)))
 parts = append(parts, &chars)
 } else {
 if idx < 0 {
@@ -224,15 +219,15 @@ func Transform(tokens []Token, withNth []Range) []Token {
 var merged util.Chars
 switch len(parts) {
 case 0:
-merged = util.RunesToChars([]rune{})
+merged = util.ToChars([]byte{})
 case 1:
 merged = *parts[0]
 default:
-runes := []rune{}
+var output bytes.Buffer
 for _, part := range parts {
-runes = append(runes, part.ToRunes()...)
+output.WriteString(part.ToString())
 }
-merged = util.ToChars([]byte(output.String()))
+merged = util.ToChars([]byte(output.String()))
 }

 var prefixLength int32
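With this change `Tokenize` takes a plain string, and `joinTokens` returns a string built through a `bytes.Buffer` instead of accumulating rune slices. A rough usage sketch of the same split-then-join flow on ordinary strings; this is an illustration of the shape of the API, not the actual fzf functions:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
)

// tokenize splits AWK-style: each token keeps trailing whitespace,
// similar in spirit to fzf's awkTokenizer.
func tokenize(input string) []string {
	var tokens []string
	for _, f := range strings.Fields(input) {
		tokens = append(tokens, f+" ")
	}
	return tokens
}

// join concatenates tokens through a bytes.Buffer, as joinTokens now does.
func join(tokens []string) string {
	var out bytes.Buffer
	for _, t := range tokens {
		out.WriteString(t)
	}
	return out.String()
}

func main() {
	toks := tokenize(" abc: def: ghi ")
	fmt.Printf("%q\n", toks)     // ["abc: " "def: " "ghi "]
	fmt.Println(join(toks[0:2])) // "abc: def: ", roughly what --with-nth=1,2 keeps
}
```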
@@ -2,8 +2,6 @@ package fzf

 import (
 "testing"

-"github.com/junegunn/fzf/src/util"
 )

 func TestParseRange(t *testing.T) {
@@ -47,19 +45,19 @@ func TestParseRange(t *testing.T) {
 func TestTokenize(t *testing.T) {
 // AWK-style
 input := " abc: def: ghi "
-tokens := Tokenize(util.RunesToChars([]rune(input)), Delimiter{})
+tokens := Tokenize(input, Delimiter{})
 if tokens[0].text.ToString() != "abc: " || tokens[0].prefixLength != 2 {
 t.Errorf("%s", tokens)
 }

 // With delimiter
-tokens = Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp(":"))
+tokens = Tokenize(input, delimiterRegexp(":"))
 if tokens[0].text.ToString() != " abc:" || tokens[0].prefixLength != 0 {
-t.Errorf("%s", tokens)
+t.Error(tokens[0].text.ToString(), tokens[0].prefixLength)
 }

 // With delimiter regex
-tokens = Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp("\\s+"))
+tokens = Tokenize(input, delimiterRegexp("\\s+"))
 if tokens[0].text.ToString() != " " || tokens[0].prefixLength != 0 ||
 tokens[1].text.ToString() != "abc: " || tokens[1].prefixLength != 2 ||
 tokens[2].text.ToString() != "def: " || tokens[2].prefixLength != 8 ||
@@ -71,7 +69,7 @@ func TestTokenize(t *testing.T) {
 func TestTransform(t *testing.T) {
 input := " abc: def: ghi: jkl"
 {
-tokens := Tokenize(util.RunesToChars([]rune(input)), Delimiter{})
+tokens := Tokenize(input, Delimiter{})
 {
 ranges := splitNth("1,2,3")
 tx := Transform(tokens, ranges)
@@ -93,7 +91,7 @@ func TestTransform(t *testing.T) {
 }
 }
 {
-tokens := Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp(":"))
+tokens := Tokenize(input, delimiterRegexp(":"))
 {
 ranges := splitNth("1..2,3,2..,1")
 tx := Transform(tokens, ranges)
@@ -32,7 +32,8 @@ var offsetRegexp *regexp.Regexp = regexp.MustCompile("\x1b\\[([0-9]+);([0-9]+)R"
 func openTtyIn() *os.File {
 	in, err := os.OpenFile(consoleDevice, syscall.O_RDONLY, 0)
 	if err != nil {
-		panic("Failed to open " + consoleDevice)
+		fmt.Fprintln(os.Stderr, "Failed to open "+consoleDevice)
+		os.Exit(2)
 	}
 	return in
 }
@@ -182,10 +183,18 @@ func (r *LightRenderer) Init() {
 	if r.fullscreen {
 		r.smcup()
 	} else {
+		// We assume that --no-clear is used for repetitive relaunching of fzf.
+		// So we do not clear the lower bottom of the screen.
+		if r.clearOnExit {
 			r.csi("J")
+		}
 		y, x := r.findOffset()
 		r.mouse = r.mouse && y >= 0
-		if x > 0 {
+		// When --no-clear is used for repetitive relaunching, there is a small
+		// time frame between fzf processes where the user keystrokes are not
+		// captured by either of fzf process which can cause x offset to be
+		// increased and we're left with unwanted extra new line.
+		if x > 0 && r.clearOnExit {
 			r.upOneLine = true
 			r.makeSpace()
 		}
@@ -200,7 +209,9 @@ func (r *LightRenderer) Init() {
 	r.csi(fmt.Sprintf("%dA", r.MaxY()-1))
 	r.csi("G")
 	r.csi("K")
-	// r.csi("s")
+	if !r.clearOnExit && !r.fullscreen {
+		r.csi("s")
+	}
 	if !r.fullscreen && r.mouse {
 		r.yoffset, _ = r.findOffset()
 	}
@@ -411,10 +422,12 @@ func (r *LightRenderer) escSequence(sz *int) Event {
 				return Event{F12, 0, nil}
 			}
 		}
-		// Bracketed paste mode \e[200~ / \e[201
-		if r.buffer[3] == 48 && (r.buffer[4] == 48 || r.buffer[4] == 49) && r.buffer[5] == 126 {
-			*sz = 6
-			return Event{Invalid, 0, nil}
+		// Bracketed paste mode: \e[200~ ... \e[201~
+		if r.buffer[3] == '0' && (r.buffer[4] == '0' || r.buffer[4] == '1') && r.buffer[5] == '~' {
+			// Immediately discard the sequence from the buffer and reread input
+			r.buffer = r.buffer[6:]
+			*sz = 0
+			return r.GetChar()
 		}
 		return Event{Invalid, 0, nil} // INS
 	case 51:
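For context, bracketed paste mode wraps pasted text between the escape sequences `ESC[200~` and `ESC[201~`; the hunk above drops those markers from the input buffer and keeps reading instead of reporting an invalid key. A minimal standalone sketch of the same detection logic; the `isPasteMarker` name and the buffer handling are illustrative, not fzf's actual parser:

```go
package main

import (
	"bytes"
	"fmt"
)

// isPasteMarker reports whether buf starts with a bracketed paste marker,
// i.e. ESC [ 2 0 0 ~ (paste begin) or ESC [ 2 0 1 ~ (paste end).
func isPasteMarker(buf []byte) bool {
	return len(buf) >= 6 &&
		bytes.HasPrefix(buf, []byte{0x1b, '[', '2'}) &&
		buf[3] == '0' && (buf[4] == '0' || buf[4] == '1') && buf[5] == '~'
}

func main() {
	input := append([]byte("\x1b[200~"), []byte("pasted text\x1b[201~")...)
	for len(input) > 0 {
		if isPasteMarker(input) {
			// Discard the 6-byte marker and keep reading, as in the patch above.
			input = input[6:]
			continue
		}
		fmt.Printf("%c", input[0])
		input = input[1:]
	}
	fmt.Println()
}
```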
@@ -584,10 +597,8 @@ func (r *LightRenderer) Close() {
 			}
 			r.csi("J")
 		}
-	} else if r.fullscreen {
-		r.csi("G")
-	} else {
-		r.move(r.height, 0)
+	} else if !r.fullscreen {
+		r.csi("u")
 	}
 	if r.mouse {
 		r.csi("?1000l")
@@ -697,6 +708,10 @@ func (w *LightWindow) X() int {
 	return w.posx
 }
 
+func (w *LightWindow) Y() int {
+	return w.posy
+}
+
 func (w *LightWindow) Enclose(y int, x int) bool {
 	return x >= w.left && x < (w.left+w.width) &&
 		y >= w.top && y < (w.top+w.height)
@@ -831,17 +846,20 @@ func (w *LightWindow) fill(str string, onMove func()) FillReturn {
 		for j, wl := range lines {
 			if w.posx >= w.Width()-1 && wl.displayWidth == 0 {
 				if w.posy < w.height-1 {
-					w.MoveAndClear(w.posy+1, 0)
+					w.Move(w.posy+1, 0)
 				}
 				return FillNextLine
 			}
 			w.stderrInternal(wl.text, false)
 			w.posx += wl.displayWidth
+
+			// Wrap line
 			if j < len(lines)-1 || i < len(allLines)-1 {
 				if w.posy+1 >= w.height {
 					return FillSuspend
 				}
-				w.MoveAndClear(w.posy+1, 0)
+				w.MoveAndClear(w.posy, w.posx)
+				w.Move(w.posy+1, 0)
 				onMove()
 			}
 		}
@@ -856,24 +874,25 @@ func (w *LightWindow) setBg() {
 }
 
 func (w *LightWindow) Fill(text string) FillReturn {
-	w.MoveAndClear(w.posy, w.posx)
+	w.Move(w.posy, w.posx)
 	w.setBg()
 	return w.fill(text, w.setBg)
 }
 
 func (w *LightWindow) CFill(fg Color, bg Color, attr Attr, text string) FillReturn {
-	w.MoveAndClear(w.posy, w.posx)
+	w.Move(w.posy, w.posx)
 	if bg == colDefault {
 		bg = w.bg
 	}
 	if w.csiColor(fg, bg, attr) {
-		return w.fill(text, func() { w.csiColor(fg, bg, attr) })
 		defer w.csi("m")
+		return w.fill(text, func() { w.csiColor(fg, bg, attr) })
 	}
 	return w.fill(text, w.setBg)
 }
 
 func (w *LightWindow) FinishFill() {
+	w.MoveAndClear(w.posy, w.posx)
 	for y := w.posy + 1; y < w.height; y++ {
 		w.MoveAndClear(y, 0)
 	}

@@ -164,6 +164,10 @@ func (w *TcellWindow) X() int {
 	return w.lastX
 }
 
+func (w *TcellWindow) Y() int {
+	return w.lastY
+}
+
 func (r *FullscreenRenderer) DoesAutoWrap() bool {
 	return false
 }

@@ -236,6 +236,7 @@ type Window interface {
 	Close()
 
 	X() int
+	Y() int
 	Enclose(y int, x int) bool
 
 	Move(y int, x int)

@@ -3,63 +3,103 @@ package util
 import (
 	"unicode"
 	"unicode/utf8"
+	"unsafe"
+)
+
+const (
+	overflow64 uint64 = 0x8080808080808080
+	overflow32 uint32 = 0x80808080
 )
 
 type Chars struct {
-	runes []rune
-	bytes []byte
+	slice           []byte // or []rune
+	inBytes         bool
+	trimLengthKnown bool
+	trimLength      uint16
+
+	// XXX Piggybacking item index here is a horrible idea. But I'm trying to
+	// minimize the memory footprint by not wasting padded spaces.
+	Index int32
+}
+
+func checkAscii(bytes []byte) (bool, int) {
+	i := 0
+	for ; i <= len(bytes)-8; i += 8 {
+		if (overflow64 & *(*uint64)(unsafe.Pointer(&bytes[i]))) > 0 {
+			return false, i
+		}
+	}
+	for ; i <= len(bytes)-4; i += 4 {
+		if (overflow32 & *(*uint32)(unsafe.Pointer(&bytes[i]))) > 0 {
+			return false, i
+		}
+	}
+	for ; i < len(bytes); i++ {
+		if bytes[i] >= utf8.RuneSelf {
+			return false, i
+		}
+	}
+	return true, 0
 }
 
 // ToChars converts byte array into rune array
-func ToChars(bytea []byte) Chars {
-	var runes []rune
-	ascii := true
-	numBytes := len(bytea)
-	for i := 0; i < numBytes; {
-		if bytea[i] < utf8.RuneSelf {
-			if !ascii {
-				runes = append(runes, rune(bytea[i]))
-			}
-			i++
-		} else {
-			if ascii {
-				ascii = false
-				runes = make([]rune, i, numBytes)
-				for j := 0; j < i; j++ {
-					runes[j] = rune(bytea[j])
-				}
-			}
-			r, sz := utf8.DecodeRune(bytea[i:])
-			i += sz
-			runes = append(runes, r)
-		}
-	}
-	if ascii {
-		return Chars{bytes: bytea}
-	}
-	return Chars{runes: runes}
+func ToChars(bytes []byte) Chars {
+	inBytes, bytesUntil := checkAscii(bytes)
+	if inBytes {
+		return Chars{slice: bytes, inBytes: inBytes}
+	}
+
+	runes := make([]rune, bytesUntil, len(bytes))
+	for i := 0; i < bytesUntil; i++ {
+		runes[i] = rune(bytes[i])
+	}
+	for i := bytesUntil; i < len(bytes); {
+		r, sz := utf8.DecodeRune(bytes[i:])
+		i += sz
+		runes = append(runes, r)
+	}
+	return RunesToChars(runes)
 }
 
 func RunesToChars(runes []rune) Chars {
-	return Chars{runes: runes}
+	return Chars{slice: *(*[]byte)(unsafe.Pointer(&runes)), inBytes: false}
+}
+
+func (chars *Chars) IsBytes() bool {
+	return chars.inBytes
+}
+
+func (chars *Chars) Bytes() []byte {
+	return chars.slice
+}
+
+func (chars *Chars) optionalRunes() []rune {
+	if chars.inBytes {
+		return nil
+	}
+	return *(*[]rune)(unsafe.Pointer(&chars.slice))
 }
 
 func (chars *Chars) Get(i int) rune {
-	if chars.runes != nil {
-		return chars.runes[i]
+	if runes := chars.optionalRunes(); runes != nil {
+		return runes[i]
 	}
-	return rune(chars.bytes[i])
+	return rune(chars.slice[i])
 }
 
 func (chars *Chars) Length() int {
-	if chars.runes != nil {
-		return len(chars.runes)
+	if runes := chars.optionalRunes(); runes != nil {
+		return len(runes)
 	}
-	return len(chars.bytes)
+	return len(chars.slice)
 }
 
 // TrimLength returns the length after trimming leading and trailing whitespaces
-func (chars *Chars) TrimLength() int {
+func (chars *Chars) TrimLength() uint16 {
+	if chars.trimLengthKnown {
+		return chars.trimLength
+	}
+	chars.trimLengthKnown = true
 	var i int
 	len := chars.Length()
 	for i = len - 1; i >= 0; i-- {
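The new `checkAscii` helper scans the input a machine word at a time: masking eight (or four) consecutive bytes against 0x80 answers in one comparison whether any byte in that word has its high bit set, i.e. whether the string stops being pure ASCII. The same hunk also keeps non-ASCII content as a `[]rune` reinterpreted as `[]byte` through `unsafe.Pointer`, so a single `slice` field plus the `inBytes` flag covers both representations. A standalone sketch of the ASCII check without the `unsafe` word loads; the `isASCII` name is illustrative, not from fzf:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// isASCII reports whether b contains only ASCII bytes, and otherwise returns
// the index of the first non-ASCII byte. This is the byte-at-a-time version of
// checkAscii above; the patched code speeds it up by testing 8 bytes per
// iteration against the 0x8080808080808080 mask via a uint64 load.
func isASCII(b []byte) (bool, int) {
	for i := 0; i < len(b); i++ {
		if b[i] >= utf8.RuneSelf { // 0x80: high bit set, start of a multi-byte rune
			return false, i
		}
	}
	return true, 0
}

func main() {
	fmt.Println(isASCII([]byte("foobar"))) // true 0
	fmt.Println(isASCII([]byte("fzf한글"))) // false 3
}
```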
@@ -80,7 +120,8 @@ func (chars *Chars) TrimLength() int {
 			break
 		}
 	}
-	return i - j + 1
+	chars.trimLength = AsUint16(i - j + 1)
+	return chars.trimLength
 }
 
 func (chars *Chars) TrailingWhitespaces() int {
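TrimLength is now memoized: the first call stores the result in the struct (narrowed to uint16 via AsUint16) and every later call returns the cached value. A minimal sketch of that caching pattern, with illustrative names (`trimmed`, `lenKnown`, `cachedLen`) rather than fzf's fields:

```go
package main

import (
	"fmt"
	"strings"
)

// trimmed caches the trimmed length of a string after the first computation.
type trimmed struct {
	s         string
	lenKnown  bool
	cachedLen uint16
}

func (t *trimmed) TrimLength() uint16 {
	if t.lenKnown {
		return t.cachedLen
	}
	t.lenKnown = true
	t.cachedLen = uint16(len(strings.TrimSpace(t.s)))
	return t.cachedLen
}

func main() {
	t := trimmed{s: " fzf "}
	fmt.Println(t.TrimLength()) // computed once
	fmt.Println(t.TrimLength()) // served from the cache
}
```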
@@ -96,62 +137,31 @@ func (chars *Chars) TrailingWhitespaces() int {
 }
 
 func (chars *Chars) ToString() string {
-	if chars.runes != nil {
-		return string(chars.runes)
+	if runes := chars.optionalRunes(); runes != nil {
+		return string(runes)
 	}
-	return string(chars.bytes)
+	return string(chars.slice)
 }
 
 func (chars *Chars) ToRunes() []rune {
-	if chars.runes != nil {
-		return chars.runes
+	if runes := chars.optionalRunes(); runes != nil {
+		return runes
 	}
-	runes := make([]rune, len(chars.bytes))
-	for idx, b := range chars.bytes {
+	bytes := chars.slice
+	runes := make([]rune, len(bytes))
+	for idx, b := range bytes {
 		runes[idx] = rune(b)
 	}
 	return runes
 }
 
-func (chars *Chars) Slice(b int, e int) Chars {
-	if chars.runes != nil {
-		return Chars{runes: chars.runes[b:e]}
-	}
-	return Chars{bytes: chars.bytes[b:e]}
-}
-
-func (chars *Chars) Split(delimiter string) []Chars {
-	delim := []rune(delimiter)
-	numChars := chars.Length()
-	numDelim := len(delim)
-	begin := 0
-	ret := make([]Chars, 0, 1)
-
-	for index := 0; index < numChars; {
-		if index+numDelim <= numChars {
-			match := true
-			for off, d := range delim {
-				if chars.Get(index+off) != d {
-					match = false
-					break
-				}
-			}
-			// Found the delimiter
-			if match {
-				incr := Max(numDelim, 1)
-				ret = append(ret, chars.Slice(begin, index+incr))
-				index += incr
-				begin = index
-				continue
-			}
-		} else {
-			// Impossible to find the delimiter in the remaining substring
-			break
-		}
-		index++
-	}
-	if begin < numChars || len(ret) == 0 {
-		ret = append(ret, chars.Slice(begin, numChars))
-	}
-	return ret
+func (chars *Chars) CopyRunes(dest []rune) {
+	if runes := chars.optionalRunes(); runes != nil {
+		copy(dest, runes)
+		return
+	}
+	for idx, b := range chars.slice[:len(dest)] {
+		dest[idx] = rune(b)
+	}
+	return
 }

@@ -2,27 +2,16 @@ package util
 
 import "testing"
 
-func TestToCharsNil(t *testing.T) {
-	bs := Chars{bytes: []byte{}}
-	if bs.bytes == nil || bs.runes != nil {
-		t.Error()
-	}
-	rs := RunesToChars([]rune{})
-	if rs.bytes != nil || rs.runes == nil {
-		t.Error()
-	}
-}
-
 func TestToCharsAscii(t *testing.T) {
 	chars := ToChars([]byte("foobar"))
-	if chars.ToString() != "foobar" || chars.runes != nil {
+	if !chars.inBytes || chars.ToString() != "foobar" || !chars.inBytes {
 		t.Error()
 	}
 }
 
 func TestCharsLength(t *testing.T) {
 	chars := ToChars([]byte("\tabc한글  "))
-	if chars.Length() != 8 || chars.TrimLength() != 5 {
+	if chars.inBytes || chars.Length() != 8 || chars.TrimLength() != 5 {
 		t.Error()
 	}
 }
@@ -36,7 +25,7 @@ func TestCharsToString(t *testing.T) {
 }
 
 func TestTrimLength(t *testing.T) {
-	check := func(str string, exp int) {
+	check := func(str string, exp uint16) {
 		chars := ToChars([]byte(str))
 		trimmed := chars.TrimLength()
 		if trimmed != exp {
@@ -55,28 +44,3 @@ func TestTrimLength(t *testing.T) {
 	check("  h o  ", 5)
 	check("   ", 0)
 }
-
-func TestSplit(t *testing.T) {
-	check := func(str string, delim string, tokens ...string) {
-		input := ToChars([]byte(str))
-		result := input.Split(delim)
-		if len(result) != len(tokens) {
-			t.Errorf("Invalid Split result for '%s': %d tokens found (expected %d): %s",
-				str, len(result), len(tokens), result)
-		}
-		for idx, token := range tokens {
-			if result[idx].ToString() != token {
-				t.Errorf("Invalid Split result for '%s': %s (expected %s)",
-					str, result[idx].ToString(), token)
-			}
-		}
-	}
-	check("abc:def::", ":", "abc:", "def:", ":")
-	check("abc:def::", "-", "abc:def::")
-	check("abc", "", "a", "b", "c")
-	check("abc", "a", "a", "bc")
-	check("abc", "ab", "ab", "c")
-	check("abc", "abc", "abc")
-	check("abc", "abcd", "abc")
-	check("", "abcd", "")
-}

@@ -26,23 +26,23 @@ func NewEventBox() *EventBox {
 // Wait blocks the goroutine until signaled
 func (b *EventBox) Wait(callback func(*Events)) {
 	b.cond.L.Lock()
-	defer b.cond.L.Unlock()
 
 	if len(b.events) == 0 {
 		b.cond.Wait()
 	}
 
 	callback(&b.events)
+	b.cond.L.Unlock()
 }
 
 // Set turns on the event type on the box
 func (b *EventBox) Set(event EventType, value interface{}) {
 	b.cond.L.Lock()
-	defer b.cond.L.Unlock()
 	b.events[event] = value
 	if _, found := b.ignore[event]; !found {
 		b.cond.Broadcast()
 	}
+	b.cond.L.Unlock()
 }
 
 // Clear clears the events
@@ -56,27 +56,27 @@ func (events *Events) Clear() {
 // Peek peeks at the event box if the given event is set
 func (b *EventBox) Peek(event EventType) bool {
 	b.cond.L.Lock()
-	defer b.cond.L.Unlock()
 	_, ok := b.events[event]
+	b.cond.L.Unlock()
 	return ok
 }
 
 // Watch deletes the events from the ignore list
 func (b *EventBox) Watch(events ...EventType) {
 	b.cond.L.Lock()
-	defer b.cond.L.Unlock()
 	for _, event := range events {
 		delete(b.ignore, event)
 	}
+	b.cond.L.Unlock()
 }
 
 // Unwatch adds the events to the ignore list
 func (b *EventBox) Unwatch(events ...EventType) {
 	b.cond.L.Lock()
-	defer b.cond.L.Unlock()
 	for _, event := range events {
 		b.ignore[event] = true
 	}
+	b.cond.L.Unlock()
 }
 
 // WaitFor blocks the execution until the event is received
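The hunks above replace `defer b.cond.L.Unlock()` with an explicit `Unlock()` at the end of each method, presumably to avoid the overhead of `defer` on these frequently called paths. A minimal, self-contained sketch of the same wait/signal pattern on a `sync.Cond`; the `box` type and its fields are illustrative stand-ins, not fzf's EventBox:

```go
package main

import (
	"fmt"
	"sync"
)

// box is a tiny stand-in for an event box: a set of pending events guarded by
// a sync.Cond.
type box struct {
	cond   *sync.Cond
	events map[int]bool
}

func newBox() *box {
	return &box{cond: sync.NewCond(&sync.Mutex{}), events: map[int]bool{}}
}

// Set records an event and wakes up any waiter, unlocking explicitly instead
// of via defer, as in the patch above.
func (b *box) Set(event int) {
	b.cond.L.Lock()
	b.events[event] = true
	b.cond.Broadcast()
	b.cond.L.Unlock()
}

// Wait blocks until at least one event is present, then hands the set to the
// callback while still holding the lock.
func (b *box) Wait(callback func(map[int]bool)) {
	b.cond.L.Lock()
	if len(b.events) == 0 {
		b.cond.Wait()
	}
	callback(b.events)
	b.cond.L.Unlock()
}

func main() {
	b := newBox()
	done := make(chan struct{})
	go func() {
		b.Wait(func(events map[int]bool) {
			fmt.Println("got events:", len(events))
		})
		close(done)
	}()
	b.Set(1)
	<-done
}
```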
@@ -6,11 +6,11 @@ Execute (Setup):
 
 Execute (fzf#run with dir option):
   let cwd = getcwd()
-  let result = fzf#run({ 'options': '--filter=vdr', 'dir': g:dir })
+  let result = fzf#run({ 'source': 'git ls-files', 'options': '--filter=vdr', 'dir': g:dir })
   AssertEqual ['fzf.vader'], result
   AssertEqual getcwd(), cwd
 
-  let result = sort(fzf#run({ 'options': '--filter e', 'dir': g:dir }))
+  let result = sort(fzf#run({ 'source': 'git ls-files', 'options': '--filter e', 'dir': g:dir }))
   AssertEqual ['fzf.vader', 'test_go.rb'], result
   AssertEqual getcwd(), cwd
 
@@ -19,7 +19,7 @@ Execute (fzf#run with Funcref command):
   function! g:FzfTest(e)
     call add(g:ret, a:e)
   endfunction
-  let result = sort(fzf#run({ 'sink': function('g:FzfTest'), 'options': '--filter e', 'dir': g:dir }))
+  let result = sort(fzf#run({ 'source': 'git ls-files', 'sink': function('g:FzfTest'), 'options': '--filter e', 'dir': g:dir }))
   AssertEqual ['fzf.vader', 'test_go.rb'], result
   AssertEqual ['fzf.vader', 'test_go.rb'], sort(g:ret)
 
@@ -140,7 +140,7 @@ Execute (fzf#wrap):
   let g:fzf_history_dir = '/tmp'
   let opts = fzf#wrap('foobar', {'options': '--color light'})
   Log opts
-  Assert opts.options =~ '--history /tmp/foobar'
+  Assert opts.options =~ "--history '/tmp/foobar'"
   Assert opts.options =~ '--color light'
 
   let g:fzf_colors = { 'fg': ['fg', 'Error'] }
@@ -149,21 +149,23 @@ Execute (fzf#wrap):
 
 Execute (fzf#shellescape with sh):
   AssertEqual '''''', fzf#shellescape('', 'sh')
+  AssertEqual '''\''', fzf#shellescape('\', 'sh')
   AssertEqual '''""''', fzf#shellescape('""', 'sh')
   AssertEqual '''foobar>''', fzf#shellescape('foobar>', 'sh')
-  AssertEqual '''\"''', fzf#shellescape('\"', 'sh')
+  AssertEqual '''\\\"\\\''', fzf#shellescape('\\\"\\\', 'sh')
   AssertEqual '''echo ''\''''a''\'''' && echo ''\''''b''\''''''', fzf#shellescape('echo ''a'' && echo ''b''', 'sh')
 
 Execute (fzf#shellescape with cmd.exe):
   AssertEqual '^"^"', fzf#shellescape('', 'cmd.exe')
+  AssertEqual '^"\\^"', fzf#shellescape('\', 'cmd.exe')
   AssertEqual '^"\^"\^"^"', fzf#shellescape('""', 'cmd.exe')
   AssertEqual '^"foobar^>^"', fzf#shellescape('foobar>', 'cmd.exe')
-  AssertEqual '^"\\\^"\\^"', fzf#shellescape('\\\\\\\\"\', 'cmd.exe')
+  AssertEqual '^"\\\\\\\^"\\\\\\^"', fzf#shellescape('\\\"\\\', 'cmd.exe')
   AssertEqual '^"echo ''a'' ^&^& echo ''b''^"', fzf#shellescape('echo ''a'' && echo ''b''', 'cmd.exe')
 
   AssertEqual '^"C:\Program Files ^(x86^)\\^"', fzf#shellescape('C:\Program Files (x86)\', 'cmd.exe')
   AssertEqual '^"C:/Program Files ^(x86^)/^"', fzf#shellescape('C:/Program Files (x86)/', 'cmd.exe')
-  " AssertEqual '^"%%USERPROFILE%%^", fzf#shellescape('%USERPROFILE%', 'cmd.exe')
+  AssertEqual '^"%%USERPROFILE%%^"', fzf#shellescape('%USERPROFILE%', 'cmd.exe')
 
 Execute (Cleanup):
   unlet g:dir

test/test_go.rb: 497 changed lines (diff suppressed because it is too large)