mirror of
https://github.com/LBRYFoundation/tracker.git
synced 2025-08-29 16:31:27 +00:00
Compare commits
438 commits
v2.0.0-rc.
...
main
Author | SHA1 | Date | |
---|---|---|---|
|
226d6eace4 | ||
|
48b28d0b07 | ||
|
494d67fae4 | ||
|
a7b4e6a559 | ||
|
1b6b7f1b5b | ||
|
6baeb76ce2 | ||
|
c1fcd3d624 | ||
|
9330b3fd59 | ||
|
c8f0c1eed8 | ||
|
737053bd0e | ||
|
441b06169f | ||
|
7455c2ad4a | ||
|
bb5460fccc | ||
|
828edb8fd8 | ||
|
07e4db8baf | ||
|
4b5e39f83c | ||
|
301dd22f15 | ||
|
7166c1da17 | ||
|
f3468edf19 | ||
|
d1b90c0139 | ||
|
b81a310eea | ||
|
65ce7c7c6b | ||
|
e2991d59e4 | ||
|
c28d7ad788 | ||
|
6e2c095ce4 | ||
|
129aac230a | ||
|
dc34044973 | ||
|
d57c348b6c | ||
|
9c44135610 | ||
|
8bf717fa4e | ||
|
c1f523e855 | ||
|
7498ef3f4a | ||
|
035e66f155 | ||
|
5c58456d9f | ||
|
057f7afefc | ||
|
2d747cfac4 | ||
|
205694d901 | ||
|
7c888a171e | ||
|
b9572c1770 | ||
|
5f8229ad12 | ||
|
b70ddbef91 | ||
|
9fbf669fcd | ||
|
d998ce556e | ||
|
6375e7c735 | ||
|
313db5027b | ||
|
4224e1ac6b | ||
|
e7f43ee924 | ||
|
3b330213ad | ||
|
696a5e51bb | ||
|
592d487a67 | ||
|
dfcda607fc | ||
|
8c7a4fd117 | ||
|
89c83d2e3c | ||
|
425662fa93 | ||
|
0f2cfb2fdd | ||
|
456f9de190 | ||
|
cf30ad8b6a | ||
|
6f65feef4b | ||
|
7be396d299 | ||
|
606361d9c1 | ||
|
c50c6b15b2 | ||
|
535c0fdd07 | ||
|
83f79e5202 | ||
|
25d39698f2 | ||
|
56fd2818b2 | ||
|
7ac177257c | ||
|
49d69140aa | ||
|
e6339590f0 | ||
|
99aeb7cebe | ||
|
bc7b3bc738 | ||
|
3a76f09ea9 | ||
|
fad3541bd9 | ||
|
ff0fe9e28d | ||
|
06eaf570ca | ||
|
24c72cdacc | ||
|
bd24c5b3fc | ||
|
4dbba4862c | ||
|
b6aa407213 | ||
|
326832e479 | ||
|
f1713d6524 | ||
|
a9f094749d | ||
|
f7e8116f33 | ||
|
aa5b97dc5a | ||
|
689ee75178 | ||
|
3d803be039 | ||
|
e9dac2a874 | ||
|
3db6859db6 | ||
|
e266d218db | ||
|
3c23a854c8 | ||
|
932d0e50c1 | ||
|
0704b62b31 | ||
|
5b771c47a1 | ||
|
bdc4f7b4d6 | ||
|
54f761efe7 | ||
|
03ac7353e0 | ||
|
a13acda170 | ||
|
7e16002dc0 | ||
|
b072bb4166 | ||
|
61e9d47a77 | ||
|
64d471d13a | ||
|
b1852c7c8e | ||
|
b61fe233df | ||
|
0a725f7d44 | ||
|
5e0ee0bd00 | ||
|
130e186006 | ||
|
2a3bb5bea0 | ||
|
7ba4b68138 | ||
|
0e17b1352b | ||
|
e6e72698b9 | ||
|
d70d300422 | ||
|
e0b50f3ffa | ||
|
85d646d1ad | ||
|
452eb1acef | ||
|
9e7323fa44 | ||
|
77a52f9f30 | ||
|
89cdaa8c6d | ||
|
5082146ae9 | ||
|
053ce531d9 | ||
|
c8c0de539c | ||
|
9acf809ffb | ||
|
a9a2d37f11 | ||
|
728ec0c623 | ||
|
ae431e1361 | ||
|
ddeb44b527 | ||
|
0a420fe053 | ||
|
797d0cb6e2 | ||
|
0936bd3f9a | ||
|
f4d34b54e5 | ||
|
4d58b4bce6 | ||
|
87c72bc516 | ||
|
cb88a11d6a | ||
|
eed141dbe4 | ||
|
acdce7fea9 | ||
|
3889888f8a | ||
|
7b64e92ee9 | ||
|
a48ab487e2 | ||
|
d7cfcacbff | ||
|
68b8edfdd5 | ||
|
3e334b9536 | ||
|
94696c062e | ||
|
a6df644597 | ||
|
6e362c184c | ||
|
dc753b937c | ||
|
7df0145118 | ||
|
ad1eee4eb7 | ||
|
36e0204a8f | ||
|
e83f68b952 | ||
|
7943288678 | ||
|
9d22b67f74 | ||
|
fa19ffd050 | ||
|
f2ab706f10 | ||
|
5f99a7e778 | ||
|
9a5fac67ed | ||
|
d65ab677e7 | ||
|
e78892d5ac | ||
|
d0fc3a4634 | ||
|
b4b257c151 | ||
|
df4eeb840b | ||
|
91715229f1 | ||
|
0de1d25448 | ||
|
b345eb3899 | ||
|
2a26215f2a | ||
|
1b7ce4c378 | ||
|
fcbc168ae6 | ||
|
8f0fc7ba10 | ||
|
e09d11e4b9 | ||
|
dcd8e8ea86 | ||
|
bacc7646d0 | ||
|
3c80ed8a8e | ||
|
f19f08aa2e | ||
|
e749c9c6c9 | ||
|
3f9ac79570 | ||
|
bb56c2932b | ||
|
7c5f8bf9c5 | ||
|
757ebf1241 | ||
|
c9d51e8e68 | ||
|
6450a2fa00 | ||
|
12c9f95eb1 | ||
|
d5bddeac96 | ||
|
82c9f08f4f | ||
|
564a54a178 | ||
|
495f2c2734 | ||
|
b505cecde1 | ||
|
96d0c3d829 | ||
|
862b452cef | ||
|
1cb16ddb0c | ||
|
1a4e4c833b | ||
|
2df7eac90f | ||
|
d95120c817 | ||
|
20edf7a136 | ||
|
21f500c93e | ||
|
3aa7d1a91d | ||
|
3bcb79129c | ||
|
8095657735 | ||
|
085234044a | ||
|
17f22e77a3 | ||
|
ff15955dcc | ||
|
0738d93644 | ||
|
aab8fa24c1 | ||
|
84ee1d6658 | ||
|
734c11c6ed | ||
|
3c052ec98d | ||
|
f0780ad9cc | ||
|
be57cd15b7 | ||
|
b737c8d0aa | ||
|
be555c3b51 | ||
|
0d492c4349 | ||
|
2f603e43fc | ||
|
ca4147a808 | ||
|
9e251b23b6 | ||
|
1f7ea58197 | ||
|
0954c17692 | ||
|
ee7b4f944a | ||
|
fa19d1125c | ||
|
6c5e8ad20c | ||
|
0edd6382d5 | ||
|
b1c05d362a | ||
|
b19f7115df | ||
|
0c077f0a8c | ||
|
a8bc51ba1d | ||
|
7022b541bc | ||
|
40f6456138 | ||
|
811fe001ac | ||
|
65704f47e1 | ||
|
1a39a495d7 | ||
|
a5b15d69ad | ||
|
6bef53658b | ||
|
2004489016 | ||
|
120c4615c1 | ||
|
e9d1e71276 | ||
|
7dbbc86380 | ||
|
2bead6b7b4 | ||
|
5840cd3de1 | ||
|
d38a7017d1 | ||
|
f69159362a | ||
|
24be4ece73 | ||
|
8f472ad52c | ||
|
ff269b0f44 | ||
|
b100583d7d | ||
|
22c42f9ec3 | ||
|
39e3b5ae5c | ||
|
ae7a13db21 | ||
|
d28c6717b1 | ||
|
15bd5c41f3 | ||
|
fa6e360da4 | ||
|
df34304ab4 | ||
|
dde5cd1586 | ||
|
34a6425fd5 | ||
|
6e3470aa7e | ||
|
2f58e98832 | ||
|
35d146f675 | ||
|
ef166a6159 | ||
|
756a0f6316 | ||
|
6198491194 | ||
|
395e59aef3 | ||
|
e505250b06 | ||
|
80558648d7 | ||
|
df0de94337 | ||
|
1a0b5c56a6 | ||
|
ca823e0e5f | ||
|
66e12c6684 | ||
|
e7b8264e50 | ||
|
6dfdb7e192 | ||
|
89bc479a3b | ||
|
55b57549a6 | ||
|
ce43a09956 | ||
|
134744a484 | ||
|
47b5e67345 | ||
|
6dee48ce17 | ||
|
b7e6719129 | ||
|
b314b5003a | ||
|
7d9166e003 | ||
|
8300621799 | ||
|
79750ef983 | ||
|
b9773473e4 | ||
|
b5dda16706 | ||
|
2a4c82f613 | ||
|
464d37b2a7 | ||
|
c7b052dbb2 | ||
|
13857d5bce | ||
|
4f4495f0f3 | ||
|
5400a99b75 | ||
|
44dbf4abb4 | ||
|
7c666e336a | ||
|
8ee8793867 | ||
|
3168f50601 | ||
|
d026424038 | ||
|
df7b59e2f3 | ||
|
3799b856c2 | ||
|
034aa0b5dc | ||
|
2c67ad4dac | ||
|
3f3f75519d | ||
|
c5f8e5a9b0 | ||
|
13c71b4ee1 | ||
|
80e9fce087 | ||
|
8ed171b0ea | ||
|
153ad325b7 | ||
|
1aa6c86d3f | ||
|
d43cb719b9 | ||
|
02336d10e7 | ||
|
7ea4b3dc7a | ||
|
6e1cfa18d8 | ||
|
2764717657 | ||
|
2dcb4344cb | ||
|
03b98e0090 | ||
|
fa6dcddcb6 | ||
|
3a323d9338 | ||
|
ad496fceb8 | ||
|
f7becf952b | ||
|
3168f13b48 | ||
|
6663c09391 | ||
|
389dbd20dc | ||
|
53297853a6 | ||
|
035c5b4960 | ||
|
6c3ddaefb3 | ||
|
8dddae0658 | ||
|
233b5b52ec | ||
|
0d9a2309fc | ||
|
dab03f52dc | ||
|
edef032381 | ||
|
ce3281f3e8 | ||
|
effb05103a | ||
|
c41519e73f | ||
|
7786e1a915 | ||
|
ed69a0893e | ||
|
85d7d9c677 | ||
|
c37311e8c7 | ||
|
acf7e3c749 | ||
|
039f25f571 | ||
|
b013106f89 | ||
|
cb6a3be2ac | ||
|
d07b61d67d | ||
|
370004a9f5 | ||
|
9ce6c31021 | ||
|
496cc1a31d | ||
|
6fc3f618aa | ||
|
bd02be6ab5 | ||
|
7f7c79bdf1 | ||
|
abccf5bd7e | ||
|
ce6706b0d5 | ||
|
d3de59bab0 | ||
|
7c4f8cf395 | ||
|
ceacd6bdbd | ||
|
86197a258c | ||
|
7fabf4ba61 | ||
|
634bc6b706 | ||
|
a70d6dc036 | ||
|
1cc0738cbe | ||
|
143ba54695 | ||
|
f61e7a9281 | ||
|
48abc6048e | ||
|
71eb9fb767 | ||
|
a4aeba7ecc | ||
|
d026a3359c | ||
|
869777c6d0 | ||
|
1daee323a3 | ||
|
cb55019ab8 | ||
|
353ba01e51 | ||
|
7b1e7e8c99 | ||
|
23e9719073 | ||
|
68cbe0fc21 | ||
|
ea0dba3a3d | ||
|
20d1cbf537 | ||
|
842bec32e7 | ||
|
00ab4acc98 | ||
|
cdb0ff3612 | ||
|
9f45950dd0 | ||
|
2e625af44d | ||
|
ea1324602e | ||
|
1ef65d3704 | ||
|
233c355526 | ||
|
300b9c12ab | ||
|
bb93e478b8 | ||
|
f0eca4c3f7 | ||
|
13882ae05d | ||
|
215f33d862 | ||
|
b028a36424 | ||
|
03080b39b4 | ||
|
517fb4044e | ||
|
82d79e5113 | ||
|
9db2d3dabb | ||
|
fdf4ed8169 | ||
|
98299c5912 | ||
|
74dde3f362 | ||
|
51926ad562 | ||
|
a4b08c021b | ||
|
0e07b33827 | ||
|
4d54980930 | ||
|
3f29aa358b | ||
|
fb30e9fb03 | ||
|
6884a8f527 | ||
|
738e496929 | ||
|
8bc2b09724 | ||
|
4aad0e992b | ||
|
9cc31763d0 | ||
|
7aebb44852 | ||
|
f40cd33b12 | ||
|
6deebdd6d4 | ||
|
a48b9a50c3 | ||
|
baef1c17c3 | ||
|
831b908402 | ||
|
a5dab8ac0a | ||
|
102b032c43 | ||
|
8cf49aad2e | ||
|
0c37672d47 | ||
|
3e1cd77405 | ||
|
bfe970b12f | ||
|
18d7e5d51b | ||
|
8b1ab73894 | ||
|
acc051bcc4 | ||
|
c2e34f7c36 | ||
|
a50be904e4 | ||
|
a204081a04 | ||
|
0702755d0b | ||
|
6200724ac6 | ||
|
ffcca91221 | ||
|
91a0b4012a | ||
|
fceee10aba | ||
|
87d64dba50 | ||
|
1e9af8bfe0 | ||
|
062a480737 | ||
|
8a9f70825f | ||
|
23ac850693 | ||
|
03f0c977d3 | ||
|
3ae3843944 | ||
|
3c098c0703 | ||
|
91ce2aaf77 | ||
|
fe8c74bd9c | ||
|
f4dcf1c3fe | ||
|
e3e545e22e | ||
|
f180d54f6d | ||
|
79213c6bbd | ||
|
2a4b263955 | ||
|
6b1d4c7ed5 | ||
|
f3690011a7 | ||
|
70ceb96313 | ||
|
989cc4deba | ||
|
e4e6ef4bfd |
93 changed files with 8191 additions and 1417 deletions
3
.github/FUNDING.yml
vendored
Normal file
3
.github/FUNDING.yml
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
---
|
||||
github:
|
||||
- "jzelinskie"
|
23
.github/dependabot.yml
vendored
Normal file
23
.github/dependabot.yml
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
---
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
labels:
|
||||
- "component/dependencies"
|
||||
|
||||
- package-ecosystem: "gomod"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
labels:
|
||||
- "component/dependencies"
|
||||
|
||||
- package-ecosystem: "docker"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
labels:
|
||||
- "component/dependencies"
|
112
.github/workflows/build.yaml
vendored
Normal file
112
.github/workflows/build.yaml
vendored
Normal file
|
@ -0,0 +1,112 @@
|
|||
---
|
||||
name: "Build & Test"
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "!dependabot/*"
|
||||
- "main"
|
||||
pull_request:
|
||||
branches: ["*"]
|
||||
jobs:
|
||||
build:
|
||||
name: "Go Build"
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- uses: "actions/checkout@v2"
|
||||
- uses: "actions/setup-go@v2"
|
||||
with:
|
||||
go-version: "^1.17"
|
||||
- name: "Build"
|
||||
run: "go build ./cmd/..."
|
||||
|
||||
unit:
|
||||
name: "Run Unit Tests"
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- uses: "actions/checkout@v2"
|
||||
- uses: "actions/setup-go@v2"
|
||||
with:
|
||||
go-version: "^1.17"
|
||||
- name: "Run `go test`"
|
||||
run: "go test -race ./..."
|
||||
|
||||
e2e-mem:
|
||||
name: "E2E Memory Tests"
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- uses: "actions/checkout@v2"
|
||||
- uses: "actions/setup-go@v2"
|
||||
with:
|
||||
go-version: "^1.17"
|
||||
- name: "Install and configure chihaya"
|
||||
run: |
|
||||
go install ./cmd/chihaya
|
||||
cat ./dist/example_config.yaml
|
||||
- name: "Run end-to-end tests"
|
||||
run: |
|
||||
chihaya --config=./dist/example_config.yaml --debug &
|
||||
pid=$!
|
||||
sleep 2
|
||||
chihaya e2e --debug
|
||||
kill $pid
|
||||
|
||||
e2e-redis:
|
||||
name: "E2E Redis Tests"
|
||||
runs-on: "ubuntu-latest"
|
||||
services:
|
||||
redis:
|
||||
image: "redis"
|
||||
ports: ["6379:6379"]
|
||||
options: "--entrypoint redis-server"
|
||||
steps:
|
||||
- uses: "actions/checkout@v2"
|
||||
- uses: "actions/setup-go@v2"
|
||||
with:
|
||||
go-version: "^1.17"
|
||||
- name: "Install and configure chihaya"
|
||||
run: |
|
||||
go install ./cmd/chihaya
|
||||
curl -LO https://github.com/jzelinskie/faq/releases/download/0.0.6/faq-linux-amd64
|
||||
chmod +x faq-linux-amd64
|
||||
./faq-linux-amd64 '.chihaya.storage = {"config":{"gc_interval":"3m","peer_lifetime":"31m","prometheus_reporting_interval":"1s","redis_broker":"redis://127.0.0.1:6379/0","redis_connect_timeout":"15s","redis_read_timeout":"15s","redis_write_timeout":"15s"},"name":"redis"}' ./dist/example_config.yaml > ./dist/example_redis_config.yaml
|
||||
cat ./dist/example_redis_config.yaml
|
||||
- name: "Run end-to-end tests"
|
||||
run: |
|
||||
chihaya --config=./dist/example_redis_config.yaml --debug &
|
||||
pid=$!
|
||||
sleep 2
|
||||
chihaya e2e --debug
|
||||
kill $pid
|
||||
|
||||
image-build:
|
||||
name: "Docker Build"
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- uses: "actions/checkout@v2"
|
||||
- uses: "docker/setup-qemu-action@v1"
|
||||
- uses: "docker/setup-buildx-action@v1"
|
||||
with:
|
||||
driver-opts: "image=moby/buildkit:master"
|
||||
- uses: "docker/build-push-action@v1"
|
||||
with:
|
||||
push: false
|
||||
tags: "latest"
|
||||
|
||||
helm:
|
||||
name: "Helm Template"
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- uses: "actions/checkout@v2"
|
||||
- name: "Install Helm"
|
||||
uses: "engineerd/configurator@v0.0.5"
|
||||
with:
|
||||
name: "helm"
|
||||
pathInArchive: "linux-amd64/helm"
|
||||
fromGitHubReleases: true
|
||||
repo: "helm/helm"
|
||||
version: "^v3"
|
||||
urlTemplate: "https://get.helm.sh/helm-{{version}}-linux-amd64.tar.gz"
|
||||
token: "${{ secrets.GITHUB_TOKEN }}"
|
||||
- name: "Run `helm template`"
|
||||
working-directory: "./dist/helm/chihaya"
|
||||
run: "helm template . --debug"
|
86
.github/workflows/lint.yaml
vendored
Normal file
86
.github/workflows/lint.yaml
vendored
Normal file
|
@ -0,0 +1,86 @@
|
|||
---
|
||||
name: "Lint"
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "!dependabot/*"
|
||||
- "main"
|
||||
pull_request:
|
||||
branches: ["*"]
|
||||
jobs:
|
||||
go-mod-tidy:
|
||||
name: "Lint Go Modules"
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- uses: "actions/checkout@v2"
|
||||
- uses: "actions/setup-go@v2"
|
||||
with:
|
||||
go-version: "^1.17"
|
||||
- name: "Run `go mod tidy`"
|
||||
run: "go mod tidy && bash -c '[ $(git status --porcelain | tee /dev/fd/2 | wc -c) -eq 0 ]'"
|
||||
|
||||
go-fmt:
|
||||
name: "Format Go"
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- uses: "actions/checkout@v2"
|
||||
- uses: "actions/setup-go@v2"
|
||||
with:
|
||||
go-version: "^1.17"
|
||||
- name: "Install gofumpt"
|
||||
run: "go install mvdan.cc/gofumpt@latest"
|
||||
- name: "Run `gofumpt`"
|
||||
run: |
|
||||
GOFUMPT_OUTPUT="$(find . -iname '*.go' -type f | xargs gofumpt -d)"
|
||||
if [ -n "$GOFUMPT_OUTPUT" ]; then
|
||||
echo "The following files are not correctly formatted:"
|
||||
echo "${GOFUMPT_OUTPUT}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
go-lint:
|
||||
name: "Lint Go"
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- uses: "actions/checkout@v2"
|
||||
- uses: "actions/setup-go@v2"
|
||||
with:
|
||||
go-version: "^1.17"
|
||||
- uses: "golangci/golangci-lint-action@v2"
|
||||
with:
|
||||
version: "v1.43"
|
||||
skip-go-installation: true
|
||||
skip-pkg-cache: true
|
||||
skip-build-cache: false
|
||||
|
||||
extra-lint:
|
||||
name: "Lint YAML & Markdown"
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- uses: "actions/checkout@v2"
|
||||
- uses: "bewuethr/yamllint-action@v1.1.1"
|
||||
with:
|
||||
config-file: ".yamllint"
|
||||
- uses: "nosborn/github-action-markdown-cli@v2.0.0"
|
||||
with:
|
||||
files: "."
|
||||
config_file: ".markdownlint.yaml"
|
||||
|
||||
codeql:
|
||||
name: "Analyze with CodeQL"
|
||||
runs-on: "ubuntu-latest"
|
||||
permissions:
|
||||
actions: "read"
|
||||
contents: "read"
|
||||
security-events: "write"
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
language: ["go"]
|
||||
steps:
|
||||
- uses: "actions/checkout@v2"
|
||||
- uses: "github/codeql-action/init@v1"
|
||||
with:
|
||||
languages: "${{ matrix.language }}"
|
||||
- uses: "github/codeql-action/autobuild@v1"
|
||||
- uses: "github/codeql-action/analyze@v1"
|
50
.golangci.yaml
Normal file
50
.golangci.yaml
Normal file
|
@ -0,0 +1,50 @@
|
|||
---
|
||||
run:
|
||||
timeout: "5m"
|
||||
output:
|
||||
sort-results: true
|
||||
linters-settings:
|
||||
goimports:
|
||||
local-prefixes: "github.com/chihaya/chihaya"
|
||||
gosec:
|
||||
excludes:
|
||||
- "G404" # Allow the usage of math/rand
|
||||
linters:
|
||||
enable:
|
||||
- "bidichk"
|
||||
- "bodyclose"
|
||||
- "deadcode"
|
||||
- "errcheck"
|
||||
- "errname"
|
||||
- "errorlint"
|
||||
- "gofumpt"
|
||||
- "goimports"
|
||||
- "goprintffuncname"
|
||||
- "gosec"
|
||||
- "gosimple"
|
||||
- "govet"
|
||||
- "ifshort"
|
||||
- "importas"
|
||||
- "ineffassign"
|
||||
- "makezero"
|
||||
- "prealloc"
|
||||
- "predeclared"
|
||||
- "revive"
|
||||
- "rowserrcheck"
|
||||
- "staticcheck"
|
||||
- "structcheck"
|
||||
- "stylecheck"
|
||||
- "tenv"
|
||||
- "typecheck"
|
||||
- "unconvert"
|
||||
- "unused"
|
||||
- "varcheck"
|
||||
- "wastedassign"
|
||||
- "whitespace"
|
||||
issues:
|
||||
include:
|
||||
- "EXC0012" # Exported should have comment
|
||||
- "EXC0012" # Exported should have comment
|
||||
- "EXC0013" # Package comment should be of form
|
||||
- "EXC0014" # Comment on exported should be of form
|
||||
- "EXC0015" # Should have a package comment
|
3
.markdownlint.yaml
Normal file
3
.markdownlint.yaml
Normal file
|
@ -0,0 +1,3 @@
|
|||
---
|
||||
line-length: false
|
||||
no-hard-tabs: false
|
22
.travis.yml
22
.travis.yml
|
@ -1,22 +0,0 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.7
|
||||
sudo: false
|
||||
install:
|
||||
- go get -t ./...
|
||||
- go get -u github.com/golang/lint/golint
|
||||
- go get -u golang.org/x/tools/cmd/goimports
|
||||
script:
|
||||
- go test -v $(go list ./... | grep -v /vendor/)
|
||||
- go vet $(go list ./... | grep -v /vendor/)
|
||||
- diff <(goimports -d $(find . -type f -name '*.go' -not -path "./vendor/*")) <(printf "")
|
||||
- (for d in $(go list ./... | grep -v /vendor/); do diff <(golint $d) <(printf "") || exit 1; done)
|
||||
notifications:
|
||||
irc:
|
||||
channels:
|
||||
- irc.freenode.net#chihaya
|
||||
use_notice: true
|
||||
skip_join: true
|
||||
on_success: always
|
||||
on_failure: always
|
||||
email: false
|
11
.yamllint
Normal file
11
.yamllint
Normal file
|
@ -0,0 +1,11 @@
|
|||
# vim: ft=yaml
|
||||
---
|
||||
yaml-files:
|
||||
- "*.yaml"
|
||||
- "*.yml"
|
||||
- ".yamllint"
|
||||
ignore: "dist/helm/"
|
||||
extends: "default"
|
||||
rules:
|
||||
quoted-strings: "enable"
|
||||
line-length: "disable"
|
|
@ -1,78 +1,3 @@
|
|||
## Discussion
|
||||
## Contributing to LBRY
|
||||
|
||||
Long-term discussion and bug reports are maintained via [GitHub Issues].
|
||||
Code review is done via [GitHub Pull Requests].
|
||||
Real-time discussion is done via [freenode IRC].
|
||||
|
||||
[GitHub Issues]: https://github.com/chihaya/chihaya/issues
|
||||
[GitHub Pull Requests]: https://github.com/chihaya/chihaya/pulls
|
||||
[freenode IRC]: http://webchat.freenode.net/?channels=chihaya
|
||||
|
||||
## Pull Request Procedure
|
||||
|
||||
If you're looking to contribute, search the GitHub for issues labeled "low-hanging fruit".
|
||||
You can also hop into IRC and ask a developer who's online for their opinion.
|
||||
|
||||
Small, self-describing fixes are perfectly fine to submit without discussion.
|
||||
However, please do not submit a massive Pull Request without prior communication.
|
||||
Large, unannounced changes usually lead to confusion and time wasted for everyone.
|
||||
If you were planning to write a large change, post an issue on GitHub first and discuss it.
|
||||
|
||||
Pull Requests will be treated as "review requests", and we will give feedback we expect to see corrected on style and substance before merging.
|
||||
Changes contributed via Pull Request should focus on a single issue at a time.
|
||||
We will not accept pull-requests that try to "sneak" unrelated changes in.
|
||||
|
||||
The average contribution flow is as follows:
|
||||
|
||||
- Determine what to work on via creating and issue or finding an issue you want to solve.
|
||||
- Create a topic branch from where you want to base your work. This is usually `master`.
|
||||
- Make commits of logical units.
|
||||
- Make sure your commit messages are in the proper format
|
||||
- Push your changes to a topic branch in your fork of the repository.
|
||||
- Submit a pull request.
|
||||
- Your PR will be reviewed and merged by one of the maintainers.
|
||||
- You may be asked to make changes and [rebase] your commits.
|
||||
|
||||
[rebase]: https://git-scm.com/book/en/v2/Git-Branching-Rebasin://git-scm.com/book/en/v2/Git-Branching-Rebasing
|
||||
|
||||
## Style
|
||||
|
||||
Any new files should include the license header found at the top of every source file.
|
||||
|
||||
### Go
|
||||
|
||||
The project follows idiomatic [Go conventions] for style.
|
||||
If you're just starting out writing Go, you can check out this [meta-package] that documents style idiomatic style decisions you will find in open source Go code.
|
||||
All files should have `gofmt` executed on them and code should strive to have full coverage of static analysis tools like [govet] and [golint].
|
||||
|
||||
[Go conventions]: https://github.com/golang/go/wiki/CodeReviewComments
|
||||
[meta-package]: https://github.com/jzelinskie/conventions
|
||||
[govet]: https://golang.org/cmd/vet
|
||||
[golint]: https://github.com/golang/lint
|
||||
|
||||
### Commit Messages
|
||||
|
||||
We follow a rough convention for commit messages that is designed to answer two questions: what changed and why.
|
||||
The subject line should feature the what and the body of the commit should describe the why.
|
||||
|
||||
```
|
||||
scripts: add the test-cluster command
|
||||
|
||||
this uses tmux to setup a test cluster that you can easily kill and
|
||||
start for debugging.
|
||||
|
||||
Fixes #38
|
||||
```
|
||||
|
||||
The format can be described more formally as follows:
|
||||
|
||||
```
|
||||
<subsystem>: <what changed>
|
||||
<BLANK LINE>
|
||||
<why this change was made>
|
||||
<BLANK LINE>
|
||||
<footer>
|
||||
```
|
||||
|
||||
The first line is the subject and should be no longer than 70 characters, the second line is always blank, and other lines should be wrapped at 80 characters.
|
||||
This allows the message to be easier to read on GitHub as well as in various git tools.
|
||||
https://lbry.tech/contribute
|
||||
|
|
29
Dockerfile
29
Dockerfile
|
@ -1,23 +1,26 @@
|
|||
FROM golang:alpine
|
||||
MAINTAINER Jimmy Zelinskie <jimmyzelinskie@gmail.com>
|
||||
FROM golang:alpine AS build-env
|
||||
LABEL maintainer "Jimmy Zelinskie <jimmyzelinskie+git@gmail.com>"
|
||||
|
||||
# Install OS-level dependencies.
|
||||
RUN apk update && \
|
||||
apk add curl git && \
|
||||
curl https://glide.sh/get | sh
|
||||
RUN apk add --no-cache curl git
|
||||
|
||||
# Copy our source code into the container.
|
||||
WORKDIR /go/src/github.com/chihaya/chihaya
|
||||
ADD . /go/src/github.com/chihaya/chihaya
|
||||
COPY . /go/src/github.com/chihaya/chihaya
|
||||
|
||||
# Install our golang dependencies and compile our binary.
|
||||
RUN glide install
|
||||
RUN go install github.com/chihaya/chihaya/cmd/chihaya
|
||||
RUN CGO_ENABLED=0 go install ./cmd/chihaya
|
||||
|
||||
# Delete the compiler from the container.
|
||||
# This makes the container much smaller when using Quay's squashing feature.
|
||||
RUN rm -r /usr/local/go
|
||||
FROM alpine:latest
|
||||
RUN apk add --no-cache ca-certificates
|
||||
COPY --from=build-env /go/bin/chihaya /chihaya
|
||||
|
||||
RUN adduser -D chihaya
|
||||
|
||||
# Expose a docker interface to our binary.
|
||||
EXPOSE 6880 6881
|
||||
ENTRYPOINT ["chihaya"]
|
||||
EXPOSE 6880 6969
|
||||
|
||||
# Drop root privileges
|
||||
USER chihaya
|
||||
|
||||
ENTRYPOINT ["/chihaya"]
|
||||
|
|
18
LICENSE
18
LICENSE
|
@ -1,3 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015-2022 LBRY Inc
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the
|
||||
following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
|
||||
|
||||
Chihaya is released under a BSD 2-Clause license, reproduced below.
|
||||
|
||||
Copyright (c) 2015, The Chihaya Authors
|
||||
|
|
233
README.md
233
README.md
|
@ -1,123 +1,146 @@
|
|||
# Chihaya
|
||||
# LBRY Tracker
|
||||
|
||||
[](https://travis-ci.org/chihaya/chihaya)
|
||||
[](https://quay.io/repository/jzelinskie/chihaya)
|
||||
[](https://goreportcard.com/report/github.com/chihaya/chihaya)
|
||||
[](https://godoc.org/github.com/chihaya/chihaya)
|
||||
[](https://en.wikipedia.org/wiki/BSD_licenses#2-clause_license_.28.22Simplified_BSD_License.22_or_.22FreeBSD_License.22.29)
|
||||
[](http://webchat.freenode.net/?channels=chihaya)
|
||||
The LBRY tracker is a server that helps peers find each other. It was forked from [Chihaya](https://github.com/chihaya/chihaya), an open-source [BitTorrent tracker](https://en.wikipedia.org/wiki/BitTorrent_tracker).
|
||||
|
||||
**Note:** The master branch may be in an unstable or even broken state during development.
|
||||
Please use [releases] instead of the master branch in order to get stable binaries.
|
||||
|
||||
Chihaya is an open source [BitTorrent tracker] written in [Go].
|
||||
## Installation and Usage
|
||||
|
||||
Differentiating features include:
|
||||
### Building from HEAD
|
||||
|
||||
- Protocol-agnostic middleware
|
||||
- HTTP and UDP frontends
|
||||
- IPv4 and IPv6 support
|
||||
- [YAML] configuration
|
||||
- Metrics via [Prometheus]
|
||||
|
||||
[releases]: https://github.com/chihaya/chihaya/releases
|
||||
[BitTorrent tracker]: http://en.wikipedia.org/wiki/BitTorrent_tracker
|
||||
[Go]: https://golang.org
|
||||
[YAML]: http://yaml.org
|
||||
[Prometheus]: http://prometheus.io
|
||||
|
||||
## Why Chihaya?
|
||||
|
||||
Chihaya is built for developers looking to integrate BitTorrent into a preexisting production environment.
|
||||
Chihaya's pluggable architecture and middleware framework offers a simple and flexible integration point that abstracts the BitTorrent tracker protocols.
|
||||
The most common use case for Chihaya is integration with the deployment of cloud software.
|
||||
|
||||
[OpenBittorrent]: https://openbittorrent.com
|
||||
|
||||
### Production Use
|
||||
|
||||
#### Facebook
|
||||
|
||||
[Facebook] uses BitTorrent to deploy new versions of their software.
|
||||
In order to optimize the flow of traffic within their datacenters, Chihaya is configured to prefer peers within the same subnet.
|
||||
Because Facebook organizes their network such that server racks are allocated IP addresses in the same subnet, the vast majority of deployment traffic never impacts the congested areas of their network.
|
||||
|
||||
[Facebook]: https://facebook.com
|
||||
|
||||
#### CoreOS
|
||||
|
||||
[Quay] is a container registry that offers the ability to download containers via BitTorrent in order to speed up large or geographically distant deployments.
|
||||
Announce URLs from Quay's torrent files contain a [JWT] in order to allow Chihaya to verify that an infohash was approved by the registry.
|
||||
By verifying the infohash, Quay can be sure that only their content is being shared by their tracker.
|
||||
|
||||
[Quay]: https://quay.io
|
||||
[JWT]: https://jwt.io
|
||||
|
||||
## Development
|
||||
|
||||
### Getting Started
|
||||
|
||||
In order to compile the project, the [latest stable version of Go] and a [working Go environment] are required.
|
||||
In order to compile the project, the [latest stable version of Go] and knowledge of a [working Go environment] are required.
|
||||
|
||||
```sh
|
||||
$ go get -t -u github.com/chihaya/chihaya
|
||||
$ go install github.com/chihaya/chihaya/cmd/chihaya
|
||||
git clone git@github.com:lbryio/tracker.git
|
||||
cd tracker
|
||||
go build ./cmd/chihaya
|
||||
./chihaya --help
|
||||
```
|
||||
|
||||
[latest stable version of Go]: https://golang.org/dl
|
||||
[working Go environment]: https://golang.org/doc/code.html
|
||||
|
||||
### Contributing
|
||||
### Testing
|
||||
|
||||
Long-term discussion and bug reports are maintained via [GitHub Issues].
|
||||
Code review is done via [GitHub Pull Requests].
|
||||
Real-time discussion is done via [freenode IRC].
|
||||
The following will run all tests and benchmarks.
|
||||
Removing `-bench` will just run unit tests.
|
||||
|
||||
For more information read [CONTRIBUTING.md].
|
||||
|
||||
[GitHub Issues]: https://github.com/chihaya/chihaya/issues
|
||||
[GitHub Pull Requests]: https://github.com/chihaya/chihaya/pulls
|
||||
[freenode IRC]: http://webchat.freenode.net/?channels=chihaya
|
||||
[CONTRIBUTING.md]: https://github.com/chihaya/chihaya/blob/master/CONTRIBUTING.md
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
+----------------------+
|
||||
| BitTorrent Client |<--------------+
|
||||
+----------------------+ |
|
||||
| |
|
||||
| |
|
||||
| |
|
||||
+------------v--------------------------+-------------------+-------------------------+
|
||||
|+----------------------+ +----------------------+frontend| chihaya|
|
||||
|| Parser | | Writer | | |
|
||||
|+----------------------+ +----------------------+ | |
|
||||
| | ^ | |
|
||||
+------------+--------------------------+-------------------+ |
|
||||
+------------v--------------------------+-------------------+ |
|
||||
|+----------------------+ +----------------------+ logic| |
|
||||
|| PreHook Middleware |-->| Response Generator |<-------|-------------+ |
|
||||
|+----------------------+ +----------------------+ | | |
|
||||
| | | |
|
||||
|+----------------------+ | +----------------------+|
|
||||
|| PostHook Middleware |-----------------------------------|>| Storage ||
|
||||
|+----------------------+ | +----------------------+|
|
||||
| | |
|
||||
+-----------------------------------------------------------+-------------------------+
|
||||
```sh
|
||||
go test -bench $(go list ./...)
|
||||
```
|
||||
|
||||
BitTorrent clients send Announce and Scrape requests to a _Frontend_.
|
||||
Frontends parse requests and write responses for the particular protocol they implement.
|
||||
The _TrackerLogic_ interface to is used to generate responses for their requests and optionally perform a task after responding to a client.
|
||||
A configurable chain of _PreHook_ and _PostHook_ middleware is used to construct an instance of TrackerLogic.
|
||||
PreHooks are middleware that are executed before the response has been written.
|
||||
After all PreHooks have executed, any missing response fields that are required are filled by reading out of the configured implementation of the _Storage_ interface.
|
||||
PostHooks are asynchronous tasks that occur after a response has been delivered to the client.
|
||||
Request data is written to the storage asynchronously in one of these PostHooks.
|
||||
The tracker executable contains a command to end-to-end test a BitTorrent tracker.
|
||||
See
|
||||
|
||||
## Related projects
|
||||
```sh
|
||||
tracker --help
|
||||
```
|
||||
|
||||
- [BitTorrent.org](https://github.com/bittorrent/bittorrent.org): a static website containing the BitTorrent spec and all BEPs
|
||||
- [OpenTracker](http://erdgeist.org/arts/software/opentracker): a popular BitTorrent tracker written in C
|
||||
- [Ocelot](https://github.com/WhatCD/Ocelot): a private BitTorrent tracker written in C++
|
||||
### Configuration
|
||||
|
||||
Configuration of the tracker is done via one YAML configuration file.
|
||||
The `dist/` directory contains an example configuration file.
|
||||
Files and directories under `docs/` contain detailed information about configuring middleware, storage implementations, architecture etc.
|
||||
|
||||
This is an example for an UDP server running on 9252 with metrics enabled. Remember to **change the private key** to some random string.
|
||||
|
||||
```
|
||||
---
|
||||
chihaya:
|
||||
announce_interval: "30m"
|
||||
min_announce_interval: "15m"
|
||||
metrics_addr: "0.0.0.0:6880"
|
||||
udp:
|
||||
addr: "0.0.0.0:9252"
|
||||
max_clock_skew: "10s"
|
||||
private_key: ">>>>CHANGE THIS TO SOME RANDOM THING<<<<"
|
||||
enable_request_timing: false
|
||||
allow_ip_spoofing: false
|
||||
max_numwant: 100
|
||||
default_numwant: 50
|
||||
max_scrape_infohashes: 50
|
||||
storage:
|
||||
name: "memory"
|
||||
config:
|
||||
gc_interval: "3m"
|
||||
peer_lifetime: "31m"
|
||||
shard_count: 1024
|
||||
prometheus_reporting_interval: "1s"
|
||||
```
|
||||
|
||||
# Running from Docker
|
||||
|
||||
This section assumes `docker` and `docker-compose` to be installed on a Linux distro. Please check official docs on how to install [Docker Engine](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/).
|
||||
|
||||
## Docker Compose from lbry/tracker
|
||||
In order to define a tracker service and let Docker Compose manage it, create a file named `docker-compose.yml` with:
|
||||
```
|
||||
version: "3"
|
||||
services:
|
||||
tracker:
|
||||
image: lbry/tracker
|
||||
command: --config /config/conf.yml
|
||||
volumes:
|
||||
- .:/config
|
||||
network_mode: host
|
||||
restart: always
|
||||
```
|
||||
Unfortunately the tracker does not work without `network_mode: host` due some bug with UDP on Docker. In this mode, firewall configuration needs to be done manually. If using `ufw`, try `ufw allow 9252`.
|
||||
|
||||
Now, move the configuration to the same directory as `docker-compose.yml`, naming it `conf.yml`. If it is not ready, check the configuration section above.
|
||||
|
||||
Start the tracker by running the following in the same directory as the compose file:
|
||||
`docker-compose up -d`
|
||||
Logs can be read with:
|
||||
`docker-compose logs`
|
||||
To stop:
|
||||
`docker-compose down`
|
||||
|
||||
## Building the containter
|
||||
A Dockerfile is provided within the repo. To build the container locally, run this command on the same directory the repo was cloned:
|
||||
`sudo docker build -f Dockerfile . -t some_name/tracker:latest`
|
||||
It will produce an image called `some_name/tracker`, which can be used in the Docker Compose section.
|
||||
|
||||
# Running from source as a service
|
||||
|
||||
For ease of maintenance, it is recommended to run the tracker as a service.
|
||||
|
||||
This is an example for running it under as the current user using `systemd`:
|
||||
```
|
||||
[Unit]
|
||||
Description=Chihaya BT tracker
|
||||
After=network.target
|
||||
[Service]
|
||||
Type=simple
|
||||
#User=chihaya
|
||||
#Group=chihaya
|
||||
WorkingDirectory=/home/user/github/tracker
|
||||
ExecStart=/home/user/github/tracker/chihaya --config dist/example_config.yaml
|
||||
Restart=on-failure
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
To try it, change `/home/user/github/tracker` to where the code was cloned and run:
|
||||
```bash=
|
||||
mkdir -p ~/.config/systemd/user
|
||||
# PASTE FILE IN ~/.config/systemd/user/tracker.service
|
||||
systemctl --user enable tracker
|
||||
systemctl --user start tracker
|
||||
systemctl --user status tracker
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions to this project are welcome, encouraged, and compensated. For more details, please check [this](https://lbry.tech/contribute) link.
|
||||
|
||||
## License
|
||||
|
||||
LBRY's code changes are MIT licensed, and the upstream Chihaya code is licensed under a BSD 2-Clause license. For the full license, see [LICENSE](LICENSE).
|
||||
|
||||
## Security
|
||||
|
||||
We take security seriously. Please contact security@lbry.com regarding any security issues. [Our PGP key is here](https://lbry.com/faq/pgp-key) if you need it.
|
||||
|
||||
## Contact
|
||||
|
||||
The primary contact for this project is [@shyba](mailto:vshyba@lbry.com).
|
||||
|
|
|
@ -4,8 +4,11 @@
|
|||
package bittorrent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
)
|
||||
|
||||
// PeerID represents a peer ID.
|
||||
|
@ -24,6 +27,16 @@ func PeerIDFromBytes(b []byte) PeerID {
|
|||
return PeerID(buf)
|
||||
}
|
||||
|
||||
// String implements fmt.Stringer, returning the base16 encoded PeerID.
|
||||
func (p PeerID) String() string {
|
||||
return fmt.Sprintf("%x", p[:])
|
||||
}
|
||||
|
||||
// RawString returns a 20-byte string of the raw bytes of the ID.
|
||||
func (p PeerID) RawString() string {
|
||||
return string(p[:])
|
||||
}
|
||||
|
||||
// PeerIDFromString creates a PeerID from a string.
|
||||
//
|
||||
// It panics if s is not 20 bytes long.
|
||||
|
@ -66,11 +79,24 @@ func InfoHashFromString(s string) InfoHash {
|
|||
return InfoHash(buf)
|
||||
}
|
||||
|
||||
// String implements fmt.Stringer, returning the base16 encoded InfoHash.
|
||||
func (i InfoHash) String() string {
|
||||
return fmt.Sprintf("%x", i[:])
|
||||
}
|
||||
|
||||
// RawString returns a 20-byte string of the raw bytes of the InfoHash.
|
||||
func (i InfoHash) RawString() string {
|
||||
return string(i[:])
|
||||
}
|
||||
|
||||
// AnnounceRequest represents the parsed parameters from an announce request.
|
||||
type AnnounceRequest struct {
|
||||
Event Event
|
||||
InfoHash InfoHash
|
||||
Compact bool
|
||||
EventProvided bool
|
||||
NumWantProvided bool
|
||||
IPProvided bool
|
||||
NumWant uint32
|
||||
Left uint64
|
||||
Downloaded uint64
|
||||
|
@ -80,6 +106,24 @@ type AnnounceRequest struct {
|
|||
Params
|
||||
}
|
||||
|
||||
// LogFields renders the current response as a set of log fields.
|
||||
func (r AnnounceRequest) LogFields() log.Fields {
|
||||
return log.Fields{
|
||||
"event": r.Event,
|
||||
"infoHash": r.InfoHash,
|
||||
"compact": r.Compact,
|
||||
"eventProvided": r.EventProvided,
|
||||
"numWantProvided": r.NumWantProvided,
|
||||
"ipProvided": r.IPProvided,
|
||||
"numWant": r.NumWant,
|
||||
"left": r.Left,
|
||||
"downloaded": r.Downloaded,
|
||||
"uploaded": r.Uploaded,
|
||||
"peer": r.Peer,
|
||||
"params": r.Params,
|
||||
}
|
||||
}
|
||||
|
||||
// AnnounceResponse represents the parameters used to create an announce
|
||||
// response.
|
||||
type AnnounceResponse struct {
|
||||
|
@ -92,37 +136,116 @@ type AnnounceResponse struct {
|
|||
IPv6Peers []Peer
|
||||
}
|
||||
|
||||
// LogFields renders the current response as a set of log fields.
|
||||
func (r AnnounceResponse) LogFields() log.Fields {
|
||||
return log.Fields{
|
||||
"compact": r.Compact,
|
||||
"complete": r.Complete,
|
||||
"interval": r.Interval,
|
||||
"minInterval": r.MinInterval,
|
||||
"ipv4Peers": r.IPv4Peers,
|
||||
"ipv6Peers": r.IPv6Peers,
|
||||
}
|
||||
}
|
||||
|
||||
// ScrapeRequest represents the parsed parameters from a scrape request.
|
||||
type ScrapeRequest struct {
|
||||
AddressFamily AddressFamily
|
||||
InfoHashes []InfoHash
|
||||
Params Params
|
||||
}
|
||||
|
||||
// LogFields renders the current response as a set of log fields.
|
||||
func (r ScrapeRequest) LogFields() log.Fields {
|
||||
return log.Fields{
|
||||
"addressFamily": r.AddressFamily,
|
||||
"infoHashes": r.InfoHashes,
|
||||
"params": r.Params,
|
||||
}
|
||||
}
|
||||
|
||||
// ScrapeResponse represents the parameters used to create a scrape response.
|
||||
//
|
||||
// The Scrapes must be in the same order as the InfoHashes in the corresponding
|
||||
// ScrapeRequest.
|
||||
type ScrapeResponse struct {
|
||||
Files map[InfoHash]Scrape
|
||||
Files []Scrape
|
||||
}
|
||||
|
||||
// LogFields renders the current response as a set of Logrus fields.
|
||||
func (sr ScrapeResponse) LogFields() log.Fields {
|
||||
return log.Fields{
|
||||
"files": sr.Files,
|
||||
}
|
||||
}
|
||||
|
||||
// Scrape represents the state of a swarm that is returned in a scrape response.
|
||||
type Scrape struct {
|
||||
InfoHash InfoHash
|
||||
Snatches uint32
|
||||
Complete uint32
|
||||
Incomplete uint32
|
||||
}
|
||||
|
||||
// AddressFamily is the address family of an IP address.
|
||||
type AddressFamily uint8
|
||||
|
||||
func (af AddressFamily) String() string {
|
||||
switch af {
|
||||
case IPv4:
|
||||
return "IPv4"
|
||||
case IPv6:
|
||||
return "IPv6"
|
||||
default:
|
||||
panic("tried to print unknown AddressFamily")
|
||||
}
|
||||
}
|
||||
|
||||
// AddressFamily constants.
|
||||
const (
|
||||
IPv4 AddressFamily = iota
|
||||
IPv6
|
||||
)
|
||||
|
||||
// IP is a net.IP with an AddressFamily.
|
||||
type IP struct {
|
||||
net.IP
|
||||
AddressFamily
|
||||
}
|
||||
|
||||
func (ip IP) String() string {
|
||||
return ip.IP.String()
|
||||
}
|
||||
|
||||
// Peer represents the connection details of a peer that is returned in an
|
||||
// announce response.
|
||||
type Peer struct {
|
||||
ID PeerID
|
||||
IP net.IP
|
||||
IP IP
|
||||
Port uint16
|
||||
}
|
||||
|
||||
// String implements fmt.Stringer to return a human-readable representation.
|
||||
// The string will have the format <PeerID>@[<IP>]:<port>, for example
|
||||
// "0102030405060708090a0b0c0d0e0f1011121314@[10.11.12.13]:1234"
|
||||
func (p Peer) String() string {
|
||||
return fmt.Sprintf("%s@[%s]:%d", p.ID.String(), p.IP.String(), p.Port)
|
||||
}
|
||||
|
||||
// LogFields renders the current peer as a set of Logrus fields.
|
||||
func (p Peer) LogFields() log.Fields {
|
||||
return log.Fields{
|
||||
"ID": p.ID,
|
||||
"IP": p.IP,
|
||||
"port": p.Port,
|
||||
}
|
||||
}
|
||||
|
||||
// Equal reports whether p and x are the same.
|
||||
func (p Peer) Equal(x Peer) bool { return p.EqualEndpoint(x) && p.ID == x.ID }
|
||||
|
||||
// EqualEndpoint reports whether p and x have the same endpoint.
|
||||
func (p Peer) EqualEndpoint(x Peer) bool { return p.Port == x.Port && p.IP.Equal(x.IP) }
|
||||
func (p Peer) EqualEndpoint(x Peer) bool { return p.Port == x.Port && p.IP.Equal(x.IP.IP) }
|
||||
|
||||
// ClientError represents an error that should be exposed to the client over
|
||||
// the BitTorrent protocol implementation.
|
||||
|
|
53
bittorrent/bittorrent_test.go
Normal file
53
bittorrent/bittorrent_test.go
Normal file
|
@ -0,0 +1,53 @@
|
|||
package bittorrent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
b = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
|
||||
expected = "0102030405060708090a0b0c0d0e0f1011121314"
|
||||
)
|
||||
|
||||
var peerStringTestCases = []struct {
|
||||
input Peer
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
input: Peer{
|
||||
ID: PeerIDFromBytes(b),
|
||||
IP: IP{net.IPv4(10, 11, 12, 1), IPv4},
|
||||
Port: 1234,
|
||||
},
|
||||
expected: fmt.Sprintf("%s@[10.11.12.1]:1234", expected),
|
||||
},
|
||||
{
|
||||
input: Peer{
|
||||
ID: PeerIDFromBytes(b),
|
||||
IP: IP{net.ParseIP("2001:db8::ff00:42:8329"), IPv6},
|
||||
Port: 1234,
|
||||
},
|
||||
expected: fmt.Sprintf("%s@[2001:db8::ff00:42:8329]:1234", expected),
|
||||
},
|
||||
}
|
||||
|
||||
func TestPeerID_String(t *testing.T) {
|
||||
s := PeerIDFromBytes(b).String()
|
||||
require.Equal(t, expected, s)
|
||||
}
|
||||
|
||||
func TestInfoHash_String(t *testing.T) {
|
||||
s := InfoHashFromBytes(b).String()
|
||||
require.Equal(t, expected, s)
|
||||
}
|
||||
|
||||
func TestPeer_String(t *testing.T) {
|
||||
for _, c := range peerStringTestCases {
|
||||
got := c.input.String()
|
||||
require.Equal(t, c.expected, got)
|
||||
}
|
||||
}
|
|
@ -5,7 +5,7 @@ import (
|
|||
)
|
||||
|
||||
func TestClientID(t *testing.T) {
|
||||
var clientTable = []struct{ peerID, clientID string }{
|
||||
clientTable := []struct{ peerID, clientID string }{
|
||||
{"-AZ3034-6wfG2wk6wWLc", "AZ3034"},
|
||||
{"-AZ3042-6ozMq5q6Q3NX", "AZ3042"},
|
||||
{"-BS5820-oy4La2MWGEFj", "BS5820"},
|
||||
|
@ -43,11 +43,13 @@ func TestClientID(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, tt := range clientTable {
|
||||
t.Run(tt.peerID, func(t *testing.T) {
|
||||
var clientID ClientID
|
||||
copy(clientID[:], []byte(tt.clientID))
|
||||
parsedID := NewClientID(PeerIDFromString(tt.peerID))
|
||||
if parsedID != clientID {
|
||||
t.Error("Incorrectly parsed peer ID", tt.peerID, "as", parsedID)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,13 +1,14 @@
|
|||
package bittorrent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
var table = []struct {
|
||||
table := []struct {
|
||||
data string
|
||||
expected Event
|
||||
expectedErr error
|
||||
|
@ -22,8 +23,17 @@ func TestNew(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, tt := range table {
|
||||
t.Run(fmt.Sprintf("%#v expecting %s", tt.data, nilPrinter(tt.expectedErr)), func(t *testing.T) {
|
||||
got, err := NewEvent(tt.data)
|
||||
assert.Equal(t, err, tt.expectedErr, "errors should equal the expected value")
|
||||
assert.Equal(t, got, tt.expected, "events should equal the expected value")
|
||||
require.Equal(t, err, tt.expectedErr, "errors should equal the expected value")
|
||||
require.Equal(t, got, tt.expected, "events should equal the expected value")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func nilPrinter(err error) string {
|
||||
if err == nil {
|
||||
return "nil"
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
|
|
@ -5,6 +5,8 @@ import (
|
|||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
)
|
||||
|
||||
// Params is used to fetch (optional) request parameters from an Announce.
|
||||
|
@ -39,6 +41,10 @@ var ErrKeyNotFound = errors.New("query: value for the provided key does not exis
|
|||
// with invalid length.
|
||||
var ErrInvalidInfohash = ClientError("provided invalid infohash")
|
||||
|
||||
// ErrInvalidQueryEscape is returned when a query string contains invalid
|
||||
// escapes.
|
||||
var ErrInvalidQueryEscape = ClientError("invalid query escape")
|
||||
|
||||
// QueryParams parses a URL Query and implements the Params interface with some
|
||||
// additional helpers.
|
||||
type QueryParams struct {
|
||||
|
@ -48,6 +54,37 @@ type QueryParams struct {
|
|||
infoHashes []InfoHash
|
||||
}
|
||||
|
||||
type routeParamsKey struct{}
|
||||
|
||||
// RouteParamsKey is a key for the context of a request that
|
||||
// contains the named parameters from the http router.
|
||||
var RouteParamsKey = routeParamsKey{}
|
||||
|
||||
// RouteParam is a type that contains the values from the named parameters
|
||||
// on the route.
|
||||
type RouteParam struct {
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
|
||||
// RouteParams is a collection of RouteParam instances.
|
||||
type RouteParams []RouteParam
|
||||
|
||||
// ByName returns the value of the first RouteParam that matches the given
|
||||
// name. If no matching RouteParam is found, an empty string is returned.
|
||||
// In the event that a "catch-all" parameter is provided on the route and
|
||||
// no value is matched, an empty string is returned. For example: a route of
|
||||
// "/announce/*param" matches on "/announce/". However, ByName("param") will
|
||||
// return an empty string.
|
||||
func (rp RouteParams) ByName(name string) string {
|
||||
for _, p := range rp {
|
||||
if p.Key == name {
|
||||
return p.Value
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// ParseURLData parses a request URL or UDP URLData as defined in BEP41.
|
||||
// It expects a concatenated string of the request's path and query parts as
|
||||
// defined in RFC 3986. As both the udp: and http: scheme used by BitTorrent
|
||||
|
@ -63,6 +100,10 @@ type QueryParams struct {
|
|||
// parse each value as an InfoHash and return an error if parsing fails. All
|
||||
// InfoHashes are collected and can later be retrieved by calling the InfoHashes
|
||||
// method.
|
||||
//
|
||||
// Also note that any error that is encountered during parsing is returned as a
|
||||
// ClientError, as this method is expected to be used to parse client-provided
|
||||
// data.
|
||||
func ParseURLData(urlData string) (*QueryParams, error) {
|
||||
var path, query string
|
||||
|
||||
|
@ -76,7 +117,7 @@ func ParseURLData(urlData string) (*QueryParams, error) {
|
|||
|
||||
q, err := parseQuery(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, ClientError(err.Error())
|
||||
}
|
||||
q.path = path
|
||||
return q, nil
|
||||
|
@ -84,69 +125,55 @@ func ParseURLData(urlData string) (*QueryParams, error) {
|
|||
|
||||
// parseQuery parses a URL query into QueryParams.
|
||||
// The query is expected to exclude the delimiting '?'.
|
||||
func parseQuery(rawQuery string) (*QueryParams, error) {
|
||||
var (
|
||||
keyStart, keyEnd int
|
||||
valStart, valEnd int
|
||||
|
||||
onKey = true
|
||||
|
||||
func parseQuery(query string) (q *QueryParams, err error) {
|
||||
// This is basically url.parseQuery, but with a map[string]string
|
||||
// instead of map[string][]string for the values.
|
||||
q = &QueryParams{
|
||||
query: rawQuery,
|
||||
query: query,
|
||||
infoHashes: nil,
|
||||
params: make(map[string]string),
|
||||
}
|
||||
)
|
||||
|
||||
for i, length := 0, len(rawQuery); i < length; i++ {
|
||||
separator := rawQuery[i] == '&' || rawQuery[i] == ';'
|
||||
last := i == length-1
|
||||
|
||||
if separator || last {
|
||||
if onKey && !last {
|
||||
keyStart = i + 1
|
||||
for query != "" {
|
||||
key := query
|
||||
if i := strings.IndexAny(key, "&;"); i >= 0 {
|
||||
key, query = key[:i], key[i+1:]
|
||||
} else {
|
||||
query = ""
|
||||
}
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if last && !separator && !onKey {
|
||||
valEnd = i
|
||||
value := ""
|
||||
if i := strings.Index(key, "="); i >= 0 {
|
||||
key, value = key[:i], key[i+1:]
|
||||
}
|
||||
|
||||
keyStr, err := url.QueryUnescape(rawQuery[keyStart : keyEnd+1])
|
||||
key, err = url.QueryUnescape(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// QueryUnescape returns an error like "invalid escape: '%x'".
|
||||
// But frontends record these errors to prometheus, which generates
|
||||
// a lot of time series.
|
||||
// We log it here for debugging instead.
|
||||
log.Debug("failed to unescape query param key", log.Err(err))
|
||||
return nil, ErrInvalidQueryEscape
|
||||
}
|
||||
|
||||
var valStr string
|
||||
|
||||
if valEnd > 0 {
|
||||
valStr, err = url.QueryUnescape(rawQuery[valStart : valEnd+1])
|
||||
value, err = url.QueryUnescape(value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// QueryUnescape returns an error like "invalid escape: '%x'".
|
||||
// But frontends record these errors to prometheus, which generates
|
||||
// a lot of time series.
|
||||
// We log it here for debugging instead.
|
||||
log.Debug("failed to unescape query param value", log.Err(err))
|
||||
return nil, ErrInvalidQueryEscape
|
||||
}
|
||||
|
||||
if keyStr == "info_hash" {
|
||||
if len(valStr) != 20 {
|
||||
if key == "info_hash" {
|
||||
if len(value) != 20 {
|
||||
return nil, ErrInvalidInfohash
|
||||
}
|
||||
q.infoHashes = append(q.infoHashes, InfoHashFromString(valStr))
|
||||
q.infoHashes = append(q.infoHashes, InfoHashFromString(value))
|
||||
} else {
|
||||
q.params[strings.ToLower(keyStr)] = valStr
|
||||
}
|
||||
|
||||
valEnd = 0
|
||||
onKey = true
|
||||
keyStart = i + 1
|
||||
|
||||
} else if rawQuery[i] == '=' {
|
||||
onKey = false
|
||||
valStart = i + 1
|
||||
valEnd = 0
|
||||
} else if onKey {
|
||||
keyEnd = i
|
||||
} else {
|
||||
valEnd = i
|
||||
q.params[strings.ToLower(key)] = value
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -160,15 +187,15 @@ func (qp *QueryParams) String(key string) (string, bool) {
|
|||
return value, ok
|
||||
}
|
||||
|
||||
// Uint64 returns a uint parsed from a query. After being called, it is safe to
|
||||
// Uint returns a uint parsed from a query. After being called, it is safe to
|
||||
// cast the uint64 to your desired length.
|
||||
func (qp *QueryParams) Uint64(key string) (uint64, error) {
|
||||
func (qp *QueryParams) Uint(key string, bitSize int) (uint64, error) {
|
||||
str, exists := qp.params[key]
|
||||
if !exists {
|
||||
return 0, ErrKeyNotFound
|
||||
}
|
||||
|
||||
val, err := strconv.ParseUint(str, 10, 64)
|
||||
val, err := strconv.ParseUint(str, 10, bitSize)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
|
|
@ -27,6 +27,12 @@ var (
|
|||
InvalidQueries = []string{
|
||||
"/announce?" + "info_hash=%0%a",
|
||||
}
|
||||
|
||||
// See https://github.com/chihaya/chihaya/issues/334.
|
||||
shouldNotPanicQueries = []string{
|
||||
"/annnounce?" + "info_hash=" + testPeerID + "&a",
|
||||
"/annnounce?" + "info_hash=" + testPeerID + "&=b?",
|
||||
}
|
||||
)
|
||||
|
||||
func mapArrayEqual(boxed map[string][]string, unboxed map[string]string) bool {
|
||||
|
@ -84,26 +90,40 @@ func TestParseInvalidURLData(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func BenchmarkParseQuery(b *testing.B) {
|
||||
for bCount := 0; bCount < b.N; bCount++ {
|
||||
for parseIndex, parseStr := range ValidAnnounceArguments {
|
||||
parsedQueryObj, err := parseQuery(parseStr.Encode())
|
||||
if err != nil {
|
||||
b.Error(err, parseIndex)
|
||||
b.Log(parsedQueryObj)
|
||||
func TestParseShouldNotPanicURLData(t *testing.T) {
|
||||
for _, parseStr := range shouldNotPanicQueries {
|
||||
_, _ = ParseURLData(parseStr)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkParseQuery(b *testing.B) {
|
||||
announceStrings := make([]string, 0)
|
||||
for i := range ValidAnnounceArguments {
|
||||
announceStrings = append(announceStrings, ValidAnnounceArguments[i].Encode())
|
||||
}
|
||||
b.ResetTimer()
|
||||
for bCount := 0; bCount < b.N; bCount++ {
|
||||
i := bCount % len(announceStrings)
|
||||
parsedQueryObj, err := parseQuery(announceStrings[i])
|
||||
if err != nil {
|
||||
b.Error(err, i)
|
||||
b.Log(parsedQueryObj)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkURLParseQuery(b *testing.B) {
|
||||
announceStrings := make([]string, 0)
|
||||
for i := range ValidAnnounceArguments {
|
||||
announceStrings = append(announceStrings, ValidAnnounceArguments[i].Encode())
|
||||
}
|
||||
b.ResetTimer()
|
||||
for bCount := 0; bCount < b.N; bCount++ {
|
||||
for parseIndex, parseStr := range ValidAnnounceArguments {
|
||||
parsedQueryObj, err := url.ParseQuery(parseStr.Encode())
|
||||
i := bCount % len(announceStrings)
|
||||
parsedQueryObj, err := url.ParseQuery(announceStrings[i])
|
||||
if err != nil {
|
||||
b.Error(err, parseIndex)
|
||||
b.Error(err, i)
|
||||
b.Log(parsedQueryObj)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
49
bittorrent/sanitize.go
Normal file
49
bittorrent/sanitize.go
Normal file
|
@ -0,0 +1,49 @@
|
|||
package bittorrent
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
)
|
||||
|
||||
// ErrInvalidIP indicates an invalid IP for an Announce.
|
||||
var ErrInvalidIP = ClientError("invalid IP")
|
||||
|
||||
// SanitizeAnnounce enforces a max and default NumWant and coerces the peer's
|
||||
// IP address into the proper format.
|
||||
func SanitizeAnnounce(r *AnnounceRequest, maxNumWant, defaultNumWant uint32) error {
|
||||
|
||||
if !r.NumWantProvided {
|
||||
r.NumWant = defaultNumWant
|
||||
} else if r.NumWant > maxNumWant {
|
||||
r.NumWant = maxNumWant
|
||||
}
|
||||
|
||||
if ip := r.Peer.IP.To4(); ip != nil {
|
||||
r.Peer.IP.IP = ip
|
||||
r.Peer.IP.AddressFamily = IPv4
|
||||
} else if len(r.Peer.IP.IP) == net.IPv6len { // implies r.Peer.IP.To4() == nil
|
||||
r.Peer.IP.AddressFamily = IPv6
|
||||
} else {
|
||||
return ErrInvalidIP
|
||||
}
|
||||
|
||||
log.Debug("sanitized announce", r, log.Fields{
|
||||
"maxNumWant": maxNumWant,
|
||||
"defaultNumWant": defaultNumWant,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// SanitizeScrape enforces a max number of infohashes for a single scrape
|
||||
// request.
|
||||
func SanitizeScrape(r *ScrapeRequest, maxScrapeInfoHashes uint32) error {
|
||||
if len(r.InfoHashes) > int(maxScrapeInfoHashes) {
|
||||
r.InfoHashes = r.InfoHashes[:maxScrapeInfoHashes]
|
||||
}
|
||||
|
||||
log.Debug("sanitized scrape", r, log.Fields{
|
||||
"maxScrapeInfoHashes": maxScrapeInfoHashes,
|
||||
})
|
||||
return nil
|
||||
}
|
|
@ -5,32 +5,61 @@ import (
|
|||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
|
||||
httpfrontend "github.com/chihaya/chihaya/frontend/http"
|
||||
udpfrontend "github.com/chihaya/chihaya/frontend/udp"
|
||||
"github.com/chihaya/chihaya/frontend/http"
|
||||
"github.com/chihaya/chihaya/frontend/udp"
|
||||
"github.com/chihaya/chihaya/middleware"
|
||||
"github.com/chihaya/chihaya/middleware/clientapproval"
|
||||
"github.com/chihaya/chihaya/middleware/jwt"
|
||||
"github.com/chihaya/chihaya/storage/memory"
|
||||
|
||||
// Imports to register middleware drivers.
|
||||
_ "github.com/chihaya/chihaya/middleware/clientapproval"
|
||||
_ "github.com/chihaya/chihaya/middleware/fixedpeer"
|
||||
_ "github.com/chihaya/chihaya/middleware/jwt"
|
||||
_ "github.com/chihaya/chihaya/middleware/torrentapproval"
|
||||
_ "github.com/chihaya/chihaya/middleware/varinterval"
|
||||
|
||||
// Imports to register storage drivers.
|
||||
_ "github.com/chihaya/chihaya/storage/memory"
|
||||
_ "github.com/chihaya/chihaya/storage/redis"
|
||||
)
|
||||
|
||||
type hookConfig struct {
|
||||
type storageConfig struct {
|
||||
Name string `yaml:"name"`
|
||||
Config interface{} `yaml:"config"`
|
||||
}
|
||||
|
||||
// Config represents the configuration used for executing Chihaya.
|
||||
type Config struct {
|
||||
middleware.ResponseConfig `yaml:",inline"`
|
||||
MetricsAddr string `yaml:"metrics_addr"`
|
||||
HTTPConfig http.Config `yaml:"http"`
|
||||
UDPConfig udp.Config `yaml:"udp"`
|
||||
Storage storageConfig `yaml:"storage"`
|
||||
PreHooks []middleware.HookConfig `yaml:"prehooks"`
|
||||
PostHooks []middleware.HookConfig `yaml:"posthooks"`
|
||||
}
|
||||
|
||||
// PreHookNames returns only the names of the configured middleware.
|
||||
func (cfg Config) PreHookNames() (names []string) {
|
||||
for _, hook := range cfg.PreHooks {
|
||||
names = append(names, hook.Name)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// PostHookNames returns only the names of the configured middleware.
|
||||
func (cfg Config) PostHookNames() (names []string) {
|
||||
for _, hook := range cfg.PostHooks {
|
||||
names = append(names, hook.Name)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ConfigFile represents a namespaced YAML configuration file.
|
||||
type ConfigFile struct {
|
||||
MainConfigBlock struct {
|
||||
middleware.Config `yaml:",inline"`
|
||||
PrometheusAddr string `yaml:"prometheus_addr"`
|
||||
HTTPConfig httpfrontend.Config `yaml:"http"`
|
||||
UDPConfig udpfrontend.Config `yaml:"udp"`
|
||||
Storage memory.Config `yaml:"storage"`
|
||||
PreHooks []hookConfig `yaml:"prehooks"`
|
||||
PostHooks []hookConfig `yaml:"posthooks"`
|
||||
} `yaml:"chihaya"`
|
||||
Chihaya Config `yaml:"chihaya"`
|
||||
}
|
||||
|
||||
// ParseConfigFile returns a new ConfigFile given the path to a YAML
|
||||
|
@ -61,46 +90,3 @@ func ParseConfigFile(path string) (*ConfigFile, error) {
|
|||
|
||||
return &cfgFile, nil
|
||||
}
|
||||
|
||||
// CreateHooks creates instances of Hooks for all of the PreHooks and PostHooks
|
||||
// configured in a ConfigFile.
|
||||
func (cfg ConfigFile) CreateHooks() (preHooks, postHooks []middleware.Hook, err error) {
|
||||
for _, hookCfg := range cfg.MainConfigBlock.PreHooks {
|
||||
cfgBytes, err := yaml.Marshal(hookCfg.Config)
|
||||
if err != nil {
|
||||
panic("failed to remarshal valid YAML")
|
||||
}
|
||||
|
||||
switch hookCfg.Name {
|
||||
case "jwt":
|
||||
var jwtCfg jwt.Config
|
||||
err := yaml.Unmarshal(cfgBytes, &jwtCfg)
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("invalid JWT middleware config: " + err.Error())
|
||||
}
|
||||
hook, err := jwt.NewHook(jwtCfg)
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("invalid JWT middleware config: " + err.Error())
|
||||
}
|
||||
preHooks = append(preHooks, hook)
|
||||
case "client approval":
|
||||
var caCfg clientapproval.Config
|
||||
err := yaml.Unmarshal(cfgBytes, &caCfg)
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("invalid client approval middleware config: " + err.Error())
|
||||
}
|
||||
hook, err := clientapproval.NewHook(caCfg)
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("invalid client approval middleware config: " + err.Error())
|
||||
}
|
||||
preHooks = append(preHooks, hook)
|
||||
}
|
||||
}
|
||||
|
||||
for _, hookCfg := range cfg.MainConfigBlock.PostHooks {
|
||||
switch hookCfg.Name {
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
|
134
cmd/chihaya/e2e.go
Normal file
134
cmd/chihaya/e2e.go
Normal file
|
@ -0,0 +1,134 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/anacrolix/torrent/tracker"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
)
|
||||
|
||||
// EndToEndRunCmdFunc implements a Cobra command that runs the end-to-end test
|
||||
// suite for a Chihaya build.
|
||||
func EndToEndRunCmdFunc(cmd *cobra.Command, args []string) error {
|
||||
delay, err := cmd.Flags().GetDuration("delay")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Test the HTTP tracker
|
||||
httpAddr, err := cmd.Flags().GetString("httpaddr")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(httpAddr) != 0 {
|
||||
log.Info("testing HTTP...")
|
||||
err := test(httpAddr, delay)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info("success")
|
||||
}
|
||||
|
||||
// Test the UDP tracker.
|
||||
udpAddr, err := cmd.Flags().GetString("udpaddr")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(udpAddr) != 0 {
|
||||
log.Info("testing UDP...")
|
||||
err := test(udpAddr, delay)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info("success")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func generateInfohash() [20]byte {
|
||||
b := make([]byte, 20)
|
||||
|
||||
n, err := rand.Read(b)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if n != 20 {
|
||||
panic(fmt.Errorf("not enough randomness? Got %d bytes", n))
|
||||
}
|
||||
|
||||
return [20]byte(bittorrent.InfoHashFromBytes(b))
|
||||
}
|
||||
|
||||
func test(addr string, delay time.Duration) error {
|
||||
ih := generateInfohash()
|
||||
return testWithInfohash(ih, addr, delay)
|
||||
}
|
||||
|
||||
func testWithInfohash(infoHash [20]byte, url string, delay time.Duration) error {
|
||||
req := tracker.AnnounceRequest{
|
||||
InfoHash: infoHash,
|
||||
PeerId: [20]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
|
||||
Downloaded: 50,
|
||||
Left: 100,
|
||||
Uploaded: 50,
|
||||
Event: tracker.Started,
|
||||
IPAddress: uint32(50<<24 | 10<<16 | 12<<8 | 1),
|
||||
NumWant: 50,
|
||||
Port: 10001,
|
||||
}
|
||||
|
||||
resp, err := tracker.Announce{
|
||||
TrackerUrl: url,
|
||||
Request: req,
|
||||
UserAgent: "chihaya-e2e",
|
||||
}.Do()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "announce failed")
|
||||
}
|
||||
|
||||
if len(resp.Peers) != 1 {
|
||||
return fmt.Errorf("expected one peer, got %d", len(resp.Peers))
|
||||
}
|
||||
|
||||
time.Sleep(delay)
|
||||
|
||||
req = tracker.AnnounceRequest{
|
||||
InfoHash: infoHash,
|
||||
PeerId: [20]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21},
|
||||
Downloaded: 50,
|
||||
Left: 100,
|
||||
Uploaded: 50,
|
||||
Event: tracker.Started,
|
||||
IPAddress: uint32(50<<24 | 10<<16 | 12<<8 | 2),
|
||||
NumWant: 50,
|
||||
Port: 10002,
|
||||
}
|
||||
|
||||
resp, err = tracker.Announce{
|
||||
TrackerUrl: url,
|
||||
Request: req,
|
||||
UserAgent: "chihaya-e2e",
|
||||
}.Do()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "announce failed")
|
||||
}
|
||||
|
||||
if len(resp.Peers) != 1 {
|
||||
return fmt.Errorf("expected 1 peers, got %d", len(resp.Peers))
|
||||
}
|
||||
|
||||
if resp.Peers[0].Port != 10001 {
|
||||
return fmt.Errorf("expected port 10001, got %d ", resp.Peers[0].Port)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,166 +1,246 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime/pprof"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
httpfrontend "github.com/chihaya/chihaya/frontend/http"
|
||||
udpfrontend "github.com/chihaya/chihaya/frontend/udp"
|
||||
"github.com/chihaya/chihaya/frontend/http"
|
||||
"github.com/chihaya/chihaya/frontend/udp"
|
||||
"github.com/chihaya/chihaya/middleware"
|
||||
"github.com/chihaya/chihaya/storage/memory"
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
"github.com/chihaya/chihaya/pkg/metrics"
|
||||
"github.com/chihaya/chihaya/pkg/stop"
|
||||
"github.com/chihaya/chihaya/storage"
|
||||
)
|
||||
|
||||
func rootCmdRun(cmd *cobra.Command, args []string) error {
|
||||
debugLog, _ := cmd.Flags().GetBool("debug")
|
||||
if debugLog {
|
||||
log.SetLevel(log.DebugLevel)
|
||||
log.Debugln("debug logging enabled")
|
||||
}
|
||||
cpuProfilePath, _ := cmd.Flags().GetString("cpuprofile")
|
||||
if cpuProfilePath != "" {
|
||||
log.Infoln("enabled CPU profiling to", cpuProfilePath)
|
||||
f, err := os.Create(cpuProfilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pprof.StartCPUProfile(f)
|
||||
defer pprof.StopCPUProfile()
|
||||
// Run represents the state of a running instance of Chihaya.
|
||||
type Run struct {
|
||||
configFilePath string
|
||||
peerStore storage.PeerStore
|
||||
logic *middleware.Logic
|
||||
sg *stop.Group
|
||||
}
|
||||
|
||||
// NewRun runs an instance of Chihaya.
|
||||
func NewRun(configFilePath string) (*Run, error) {
|
||||
r := &Run{
|
||||
configFilePath: configFilePath,
|
||||
}
|
||||
|
||||
configFilePath, _ := cmd.Flags().GetString("config")
|
||||
configFile, err := ParseConfigFile(configFilePath)
|
||||
return r, r.Start(nil)
|
||||
}
|
||||
|
||||
// Start begins an instance of Chihaya.
|
||||
// It is optional to provide an instance of the peer store to avoid the
|
||||
// creation of a new one.
|
||||
func (r *Run) Start(ps storage.PeerStore) error {
|
||||
configFile, err := ParseConfigFile(r.configFilePath)
|
||||
if err != nil {
|
||||
return errors.New("failed to read config: " + err.Error())
|
||||
}
|
||||
cfg := configFile.MainConfigBlock
|
||||
cfg := configFile.Chihaya
|
||||
|
||||
go func() {
|
||||
promServer := http.Server{
|
||||
Addr: cfg.PrometheusAddr,
|
||||
Handler: prometheus.Handler(),
|
||||
}
|
||||
log.Infoln("started serving prometheus stats on", cfg.PrometheusAddr)
|
||||
if err := promServer.ListenAndServe(); err != nil {
|
||||
log.Fatalln("failed to start prometheus server:", err.Error())
|
||||
}
|
||||
}()
|
||||
r.sg = stop.NewGroup()
|
||||
|
||||
// Force the compiler to enforce memory against the storage interface.
|
||||
peerStore, err := memory.New(cfg.Storage)
|
||||
log.Info("starting metrics server", log.Fields{"addr": cfg.MetricsAddr})
|
||||
r.sg.Add(metrics.NewServer(cfg.MetricsAddr))
|
||||
|
||||
if ps == nil {
|
||||
log.Info("starting storage", log.Fields{"name": cfg.Storage.Name})
|
||||
ps, err = storage.NewPeerStore(cfg.Storage.Name, cfg.Storage.Config)
|
||||
if err != nil {
|
||||
return errors.New("failed to create memory storage: " + err.Error())
|
||||
return errors.New("failed to create storage: " + err.Error())
|
||||
}
|
||||
log.Info("started storage", ps)
|
||||
}
|
||||
r.peerStore = ps
|
||||
|
||||
preHooks, postHooks, err := configFile.CreateHooks()
|
||||
preHooks, err := middleware.HooksFromHookConfigs(cfg.PreHooks)
|
||||
if err != nil {
|
||||
return errors.New("failed to create hooks: " + err.Error())
|
||||
return errors.New("failed to validate hook config: " + err.Error())
|
||||
}
|
||||
|
||||
logic := middleware.NewLogic(cfg.Config, peerStore, preHooks, postHooks)
|
||||
postHooks, err := middleware.HooksFromHookConfigs(cfg.PostHooks)
|
||||
if err != nil {
|
||||
return errors.New("failed to create TrackerLogic: " + err.Error())
|
||||
return errors.New("failed to validate hook config: " + err.Error())
|
||||
}
|
||||
|
||||
shutdown := make(chan struct{})
|
||||
errChan := make(chan error)
|
||||
|
||||
var httpFrontend *httpfrontend.Frontend
|
||||
var udpFrontend *udpfrontend.Frontend
|
||||
log.Info("starting tracker logic", log.Fields{
|
||||
"prehooks": cfg.PreHookNames(),
|
||||
"posthooks": cfg.PostHookNames(),
|
||||
})
|
||||
r.logic = middleware.NewLogic(cfg.ResponseConfig, r.peerStore, preHooks, postHooks)
|
||||
|
||||
if cfg.HTTPConfig.Addr != "" {
|
||||
httpFrontend = httpfrontend.NewFrontend(logic, cfg.HTTPConfig)
|
||||
|
||||
go func() {
|
||||
log.Infoln("started serving HTTP on", cfg.HTTPConfig.Addr)
|
||||
if err := httpFrontend.ListenAndServe(); err != nil {
|
||||
errChan <- errors.New("failed to cleanly shutdown HTTP frontend: " + err.Error())
|
||||
log.Info("starting HTTP frontend", cfg.HTTPConfig)
|
||||
httpfe, err := http.NewFrontend(r.logic, cfg.HTTPConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}()
|
||||
r.sg.Add(httpfe)
|
||||
}
|
||||
|
||||
if cfg.UDPConfig.Addr != "" {
|
||||
udpFrontend = udpfrontend.NewFrontend(logic, cfg.UDPConfig)
|
||||
|
||||
go func() {
|
||||
log.Infoln("started serving UDP on", cfg.UDPConfig.Addr)
|
||||
if err := udpFrontend.ListenAndServe(); err != nil {
|
||||
errChan <- errors.New("failed to cleanly shutdown UDP frontend: " + err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
sigChan := make(chan os.Signal)
|
||||
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
|
||||
go func() {
|
||||
select {
|
||||
case <-sigChan:
|
||||
case <-shutdown:
|
||||
}
|
||||
|
||||
if udpFrontend != nil {
|
||||
udpFrontend.Stop()
|
||||
}
|
||||
|
||||
if httpFrontend != nil {
|
||||
httpFrontend.Stop()
|
||||
}
|
||||
|
||||
for err := range peerStore.Stop() {
|
||||
log.Info("starting UDP frontend", cfg.UDPConfig)
|
||||
udpfe, err := udp.NewFrontend(r.logic, cfg.UDPConfig)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return err
|
||||
}
|
||||
r.sg.Add(udpfe)
|
||||
}
|
||||
|
||||
// Stop hooks.
|
||||
errs := logic.Stop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func combineErrors(prefix string, errs []error) error {
|
||||
errStrs := make([]string, 0, len(errs))
|
||||
for _, err := range errs {
|
||||
errChan <- err
|
||||
errStrs = append(errStrs, err.Error())
|
||||
}
|
||||
|
||||
close(errChan)
|
||||
}()
|
||||
return errors.New(prefix + ": " + strings.Join(errStrs, "; "))
|
||||
}
|
||||
|
||||
closed := false
|
||||
var bufErr error
|
||||
for err = range errChan {
|
||||
// Stop shuts down an instance of Chihaya.
|
||||
func (r *Run) Stop(keepPeerStore bool) (storage.PeerStore, error) {
|
||||
log.Debug("stopping frontends and metrics server")
|
||||
if errs := r.sg.Stop().Wait(); len(errs) != 0 {
|
||||
return nil, combineErrors("failed while shutting down frontends", errs)
|
||||
}
|
||||
|
||||
log.Debug("stopping logic")
|
||||
if errs := r.logic.Stop().Wait(); len(errs) != 0 {
|
||||
return nil, combineErrors("failed while shutting down middleware", errs)
|
||||
}
|
||||
|
||||
if !keepPeerStore {
|
||||
log.Debug("stopping peer store")
|
||||
if errs := r.peerStore.Stop().Wait(); len(errs) != 0 {
|
||||
return nil, combineErrors("failed while shutting down peer store", errs)
|
||||
}
|
||||
r.peerStore = nil
|
||||
}
|
||||
|
||||
return r.peerStore, nil
|
||||
}
|
||||
|
||||
// RootRunCmdFunc implements a Cobra command that runs an instance of Chihaya
|
||||
// and handles reloading and shutdown via process signals.
|
||||
func RootRunCmdFunc(cmd *cobra.Command, args []string) error {
|
||||
configFilePath, err := cmd.Flags().GetString("config")
|
||||
if err != nil {
|
||||
if !closed {
|
||||
close(shutdown)
|
||||
closed = true
|
||||
} else {
|
||||
log.Infoln(bufErr)
|
||||
}
|
||||
bufErr = err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return bufErr
|
||||
r, err := NewRun(configFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
||||
reload, _ := signal.NotifyContext(context.Background(), ReloadSignals...)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-reload.Done():
|
||||
log.Info("reloading; received reload signal")
|
||||
peerStore, err := r.Stop(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := r.Start(peerStore); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-ctx.Done():
|
||||
log.Info("shutting down; received shutdown signal")
|
||||
if _, err := r.Stop(false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RootPreRunCmdFunc handles command line flags for the Run command.
|
||||
func RootPreRunCmdFunc(cmd *cobra.Command, args []string) error {
|
||||
noColors, err := cmd.Flags().GetBool("nocolors")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if noColors {
|
||||
log.SetFormatter(&logrus.TextFormatter{DisableColors: true})
|
||||
}
|
||||
|
||||
jsonLog, err := cmd.Flags().GetBool("json")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if jsonLog {
|
||||
log.SetFormatter(&logrus.JSONFormatter{})
|
||||
log.Info("enabled JSON logging")
|
||||
}
|
||||
|
||||
debugLog, err := cmd.Flags().GetBool("debug")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if debugLog {
|
||||
log.SetDebug(true)
|
||||
log.Info("enabled debug logging")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RootPostRunCmdFunc handles clean up of any state initialized by command line
|
||||
// flags.
|
||||
func RootPostRunCmdFunc(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
var rootCmd = &cobra.Command{
|
||||
rootCmd := &cobra.Command{
|
||||
Use: "chihaya",
|
||||
Short: "BitTorrent Tracker",
|
||||
Long: "A customizible, multi-protocol BitTorrent Tracker",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := rootCmdRun(cmd, args); err != nil {
|
||||
log.Fatal(err)
|
||||
Long: "A customizable, multi-protocol BitTorrent Tracker",
|
||||
PersistentPreRunE: RootPreRunCmdFunc,
|
||||
RunE: RootRunCmdFunc,
|
||||
PersistentPostRunE: RootPostRunCmdFunc,
|
||||
}
|
||||
},
|
||||
|
||||
rootCmd.PersistentFlags().Bool("debug", false, "enable debug logging")
|
||||
rootCmd.PersistentFlags().Bool("json", false, "enable json logging")
|
||||
if runtime.GOOS == "windows" {
|
||||
rootCmd.PersistentFlags().Bool("nocolors", true, "disable log coloring")
|
||||
} else {
|
||||
rootCmd.PersistentFlags().Bool("nocolors", false, "disable log coloring")
|
||||
}
|
||||
|
||||
rootCmd.Flags().String("config", "/etc/chihaya.yaml", "location of configuration file")
|
||||
rootCmd.Flags().String("cpuprofile", "", "location to save a CPU profile")
|
||||
rootCmd.Flags().Bool("debug", false, "enable debug logging")
|
||||
|
||||
e2eCmd := &cobra.Command{
|
||||
Use: "e2e",
|
||||
Short: "exec e2e tests",
|
||||
Long: "Execute the Chihaya end-to-end test suite",
|
||||
RunE: EndToEndRunCmdFunc,
|
||||
}
|
||||
|
||||
e2eCmd.Flags().String("httpaddr", "http://127.0.0.1:6969/announce", "address of the HTTP tracker")
|
||||
e2eCmd.Flags().String("udpaddr", "udp://127.0.0.1:6969", "address of the UDP tracker")
|
||||
e2eCmd.Flags().Duration("delay", time.Second, "delay between announces")
|
||||
|
||||
rootCmd.AddCommand(e2eCmd)
|
||||
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
log.Fatal(err)
|
||||
log.Fatal("failed when executing root cobra command: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
|
15
cmd/chihaya/signal_unix.go
Normal file
15
cmd/chihaya/signal_unix.go
Normal file
|
@ -0,0 +1,15 @@
|
|||
//go:build darwin || freebsd || linux || netbsd || openbsd || dragonfly || solaris
|
||||
// +build darwin freebsd linux netbsd openbsd dragonfly solaris
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// ReloadSignals are the signals that the current OS will send to the process
|
||||
// when a configuration reload is requested.
|
||||
var ReloadSignals = []os.Signal{
|
||||
syscall.SIGUSR1,
|
||||
}
|
14
cmd/chihaya/signal_windows.go
Normal file
14
cmd/chihaya/signal_windows.go
Normal file
|
@ -0,0 +1,14 @@
|
|||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var ReloadSignals = []os.Signal{
|
||||
syscall.SIGHUP,
|
||||
}
|
197
dist/example_config.yaml
vendored
Normal file
197
dist/example_config.yaml
vendored
Normal file
|
@ -0,0 +1,197 @@
|
|||
---
|
||||
chihaya:
|
||||
# The interval communicated with BitTorrent clients informing them how
|
||||
# frequently they should announce in between client events.
|
||||
announce_interval: "30m"
|
||||
|
||||
# The interval communicated with BitTorrent clients informing them of the
|
||||
# minimal duration between announces.
|
||||
min_announce_interval: "15m"
|
||||
|
||||
# The network interface that will bind to an HTTP endpoint that can be
|
||||
# scraped by programs collecting metrics.
|
||||
#
|
||||
# /metrics serves metrics in the Prometheus format
|
||||
# /debug/pprof/{cmdline,profile,symbol,trace} serves profiles in the pprof format
|
||||
metrics_addr: "0.0.0.0:6880"
|
||||
|
||||
# This block defines configuration for the tracker's HTTP interface.
|
||||
# If you do not wish to run this, delete this section.
|
||||
http:
|
||||
# The network interface that will bind to an HTTP server for serving
|
||||
# BitTorrent traffic. Remove this to disable the non-TLS listener.
|
||||
addr: "0.0.0.0:6969"
|
||||
|
||||
# The network interface that will bind to an HTTPS server for serving
|
||||
# BitTorrent traffic. If set, tls_cert_path and tls_key_path are required.
|
||||
https_addr: ""
|
||||
|
||||
# The path to the required files to listen via HTTPS.
|
||||
tls_cert_path: ""
|
||||
tls_key_path: ""
|
||||
|
||||
# The timeout durations for HTTP requests.
|
||||
read_timeout: "5s"
|
||||
write_timeout: "5s"
|
||||
|
||||
# When true, persistent connections will be allowed. Generally this is not
|
||||
# useful for a public tracker, but helps performance in some cases (use of
|
||||
# a reverse proxy, or when there are few clients issuing many requests).
|
||||
enable_keepalive: false
|
||||
idle_timeout: "30s"
|
||||
|
||||
# Whether to time requests.
|
||||
# Disabling this should increase performance/decrease load.
|
||||
enable_request_timing: false
|
||||
|
||||
# An array of routes to listen on for announce requests. This is an option
|
||||
# to support trackers that do not listen for /announce or need to listen
|
||||
# on multiple routes.
|
||||
#
|
||||
# This supports named parameters and catch-all parameters as described at
|
||||
# https://github.com/julienschmidt/httprouter#named-parameters
|
||||
announce_routes:
|
||||
- "/announce"
|
||||
# - "/announce.php"
|
||||
|
||||
# An array of routes to listen on for scrape requests. This is an option
|
||||
# to support trackers that do not listen for /scrape or need to listen
|
||||
# on multiple routes.
|
||||
#
|
||||
# This supports named parameters and catch-all parameters as described at
|
||||
# https://github.com/julienschmidt/httprouter#named-parameters
|
||||
scrape_routes:
|
||||
- "/scrape"
|
||||
# - "/scrape.php"
|
||||
|
||||
# When enabled, the IP address used to connect to the tracker will not
|
||||
# override the value clients advertise as their IP address.
|
||||
allow_ip_spoofing: false
|
||||
|
||||
# The HTTP Header containing the IP address of the client.
|
||||
# This is only necessary if using a reverse proxy.
|
||||
real_ip_header: "x-real-ip"
|
||||
|
||||
# The maximum number of peers returned for an individual request.
|
||||
max_numwant: 100
|
||||
|
||||
# The default number of peers returned for an individual request.
|
||||
default_numwant: 50
|
||||
|
||||
# The maximum number of infohashes that can be scraped in one request.
|
||||
max_scrape_infohashes: 50
|
||||
|
||||
# This block defines configuration for the tracker's UDP interface.
|
||||
# If you do not wish to run this, delete this section.
|
||||
udp:
|
||||
# The network interface that will bind to a UDP server for serving
|
||||
# BitTorrent traffic.
|
||||
addr: "0.0.0.0:6969"
|
||||
|
||||
# The leeway for a timestamp on a connection ID.
|
||||
max_clock_skew: "10s"
|
||||
|
||||
# The key used to encrypt connection IDs.
|
||||
private_key: "paste a random string here that will be used to hmac connection IDs"
|
||||
|
||||
# Whether to time requests.
|
||||
# Disabling this should increase performance/decrease load.
|
||||
enable_request_timing: false
|
||||
|
||||
# When enabled, the IP address used to connect to the tracker will not
|
||||
# override the value clients advertise as their IP address.
|
||||
allow_ip_spoofing: false
|
||||
|
||||
# The maximum number of peers returned for an individual request.
|
||||
max_numwant: 100
|
||||
|
||||
# The default number of peers returned for an individual request.
|
||||
default_numwant: 50
|
||||
|
||||
# The maximum number of infohashes that can be scraped in one request.
|
||||
max_scrape_infohashes: 50
|
||||
|
||||
|
||||
# This block defines configuration used for the storage of peer data.
|
||||
storage:
|
||||
name: "memory"
|
||||
config:
|
||||
# The frequency which stale peers are removed.
|
||||
# This balances between
|
||||
# - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
|
||||
# - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
|
||||
gc_interval: "3m"
|
||||
|
||||
# The amount of time until a peer is considered stale.
|
||||
# To avoid churn, keep this slightly larger than `announce_interval`
|
||||
peer_lifetime: "31m"
|
||||
|
||||
# The number of partitions data will be divided into in order to provide a
|
||||
# higher degree of parallelism.
|
||||
shard_count: 1024
|
||||
|
||||
# The interval at which metrics about the number of infohashes and peers
|
||||
# are collected and posted to Prometheus.
|
||||
prometheus_reporting_interval: "1s"
|
||||
|
||||
# This block defines configuration used for redis storage.
|
||||
# storage:
|
||||
# name: redis
|
||||
# config:
|
||||
# # The frequency which stale peers are removed.
|
||||
# # This balances between
|
||||
# # - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
|
||||
# # - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
|
||||
# gc_interval: "3m"
|
||||
|
||||
# # The interval at which metrics about the number of infohashes and peers
|
||||
# # are collected and posted to Prometheus.
|
||||
# prometheus_reporting_interval: "1s"
|
||||
|
||||
# # The amount of time until a peer is considered stale.
|
||||
# # To avoid churn, keep this slightly larger than `announce_interval`
|
||||
# peer_lifetime: "31m"
|
||||
|
||||
# # The address of redis storage.
|
||||
# redis_broker: "redis://pwd@127.0.0.1:6379/0"
|
||||
|
||||
# # The timeout for reading a command reply from redis.
|
||||
# redis_read_timeout: "15s"
|
||||
|
||||
# # The timeout for writing a command to redis.
|
||||
# redis_write_timeout: "15s"
|
||||
|
||||
# # The timeout for connecting to redis server.
|
||||
# redis_connect_timeout: "15s"
|
||||
|
||||
# This block defines configuration used for middleware executed before a
|
||||
# response has been returned to a BitTorrent client.
|
||||
prehooks:
|
||||
# - name: "jwt"
|
||||
# options:
|
||||
# issuer: "https://issuer.com"
|
||||
# audience: "https://chihaya.issuer.com"
|
||||
# jwk_set_url: "https://issuer.com/keys"
|
||||
# jwk_set_update_interval: "5m"
|
||||
|
||||
# - name: "client approval"
|
||||
# options:
|
||||
# whitelist:
|
||||
# - "OP1011"
|
||||
# blacklist:
|
||||
# - "OP1012"
|
||||
|
||||
# - name: "interval variation"
|
||||
# options:
|
||||
# modify_response_probability: 0.2
|
||||
# max_increase_delta: 60
|
||||
# modify_min_interval: true
|
||||
|
||||
# This block defines configuration used for torrent approval, it requires to be given
|
||||
# hashes for whitelist or for blacklist. Hashes are hexadecimal-encoaded.
|
||||
# - name: "torrent approval"
|
||||
# options:
|
||||
# whitelist:
|
||||
# - "a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5"
|
||||
# blacklist:
|
||||
# - "e1d2c3b4a5e1b2c3b4a5e1d2c3b4e5e1d2c3b4a5"
|
21
dist/helm/chihaya/.helmignore
vendored
Normal file
21
dist/helm/chihaya/.helmignore
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
10
dist/helm/chihaya/Chart.yaml
vendored
Normal file
10
dist/helm/chihaya/Chart.yaml
vendored
Normal file
|
@ -0,0 +1,10 @@
|
|||
apiVersion: v1
|
||||
name: chihaya
|
||||
home: https://chihaya.io
|
||||
version: 0.1.0
|
||||
description: A Helm chart for running the Chihaya BitTorrent tracker on Kubernetes.
|
||||
sources:
|
||||
- https://github.com/chihaya/chihaya
|
||||
maintainers:
|
||||
- name: Jimmy Zelinskie
|
||||
email: jimmyzelinskie@gmail.com
|
6
dist/helm/chihaya/templates/NOTES.txt
vendored
Normal file
6
dist/helm/chihaya/templates/NOTES.txt
vendored
Normal file
|
@ -0,0 +1,6 @@
|
|||
You can port forward a local port to Prometheus or the HTTP tracker by running:
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "fullname" . }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
# Metrics port
|
||||
kubectl port-forward $POD_NAME 8080:{{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
|
||||
# HTTP tracker port
|
||||
kubectl port-forward $POD_NAME 8080:{{ $v := .Values.config.chihaya.http.addr | split ":" }}{{ $v._1 }}
|
16
dist/helm/chihaya/templates/_helpers.tpl
vendored
Normal file
16
dist/helm/chihaya/templates/_helpers.tpl
vendored
Normal file
|
@ -0,0 +1,16 @@
|
|||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 24 -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 24 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "fullname" -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 24 -}}
|
||||
{{- end -}}
|
9
dist/helm/chihaya/templates/configmap.yaml
vendored
Normal file
9
dist/helm/chihaya/templates/configmap.yaml
vendored
Normal file
|
@ -0,0 +1,9 @@
|
|||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "fullname" . }}
|
||||
labels:
|
||||
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
data:
|
||||
config.yaml: |
|
||||
{{ toYaml .Values.config | indent 4 }}
|
43
dist/helm/chihaya/templates/deployment.yaml
vendored
Normal file
43
dist/helm/chihaya/templates/deployment.yaml
vendored
Normal file
|
@ -0,0 +1,43 @@
|
|||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "fullname" . }}
|
||||
labels:
|
||||
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "fullname" . }}
|
||||
spec:
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: {{ template "fullname" . }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
args:
|
||||
- "--config=/etc/chihaya/config.yaml"
|
||||
- "--debug"
|
||||
- "--json"
|
||||
ports:
|
||||
- name: bittorrent-http
|
||||
containerPort: {{ $v := .Values.config.chihaya.http.addr | split ":" }}{{ $v._1 }}
|
||||
protocol: TCP
|
||||
- name: bittorrent-udp
|
||||
containerPort: {{ $v := .Values.config.chihaya.udp.addr | split ":" }}{{ $v._1 }}
|
||||
protocol: UDP
|
||||
- name: metrics
|
||||
containerPort: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /etc/chihaya
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 10 }}
|
27
dist/helm/chihaya/templates/service.yaml
vendored
Normal file
27
dist/helm/chihaya/templates/service.yaml
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "fullname" . }}
|
||||
labels:
|
||||
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/path: "/metrics"
|
||||
prometheus.io/port: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 | quote }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- name: bittorrent-http
|
||||
port: {{ $v := .Values.config.chihaya.http.addr | split ":" }}{{ $v._1 }}
|
||||
targetPort: {{ $v := .Values.config.chihaya.http.addr | split ":" }}{{ $v._1 }}
|
||||
protocol: TCP
|
||||
- name: bittorrent-udp
|
||||
port: {{ $v := .Values.config.chihaya.udp.addr | split ":" }}{{ $v._1 }}
|
||||
targetPort: {{ $v := .Values.config.chihaya.udp.addr | split ":" }}{{ $v._1 }}
|
||||
protocol: UDP
|
||||
- name: metrics
|
||||
port: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
|
||||
targetPort: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: {{ template "fullname" . }}
|
162
dist/helm/chihaya/values.yaml
vendored
Normal file
162
dist/helm/chihaya/values.yaml
vendored
Normal file
|
@ -0,0 +1,162 @@
|
|||
replicaCount: 1
|
||||
image:
|
||||
repository: quay.io/jzelinskie/chihaya-git
|
||||
tag: latest
|
||||
pullPolicy: IfNotPresent
|
||||
service:
|
||||
name: chihaya
|
||||
type: ClusterIP
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 1Gi
|
||||
config:
|
||||
chihaya:
|
||||
# The interval communicated with BitTorrent clients informing them how
|
||||
# frequently they should announce in between client events.
|
||||
announce_interval: 30m
|
||||
|
||||
# The interval communicated with BitTorrent clients informing them of the
|
||||
# minimal duration between announces.
|
||||
min_announce_interval: 15m
|
||||
|
||||
# The network interface that will bind to an HTTP endpoint that can be
|
||||
# scraped by programs collecting metrics.
|
||||
#
|
||||
# /metrics serves metrics in the Prometheus format
|
||||
# /debug/pprof/{cmdline,profile,symbol,trace} serves profiles in the pprof format
|
||||
metrics_addr: "0.0.0.0:6880"
|
||||
|
||||
# The maximum number of peers returned in an announce.
|
||||
max_numwant: 50
|
||||
|
||||
# The default number of peers returned in an announce.
|
||||
default_numwant: 25
|
||||
|
||||
# The number of infohashes a single scrape can request before being truncated.
|
||||
max_scrape_infohashes: 50
|
||||
|
||||
# This block defines configuration for the tracker's HTTP interface.
|
||||
# If you do not wish to run this, delete this section.
|
||||
http:
|
||||
# The network interface that will bind to an HTTP server for serving
|
||||
# BitTorrent traffic.
|
||||
addr: "0.0.0.0:6969"
|
||||
|
||||
# The path to the required files to listen via HTTPS.
|
||||
tls_cert_path: ""
|
||||
tls_key_path: ""
|
||||
|
||||
# The timeout durations for HTTP requests.
|
||||
read_timeout: 5s
|
||||
write_timeout: 5s
|
||||
|
||||
# Whether to time requests.
|
||||
# Disabling this should increase performance/decrease load.
|
||||
enable_request_timing: false
|
||||
|
||||
# When true, persistent connections will be allowed. Generally this is not
|
||||
# useful for a public tracker, but helps performance in some cases (use of
|
||||
# a reverse proxy, or when there are few clients issuing many requests).
|
||||
enable_keepalive: false
|
||||
idle_timeout: 30s
|
||||
|
||||
# Whether to listen on /announce.php and /scrape.php in addition to their
|
||||
# non-.php counterparts.
|
||||
# This is an option for compatibility with (very) old clients or otherwise
|
||||
# outdated systems.
|
||||
# This might be useful to retracker.local users, for more information see
|
||||
# http://rutracker.wiki/Оптимизация_обмена_битторрент_траффиком_в_локальных_сетях
|
||||
# and
|
||||
# http://rutracker.wiki/Retracker.local
|
||||
enable_legacy_php_urls: false
|
||||
|
||||
# When enabled, the IP address used to connect to the tracker will not
|
||||
# override the value clients advertise as their IP address.
|
||||
allow_ip_spoofing: false
|
||||
|
||||
# The HTTP Header containing the IP address of the client.
|
||||
# This is only necessary if using a reverse proxy.
|
||||
real_ip_header: "x-real-ip"
|
||||
|
||||
# The maximum number of peers returned for an individual request.
|
||||
max_numwant: 100
|
||||
|
||||
# The default number of peers returned for an individual request.
|
||||
default_numwant: 50
|
||||
|
||||
# The maximum number of infohashes that can be scraped in one request.
|
||||
max_scrape_infohashes: 50
|
||||
|
||||
# This block defines configuration for the tracker's UDP interface.
|
||||
# If you do not wish to run this, delete this section.
|
||||
udp:
|
||||
# The network interface that will bind to a UDP server for serving
|
||||
# BitTorrent traffic.
|
||||
addr: "0.0.0.0:6969"
|
||||
|
||||
# The leeway for a timestamp on a connection ID.
|
||||
max_clock_skew: 10s
|
||||
|
||||
# The key used to encrypt connection IDs.
|
||||
private_key: "paste a random string here that will be used to hmac connection IDs"
|
||||
|
||||
# Whether to time requests.
|
||||
# Disabling this should increase performance/decrease load.
|
||||
enable_request_timing: false
|
||||
|
||||
# When enabled, the IP address used to connect to the tracker will not
|
||||
# override the value clients advertise as their IP address.
|
||||
allow_ip_spoofing: false
|
||||
|
||||
# The maximum number of peers returned for an individual request.
|
||||
max_numwant: 100
|
||||
|
||||
# The default number of peers returned for an individual request.
|
||||
default_numwant: 50
|
||||
|
||||
# The maximum number of infohashes that can be scraped in one request.
|
||||
max_scrape_infohashes: 50
|
||||
|
||||
|
||||
# This block defines configuration used for the storage of peer data.
|
||||
storage:
|
||||
name: memory
|
||||
config:
|
||||
# The frequency which stale peers are removed.
|
||||
gc_interval: 3m
|
||||
|
||||
# The amount of time until a peer is considered stale.
|
||||
# To avoid churn, keep this slightly larger than `announce_interval`
|
||||
peer_lifetime: 31m
|
||||
|
||||
# The number of partitions data will be divided into in order to provide a
|
||||
# higher degree of parallelism.
|
||||
shard_count: 1024
|
||||
|
||||
# The interval at which metrics about the number of infohashes and peers
|
||||
# are collected and posted to Prometheus.
|
||||
prometheus_reporting_interval: 1s
|
||||
|
||||
# This block defines configuration used for middleware executed before a
|
||||
# response has been returned to a BitTorrent client.
|
||||
prehooks:
|
||||
#- name: jwt
|
||||
# options:
|
||||
# issuer: "https://issuer.com"
|
||||
# audience: "https://chihaya.issuer.com"
|
||||
# jwk_set_url: "https://issuer.com/keys"
|
||||
# jwk_set_update_interval: 5m
|
||||
|
||||
#- name: client approval
|
||||
# options:
|
||||
# whitelist:
|
||||
# - "OP1011"
|
||||
# blacklist:
|
||||
# - "OP1012"
|
||||
|
||||
#- name: interval variation
|
||||
# options:
|
||||
# modify_response_probability: 0.2
|
||||
# max_increase_delta: 60
|
||||
# modify_min_interval: true
|
12
dist/prometheus/prometheus.yaml
vendored
Normal file
12
dist/prometheus/prometheus.yaml
vendored
Normal file
|
@ -0,0 +1,12 @@
|
|||
---
|
||||
global:
|
||||
scrape_interval: "5s"
|
||||
evaluation_interval: "5s"
|
||||
|
||||
# A scrape configuration containing exactly one endpoint to scrape:
|
||||
scrape_configs:
|
||||
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
|
||||
- job_name: "local-chihaya" # you can name this however you want
|
||||
scrape_interval: "5s" # optionally override the global scrape_interval
|
||||
static_configs:
|
||||
- targets: ["localhost:6881"] # provide the address of chihaya's prometheus endpoint
|
36
docs/architecture.dot
Normal file
36
docs/architecture.dot
Normal file
|
@ -0,0 +1,36 @@
|
|||
digraph G {
|
||||
subgraph cluster_0 {
|
||||
label = "chihaya";
|
||||
style = "line";
|
||||
color = "blue";
|
||||
|
||||
"Storage";
|
||||
|
||||
subgraph cluster_1 {
|
||||
label = "frontend";
|
||||
style = "line";
|
||||
color = "hotpink";
|
||||
|
||||
"Parser";
|
||||
"Writer";
|
||||
}
|
||||
|
||||
subgraph cluster_2 {
|
||||
label = "logic";
|
||||
style = "line";
|
||||
color = "purple";
|
||||
|
||||
"PreHook Middleware";
|
||||
"PostHook Middleware";
|
||||
"Response Generator";
|
||||
}
|
||||
}
|
||||
|
||||
"BitTorrent Client" -> "Parser";
|
||||
"Parser" -> "PreHook Middleware";
|
||||
"PreHook Middleware" -> "Response Generator";
|
||||
"PostHook Middleware" -> "Storage";
|
||||
"Storage" -> "Response Generator";
|
||||
"Response Generator" -> "Writer";
|
||||
"Writer" -> "BitTorrent Client";
|
||||
}
|
16
docs/architecture.md
Normal file
16
docs/architecture.md
Normal file
|
@ -0,0 +1,16 @@
|
|||
# Architecture
|
||||
|
||||
## Overview
|
||||
|
||||
BitTorrent clients send Announce and Scrape requests to a _Frontend_.
|
||||
Frontends parse requests and write responses for the particular protocol they implement.
|
||||
The _TrackerLogic_ interface is used to generate responses for requests and optionally perform a task after responding to a client.
|
||||
A configurable chain of _PreHook_ and _PostHook_ middleware is used to construct an instance of TrackerLogic.
|
||||
PreHooks are middleware that are executed before the response has been written.
|
||||
After all PreHooks have executed, any missing response fields that are required are filled by reading out of the configured implementation of the _Storage_ interface.
|
||||
PostHooks are asynchronous tasks that occur after a response has been delivered to the client.
|
||||
Because they are unnecessary to for generating a response, updates to the Storage for a particular request are done asynchronously in a PostHook.
|
||||
|
||||
## Diagram
|
||||
|
||||

|
111
docs/frontend.md
Normal file
111
docs/frontend.md
Normal file
|
@ -0,0 +1,111 @@
|
|||
# Frontends
|
||||
|
||||
A _Frontend_ is a component of Chihaya that serves a BitTorrent tracker on one protocol.
|
||||
The frontend accepts, parses and sanitizes requests, passes them to the _Logic_ and writes responses to _Clients_.
|
||||
|
||||
This documentation first gives a high-level overview of Frontends and later goes into implementation specifics.
|
||||
Users of Chihaya are expected to just read the first part - developers should read both.
|
||||
|
||||
## Functionality
|
||||
|
||||
A Frontend serves one protocol, for example HTTP ([BEP 3]) or UDP ([BEP 15]).
|
||||
It listens for requests and usually answers each of them with one response, a basic overview of the control flow is:
|
||||
|
||||
1. Read the request.
|
||||
2. Parse the request.
|
||||
3. Have the Logic handle the request. This calls a series of `PreHooks`.
|
||||
4. Send a response to the Client.
|
||||
5. Process the request and response through `PostHooks`.
|
||||
|
||||
## Available Frontends
|
||||
|
||||
Chihaya ships with frontends for HTTP(S) and UDP.
|
||||
The HTTP frontend uses Go's `http` package.
|
||||
The UDP frontend implements both [old-opentracker-style] IPv6 and the IPv6 support specified in [BEP 15].
|
||||
The advantage of the old opentracker style is that it contains a usable IPv6 `ip` field, to enable IP overrides in announces.
|
||||
|
||||
## Implementing a Frontend
|
||||
|
||||
This part is intended for developers.
|
||||
|
||||
### Implementation Specifics
|
||||
|
||||
A frontend should serve only one protocol.
|
||||
It may serve that protocol on multiple transports or networks, if applicable.
|
||||
An example of that is the `http` Frontend, operating both on HTTP and HTTPS.
|
||||
|
||||
The typical control flow of handling announces, in more detail, is:
|
||||
|
||||
1. Read the request.
|
||||
2. Parse the request, if invalid go to 9.
|
||||
3. Validate/sanitize the request, if invalid go to 9.
|
||||
4. If the request is protocol-specific, handle, respond, and go to 8.
|
||||
5. Pass the request to the `TrackerLogic`'s `HandleAnnounce` or `HandleScrape` method, if an error is returned go to 9.
|
||||
6. Send the response to the Client.
|
||||
7. Pass the request and response to the `TrackerLogic`'s `AfterAnnounce` or `AfterScrape` method.
|
||||
8. Finish, accept next request.
|
||||
9. For invalid requests or errors during processing: Send an error response to the client.
|
||||
This step may be skipped for suspected denial-of-service attacks.
|
||||
The error response may contain information about the cause of the error.
|
||||
Only errors where the Client is at fault should be explained, internal server errors should be returned without explanation.
|
||||
Then finish, and accept the next request.
|
||||
|
||||
#### Configuration
|
||||
|
||||
The frontend must be configurable using a single, exported struct.
|
||||
The struct must have YAML annotations.
|
||||
The struct must implement `log.Fielder` to be logged on startup.
|
||||
|
||||
#### Metrics
|
||||
|
||||
Frontends may provide runtime metrics, such as the number of requests or their duration.
|
||||
Metrics must be reported using [Prometheus].
|
||||
|
||||
A frontend should provide at least the following metrics:
|
||||
|
||||
- The number of valid and invalid requests handled
|
||||
- The average time it takes to handle a single request.
|
||||
This request timing should be made optional using a config entry.
|
||||
|
||||
Requests should be separated by type, i.e. Scrapes, Announces, and other protocol-specific requests.
|
||||
If the frontend serves multiple transports or networks, metrics for them should be separable.
|
||||
|
||||
It is recommended to publish one Prometheus `HistogramVec` with:
|
||||
|
||||
- A name like `chihaya_PROTOCOL_response_duration_milliseconds`
|
||||
- A value holding the duration in milliseconds of the reported request
|
||||
- Labels for:
|
||||
- `action` (= `announce`, `scrape`, ...)
|
||||
- `address_family` (= `Unknown`, `IPv4`, `IPv6`, ...), if applicable
|
||||
- `error` (= A textual representation of the error encountered during processing.)
|
||||
Because `error` is expected to hold the textual representation of any error that occurred during the request, great care must be taken to ensure all error messages are static.
|
||||
`error` must not contain any information directly taken from the request, e.g. the value of an invalid parameter.
|
||||
This would cause this dimension of prometheus to explode, which slows down prometheus clients and reporters.
|
||||
|
||||
#### Error Handling
|
||||
|
||||
Frontends should return `bittorrent.ClientError`s to the Client.
|
||||
Frontends must not return errors that are not a `bittorrent.ClientError` to the Client.
|
||||
A message like `internal server error` should be used instead.
|
||||
|
||||
#### Request Sanitization
|
||||
|
||||
The `TrackerLogic` expects sanitized requests in order to function properly.
|
||||
|
||||
The `bittorrent` package provides the `SanitizeAnnounce` and `SanitizeScrape` functions to sanitize Announces and Scrapes, respectively.
|
||||
This is the minimal required sanitization, every `AnnounceRequest` and `ScrapeRequest` must be sanitized this way.
|
||||
|
||||
Note that the `AnnounceRequest` struct contains booleans of the form `XProvided`, where `X` denotes an optional parameter of the BitTorrent protocol.
|
||||
These should be set according to the values received by the Client.
|
||||
|
||||
#### Contexts
|
||||
|
||||
All methods of the `TrackerLogic` interface expect a `context.Context` as a parameter.
|
||||
After a request is handled by `HandleAnnounce` without errors, the populated context returned must be used to call `AfterAnnounce`.
|
||||
The same applies to Scrapes.
|
||||
This way, a PreHook can communicate with a PostHook by setting a context value.
|
||||
|
||||
[BEP 3]: http://bittorrent.org/beps/bep_0003.html
|
||||
[BEP 15]: http://bittorrent.org/beps/bep_0015.html
|
||||
[Prometheus]: https://prometheus.io/
|
||||
[old-opentracker-style]: https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
|
35
docs/middleware/interval_variation.md
Normal file
35
docs/middleware/interval_variation.md
Normal file
|
@ -0,0 +1,35 @@
|
|||
# Announce Interval Variation Middleware
|
||||
|
||||
This package provides the announce middleware `interval variation` which randomizes the announce interval.
|
||||
|
||||
## Functionality
|
||||
|
||||
This middleware chooses random announces and modifies the `interval` and `min_interval` fields.
|
||||
A random number of seconds are added to the `interval` field and, if desired, also to the `min_interval` field.
|
||||
|
||||
Note that if a response is picked for modification and `min_interval` should be changed as well, both `interval` and `min_interval` are modified by the same amount.
|
||||
|
||||
## Use Case
|
||||
|
||||
Use this middleware to avoid recurring load spikes on the tracker.
|
||||
By randomizing the announce interval, load spikes will flatten out after a few announce cycles.
|
||||
|
||||
## Configuration
|
||||
|
||||
This middleware provides the following parameters for configuration:
|
||||
|
||||
- `modify_response_probability` (float, >0, <= 1) indicates the probability by which a response will be chosen to have its announce intervals modified.
|
||||
- `max_increase_delta` (int, >0) sets an upper boundary (inclusive) for the amount of seconds added.
|
||||
- `modify_min_interval` (boolean) whether to modify the `min_interval` field as well.
|
||||
|
||||
An example config might look like this:
|
||||
|
||||
```yaml
|
||||
chihaya:
|
||||
prehooks:
|
||||
- name: interval variation
|
||||
config:
|
||||
modify_response_probability: 0.2
|
||||
max_increase_delta: 60
|
||||
modify_min_interval: true
|
||||
```
|
86
docs/storage/redis.md
Normal file
86
docs/storage/redis.md
Normal file
|
@ -0,0 +1,86 @@
|
|||
# Redis Storage
|
||||
|
||||
This storage implementation separates Chihaya from its storage service.
|
||||
Chihaya achieves HA by storing all peer data in Redis.
|
||||
Multiple instances of Chihaya can use the same redis instance concurrently.
|
||||
The storage service can get HA by clustering.
|
||||
If one instance of Chihaya goes down, peer data will still be available in Redis.
|
||||
|
||||
The HA of storage service is not considered here.
|
||||
In case Redis runs as a single node, peer data will be unavailable if the node is down.
|
||||
You should consider setting up a Redis cluster for Chihaya in production.
|
||||
|
||||
This storage implementation is currently orders of magnitude slower than the in-memory implementation.
|
||||
|
||||
## Use Case
|
||||
|
||||
When one instance of Chihaya is down, other instances can continue serving peers from Redis.
|
||||
|
||||
## Configuration
|
||||
|
||||
```yaml
|
||||
chihaya:
|
||||
storage:
|
||||
name: redis
|
||||
config:
|
||||
# The frequency which stale peers are removed.
|
||||
# This balances between
|
||||
# - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
|
||||
# - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
|
||||
gc_interval: 3m
|
||||
|
||||
# The interval at which metrics about the number of infohashes and peers
|
||||
# are collected and posted to Prometheus.
|
||||
prometheus_reporting_interval: 1s
|
||||
|
||||
# The amount of time until a peer is considered stale.
|
||||
# To avoid churn, keep this slightly larger than `announce_interval`
|
||||
peer_lifetime: 31m
|
||||
|
||||
# The address of redis storage.
|
||||
redis_broker: "redis://pwd@127.0.0.1:6379/0"
|
||||
|
||||
# The timeout for reading a command reply from redis.
|
||||
redis_read_timeout: 15s
|
||||
|
||||
# The timeout for writing a command to redis.
|
||||
redis_write_timeout: 15s
|
||||
|
||||
# The timeout for connecting to redis server.
|
||||
redis_connect_timeout: 15s
|
||||
```
|
||||
|
||||
## Implementation
|
||||
|
||||
Seeders and Leechers for a particular InfoHash are stored within a redis hash.
|
||||
The InfoHash is used as key, _peer keys_ are the fields, last modified times are values.
|
||||
Peer keys are derived from peers and contain Peer ID, IP, and Port.
|
||||
All the InfoHashes (swarms) are also stored in a redis hash, with IP family as the key, infohash as field, and last modified time as value.
|
||||
|
||||
Here is an example:
|
||||
|
||||
```yaml
|
||||
- IPv4
|
||||
- IPv4_S_<infohash 1>: <modification time>
|
||||
- IPv4_L_<infohash 1>: <modification time>
|
||||
- IPv4_S_<infohash 2>: <modification time>
|
||||
- IPv4_S_<infohash 1>
|
||||
- <peer 1 key>: <modification time>
|
||||
- <peer 2 key>: <modification time>
|
||||
- IPv4_L_<infohash 1>
|
||||
- <peer 3 key>: <modification time>
|
||||
- IPv4_S_<infohash 2>
|
||||
- <peer 3 key>: <modification time>
|
||||
```
|
||||
|
||||
In this case, prometheus would record two swarms, three seeders, and one leecher.
|
||||
These three keys per address family are used to record the count of swarms, seeders, and leechers.
|
||||
|
||||
```yaml
|
||||
- IPv4_infohash_count: 2
|
||||
- IPv4_S_count: 3
|
||||
- IPv4_L_count: 1
|
||||
```
|
||||
|
||||
Note: `IPv4_infohash_count` has a different meaning compared to the `memory` storage:
|
||||
It represents the number of infohashes reported by seeder, meaning that infohashes without seeders are not counted.
|
|
@ -1,43 +0,0 @@
|
|||
chihaya:
|
||||
announce_interval: 15m
|
||||
prometheus_addr: localhost:6880
|
||||
|
||||
http:
|
||||
addr: 0.0.0.0:6881
|
||||
allow_ip_spoofing: false
|
||||
real_ip_header: x-real-ip
|
||||
read_timeout: 5s
|
||||
write_timeout: 5s
|
||||
request_timeout: 5s
|
||||
|
||||
udp:
|
||||
addr: 0.0.0.0:6881
|
||||
allow_ip_spoofing: false
|
||||
max_clock_skew: 10s
|
||||
private_key: |
|
||||
paste a random string here that will be used to hmac connection IDs
|
||||
|
||||
storage:
|
||||
gc_interval: 14m
|
||||
peer_lifetime: 15m
|
||||
shards: 1
|
||||
max_numwant: 100
|
||||
|
||||
prehooks:
|
||||
- name: jwt
|
||||
config:
|
||||
issuer: https://issuer.com
|
||||
audience: https://chihaya.issuer.com
|
||||
jwk_set_url: https://issuer.com/keys
|
||||
jwk_set_update_interval: 5m
|
||||
- name: client approval
|
||||
config:
|
||||
whitelist:
|
||||
- OP1011
|
||||
blacklist:
|
||||
- OP1012
|
||||
|
||||
posthooks:
|
||||
- name: gossip
|
||||
config:
|
||||
boostrap_node: 127.0.0.1:6881
|
|
@ -11,14 +11,20 @@ import (
|
|||
// after the response has been delivered to the client.
|
||||
type TrackerLogic interface {
|
||||
// HandleAnnounce generates a response for an Announce.
|
||||
HandleAnnounce(context.Context, *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error)
|
||||
//
|
||||
// Returns the updated context, the generated AnnounceResponse and no error
|
||||
// on success; nil and error on failure.
|
||||
HandleAnnounce(context.Context, *bittorrent.AnnounceRequest) (context.Context, *bittorrent.AnnounceResponse, error)
|
||||
|
||||
// AfterAnnounce does something with the results of an Announce after it
|
||||
// has been completed.
|
||||
AfterAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse)
|
||||
|
||||
// HandleScrape generates a response for a Scrape.
|
||||
HandleScrape(context.Context, *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error)
|
||||
//
|
||||
// Returns the updated context, the generated AnnounceResponse and no error
|
||||
// on success; nil and error on failure.
|
||||
HandleScrape(context.Context, *bittorrent.ScrapeRequest) (context.Context, *bittorrent.ScrapeResponse, error)
|
||||
|
||||
// AfterScrape does something with the results of a Scrape after it has been completed.
|
||||
AfterScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse)
|
||||
|
|
|
@ -3,7 +3,7 @@ package bencode
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var unmarshalTests = []struct {
|
||||
|
@ -24,9 +24,11 @@ var unmarshalTests = []struct {
|
|||
|
||||
func TestUnmarshal(t *testing.T) {
|
||||
for _, tt := range unmarshalTests {
|
||||
t.Run(tt.input, func(t *testing.T) {
|
||||
got, err := Unmarshal([]byte(tt.input))
|
||||
assert.Nil(t, err, "unmarshal should not fail")
|
||||
assert.Equal(t, got, tt.expected, "unmarshalled values should match the expected results")
|
||||
require.Nil(t, err, "unmarshal should not fail")
|
||||
require.Equal(t, got, tt.expected, "unmarshalled values should match the expected results")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -44,8 +46,8 @@ func BenchmarkUnmarshalScalar(b *testing.B) {
|
|||
d2 := NewDecoder(&bufferLoop{"i42e"})
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
d1.Decode()
|
||||
d2.Decode()
|
||||
_, _ = d1.Decode()
|
||||
_, _ = d2.Decode()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -61,8 +63,8 @@ func TestUnmarshalLarge(t *testing.T) {
|
|||
dec := NewDecoder(&bufferLoop{string(buf)})
|
||||
|
||||
got, err := dec.Decode()
|
||||
assert.Nil(t, err, "decode should not fail")
|
||||
assert.Equal(t, got, data, "encoding and decoding should equal the original value")
|
||||
require.Nil(t, err, "decode should not fail")
|
||||
require.Equal(t, got, data, "encoding and decoding should equal the original value")
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalLarge(b *testing.B) {
|
||||
|
@ -77,6 +79,6 @@ func BenchmarkUnmarshalLarge(b *testing.B) {
|
|||
dec := NewDecoder(&bufferLoop{string(buf)})
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
dec.Decode()
|
||||
_, _ = dec.Decode()
|
||||
}
|
||||
}
|
||||
|
|
|
@ -66,7 +66,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
|
|||
err = marshalInt(w, int64(v))
|
||||
|
||||
case int64:
|
||||
err = marshalInt(w, int64(v))
|
||||
err = marshalInt(w, v)
|
||||
|
||||
case uint:
|
||||
err = marshalUint(w, uint64(v))
|
||||
|
@ -78,7 +78,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
|
|||
err = marshalUint(w, uint64(v))
|
||||
|
||||
case uint64:
|
||||
err = marshalUint(w, uint64(v))
|
||||
err = marshalUint(w, v)
|
||||
|
||||
case time.Duration: // Assume seconds
|
||||
err = marshalInt(w, int64(v/time.Second))
|
||||
|
@ -90,7 +90,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
|
|||
err = marshalList(w, v)
|
||||
|
||||
case []Dict:
|
||||
var interfaceSlice = make([]interface{}, len(v))
|
||||
interfaceSlice := make([]interface{}, len(v))
|
||||
for i, d := range v {
|
||||
interfaceSlice[i] = d
|
||||
}
|
||||
|
|
|
@ -2,10 +2,11 @@ package bencode
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var marshalTests = []struct {
|
||||
|
@ -35,10 +36,12 @@ var marshalTests = []struct {
|
|||
}
|
||||
|
||||
func TestMarshal(t *testing.T) {
|
||||
for _, test := range marshalTests {
|
||||
got, err := Marshal(test.input)
|
||||
assert.Nil(t, err, "marshal should not fail")
|
||||
assert.Contains(t, test.expected, string(got), "the marshaled result should be one of the expected permutations")
|
||||
for _, tt := range marshalTests {
|
||||
t.Run(fmt.Sprintf("%#v", tt.input), func(t *testing.T) {
|
||||
got, err := Marshal(tt.input)
|
||||
require.Nil(t, err, "marshal should not fail")
|
||||
require.Contains(t, tt.expected, string(got), "the marshaled result should be one of the expected permutations")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -47,8 +50,8 @@ func BenchmarkMarshalScalar(b *testing.B) {
|
|||
encoder := NewEncoder(buf)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
encoder.Encode("test")
|
||||
encoder.Encode(123)
|
||||
_ = encoder.Encode("test")
|
||||
_ = encoder.Encode(123)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -64,6 +67,6 @@ func BenchmarkMarshalLarge(b *testing.B) {
|
|||
encoder := NewEncoder(buf)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
encoder.Encode(data)
|
||||
_ = encoder.Encode(data)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,188 +4,398 @@ package http
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/tylerb/graceful"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/frontend"
|
||||
"github.com/chihaya/chihaya/middleware"
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
"github.com/chihaya/chihaya/pkg/stop"
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(promResponseDurationMilliseconds)
|
||||
recordResponseDuration("action", nil, time.Second)
|
||||
}
|
||||
|
||||
var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "chihaya_http_response_duration_milliseconds",
|
||||
Help: "The duration of time it takes to receive and write a response to an API request",
|
||||
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
|
||||
},
|
||||
[]string{"action", "error"},
|
||||
)
|
||||
|
||||
// recordResponseDuration records the duration of time to respond to a Request
|
||||
// in milliseconds .
|
||||
func recordResponseDuration(action string, err error, duration time.Duration) {
|
||||
var errString string
|
||||
if err != nil {
|
||||
errString = err.Error()
|
||||
}
|
||||
|
||||
promResponseDurationMilliseconds.
|
||||
WithLabelValues(action, errString).
|
||||
Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
|
||||
}
|
||||
|
||||
// Config represents all of the configurable options for an HTTP BitTorrent
|
||||
// Frontend.
|
||||
type Config struct {
|
||||
Addr string `yaml:"addr"`
|
||||
HTTPSAddr string `yaml:"https_addr"`
|
||||
ReadTimeout time.Duration `yaml:"read_timeout"`
|
||||
WriteTimeout time.Duration `yaml:"write_timeout"`
|
||||
RequestTimeout time.Duration `yaml:"request_timeout"`
|
||||
AllowIPSpoofing bool `yaml:"allow_ip_spoofing"`
|
||||
RealIPHeader string `yaml:"real_ip_header"`
|
||||
IdleTimeout time.Duration `yaml:"idle_timeout"`
|
||||
EnableKeepAlive bool `yaml:"enable_keepalive"`
|
||||
TLSCertPath string `yaml:"tls_cert_path"`
|
||||
TLSKeyPath string `yaml:"tls_key_path"`
|
||||
AnnounceRoutes []string `yaml:"announce_routes"`
|
||||
ScrapeRoutes []string `yaml:"scrape_routes"`
|
||||
EnableRequestTiming bool `yaml:"enable_request_timing"`
|
||||
ParseOptions `yaml:",inline"`
|
||||
}
|
||||
|
||||
// Frontend holds the state of an HTTP BitTorrent Frontend.
|
||||
// LogFields renders the current config as a set of Logrus fields.
|
||||
func (cfg Config) LogFields() log.Fields {
|
||||
return log.Fields{
|
||||
"addr": cfg.Addr,
|
||||
"httpsAddr": cfg.HTTPSAddr,
|
||||
"readTimeout": cfg.ReadTimeout,
|
||||
"writeTimeout": cfg.WriteTimeout,
|
||||
"idleTimeout": cfg.IdleTimeout,
|
||||
"enableKeepAlive": cfg.EnableKeepAlive,
|
||||
"tlsCertPath": cfg.TLSCertPath,
|
||||
"tlsKeyPath": cfg.TLSKeyPath,
|
||||
"announceRoutes": cfg.AnnounceRoutes,
|
||||
"scrapeRoutes": cfg.ScrapeRoutes,
|
||||
"enableRequestTiming": cfg.EnableRequestTiming,
|
||||
"allowIPSpoofing": cfg.AllowIPSpoofing,
|
||||
"realIPHeader": cfg.RealIPHeader,
|
||||
"maxNumWant": cfg.MaxNumWant,
|
||||
"defaultNumWant": cfg.DefaultNumWant,
|
||||
"maxScrapeInfoHashes": cfg.MaxScrapeInfoHashes,
|
||||
}
|
||||
}
|
||||
|
||||
// Default config constants.
|
||||
const (
|
||||
defaultReadTimeout = 2 * time.Second
|
||||
defaultWriteTimeout = 2 * time.Second
|
||||
defaultIdleTimeout = 30 * time.Second
|
||||
)
|
||||
|
||||
// Validate sanity checks values set in a config and returns a new config with
|
||||
// default values replacing anything that is invalid.
|
||||
//
|
||||
// This function warns to the logger when a value is changed.
|
||||
func (cfg Config) Validate() Config {
|
||||
validcfg := cfg
|
||||
|
||||
if cfg.ReadTimeout <= 0 {
|
||||
validcfg.ReadTimeout = defaultReadTimeout
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": "http.ReadTimeout",
|
||||
"provided": cfg.ReadTimeout,
|
||||
"default": validcfg.ReadTimeout,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.WriteTimeout <= 0 {
|
||||
validcfg.WriteTimeout = defaultWriteTimeout
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": "http.WriteTimeout",
|
||||
"provided": cfg.WriteTimeout,
|
||||
"default": validcfg.WriteTimeout,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.IdleTimeout <= 0 {
|
||||
validcfg.IdleTimeout = defaultIdleTimeout
|
||||
|
||||
if cfg.EnableKeepAlive {
|
||||
// If keepalive is disabled, this configuration isn't used anyway.
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": "http.IdleTimeout",
|
||||
"provided": cfg.IdleTimeout,
|
||||
"default": validcfg.IdleTimeout,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.MaxNumWant <= 0 {
|
||||
validcfg.MaxNumWant = defaultMaxNumWant
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": "http.MaxNumWant",
|
||||
"provided": cfg.MaxNumWant,
|
||||
"default": validcfg.MaxNumWant,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.DefaultNumWant <= 0 {
|
||||
validcfg.DefaultNumWant = defaultDefaultNumWant
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": "http.DefaultNumWant",
|
||||
"provided": cfg.DefaultNumWant,
|
||||
"default": validcfg.DefaultNumWant,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.MaxScrapeInfoHashes <= 0 {
|
||||
validcfg.MaxScrapeInfoHashes = defaultMaxScrapeInfoHashes
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": "http.MaxScrapeInfoHashes",
|
||||
"provided": cfg.MaxScrapeInfoHashes,
|
||||
"default": validcfg.MaxScrapeInfoHashes,
|
||||
})
|
||||
}
|
||||
|
||||
return validcfg
|
||||
}
|
||||
|
||||
// Frontend represents the state of an HTTP BitTorrent Frontend.
|
||||
type Frontend struct {
|
||||
grace *graceful.Server
|
||||
srv *http.Server
|
||||
tlsSrv *http.Server
|
||||
tlsCfg *tls.Config
|
||||
|
||||
logic frontend.TrackerLogic
|
||||
Config
|
||||
}
|
||||
|
||||
// NewFrontend allocates a new instance of a Frontend.
|
||||
func NewFrontend(logic frontend.TrackerLogic, cfg Config) *Frontend {
|
||||
return &Frontend{
|
||||
// NewFrontend creates a new instance of an HTTP Frontend that asynchronously
|
||||
// serves requests.
|
||||
func NewFrontend(logic frontend.TrackerLogic, provided Config) (*Frontend, error) {
|
||||
cfg := provided.Validate()
|
||||
|
||||
f := &Frontend{
|
||||
logic: logic,
|
||||
Config: cfg,
|
||||
}
|
||||
|
||||
if cfg.Addr == "" && cfg.HTTPSAddr == "" {
|
||||
return nil, errors.New("must specify addr or https_addr or both")
|
||||
}
|
||||
|
||||
if len(cfg.AnnounceRoutes) < 1 || len(cfg.ScrapeRoutes) < 1 {
|
||||
return nil, errors.New("must specify routes")
|
||||
}
|
||||
|
||||
// If TLS is enabled, create a key pair.
|
||||
if cfg.TLSCertPath != "" && cfg.TLSKeyPath != "" {
|
||||
var err error
|
||||
f.tlsCfg = &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
Certificates: make([]tls.Certificate, 1),
|
||||
}
|
||||
f.tlsCfg.Certificates[0], err = tls.LoadX509KeyPair(cfg.TLSCertPath, cfg.TLSKeyPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.HTTPSAddr != "" && f.tlsCfg == nil {
|
||||
return nil, errors.New("must specify tls_cert_path and tls_key_path when using https_addr")
|
||||
}
|
||||
if cfg.HTTPSAddr == "" && f.tlsCfg != nil {
|
||||
return nil, errors.New("must specify https_addr when using tls_cert_path and tls_key_path")
|
||||
}
|
||||
|
||||
var listenerHTTP, listenerHTTPS net.Listener
|
||||
var err error
|
||||
if cfg.Addr != "" {
|
||||
listenerHTTP, err = net.Listen("tcp", f.Addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if cfg.HTTPSAddr != "" {
|
||||
listenerHTTPS, err = net.Listen("tcp", f.HTTPSAddr)
|
||||
if err != nil {
|
||||
if listenerHTTP != nil {
|
||||
listenerHTTP.Close()
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.Addr != "" {
|
||||
go func() {
|
||||
if err := f.serveHTTP(listenerHTTP); err != nil {
|
||||
log.Fatal("failed while serving http", log.Err(err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if cfg.HTTPSAddr != "" {
|
||||
go func() {
|
||||
if err := f.serveHTTPS(listenerHTTPS); err != nil {
|
||||
log.Fatal("failed while serving https", log.Err(err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Stop provides a thread-safe way to shutdown a currently running Tracker.
|
||||
func (t *Frontend) Stop() {
|
||||
t.grace.Stop(t.grace.Timeout)
|
||||
<-t.grace.StopChan()
|
||||
// Stop provides a thread-safe way to shutdown a currently running Frontend.
|
||||
func (f *Frontend) Stop() stop.Result {
|
||||
stopGroup := stop.NewGroup()
|
||||
|
||||
if f.srv != nil {
|
||||
stopGroup.AddFunc(f.makeStopFunc(f.srv))
|
||||
}
|
||||
if f.tlsSrv != nil {
|
||||
stopGroup.AddFunc(f.makeStopFunc(f.tlsSrv))
|
||||
}
|
||||
|
||||
return stopGroup.Stop()
|
||||
}
|
||||
|
||||
func (t *Frontend) handler() http.Handler {
|
||||
func (f *Frontend) makeStopFunc(stopSrv *http.Server) stop.Func {
|
||||
return func() stop.Result {
|
||||
c := make(stop.Channel)
|
||||
go func() {
|
||||
c.Done(stopSrv.Shutdown(context.Background()))
|
||||
}()
|
||||
return c.Result()
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Frontend) handler() http.Handler {
|
||||
router := httprouter.New()
|
||||
router.GET("/announce", t.announceRoute)
|
||||
router.GET("/scrape", t.scrapeRoute)
|
||||
for _, route := range f.AnnounceRoutes {
|
||||
router.GET(route, f.announceRoute)
|
||||
}
|
||||
for _, route := range f.ScrapeRoutes {
|
||||
router.GET(route, f.scrapeRoute)
|
||||
}
|
||||
return router
|
||||
}
|
||||
|
||||
// ListenAndServe listens on the TCP network address t.Addr and blocks serving
|
||||
// BitTorrent requests until t.Stop() is called or an error is returned.
|
||||
func (t *Frontend) ListenAndServe() error {
|
||||
t.grace = &graceful.Server{
|
||||
Server: &http.Server{
|
||||
Addr: t.Addr,
|
||||
Handler: t.handler(),
|
||||
ReadTimeout: t.ReadTimeout,
|
||||
WriteTimeout: t.WriteTimeout,
|
||||
},
|
||||
Timeout: t.RequestTimeout,
|
||||
NoSignalHandling: true,
|
||||
ConnState: func(conn net.Conn, state http.ConnState) {
|
||||
switch state {
|
||||
case http.StateNew:
|
||||
//stats.RecordEvent(stats.AcceptedConnection)
|
||||
|
||||
case http.StateClosed:
|
||||
//stats.RecordEvent(stats.ClosedConnection)
|
||||
|
||||
case http.StateHijacked:
|
||||
panic("http: connection impossibly hijacked")
|
||||
|
||||
// Ignore the following cases.
|
||||
case http.StateActive, http.StateIdle:
|
||||
|
||||
default:
|
||||
panic("http: connection transitioned to unknown state")
|
||||
}
|
||||
},
|
||||
}
|
||||
t.grace.SetKeepAlivesEnabled(false)
|
||||
|
||||
if err := t.grace.ListenAndServe(); err != nil {
|
||||
if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
|
||||
panic("http: failed to gracefully run HTTP server: " + err.Error())
|
||||
}
|
||||
// serveHTTP blocks while listening and serving non-TLS HTTP BitTorrent
|
||||
// requests until Stop() is called or an error is returned.
|
||||
func (f *Frontend) serveHTTP(l net.Listener) error {
|
||||
f.srv = &http.Server{
|
||||
Addr: f.Addr,
|
||||
Handler: f.handler(),
|
||||
ReadTimeout: f.ReadTimeout,
|
||||
WriteTimeout: f.WriteTimeout,
|
||||
IdleTimeout: f.IdleTimeout,
|
||||
}
|
||||
|
||||
f.srv.SetKeepAlivesEnabled(f.EnableKeepAlive)
|
||||
|
||||
// Start the HTTP server.
|
||||
if err := f.srv.Serve(l); !errors.Is(err, http.ErrServerClosed) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// announceRoute parses and responds to an Announce by using t.TrackerLogic.
|
||||
func (t *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||
var err error
|
||||
start := time.Now()
|
||||
defer recordResponseDuration("announce", err, time.Since(start))
|
||||
|
||||
req, err := ParseAnnounce(r, t.RealIPHeader, t.AllowIPSpoofing)
|
||||
if err != nil {
|
||||
WriteError(w, err)
|
||||
return
|
||||
// serveHTTPS blocks while listening and serving TLS HTTP BitTorrent
|
||||
// requests until Stop() is called or an error is returned.
|
||||
func (f *Frontend) serveHTTPS(l net.Listener) error {
|
||||
f.tlsSrv = &http.Server{
|
||||
Addr: f.HTTPSAddr,
|
||||
TLSConfig: f.tlsCfg,
|
||||
Handler: f.handler(),
|
||||
ReadTimeout: f.ReadTimeout,
|
||||
WriteTimeout: f.WriteTimeout,
|
||||
}
|
||||
|
||||
resp, err := t.logic.HandleAnnounce(context.Background(), req)
|
||||
if err != nil {
|
||||
WriteError(w, err)
|
||||
return
|
||||
}
|
||||
f.tlsSrv.SetKeepAlivesEnabled(f.EnableKeepAlive)
|
||||
|
||||
err = WriteAnnounceResponse(w, resp)
|
||||
if err != nil {
|
||||
WriteError(w, err)
|
||||
return
|
||||
// Start the HTTP server.
|
||||
if err := f.tlsSrv.ServeTLS(l, "", ""); !errors.Is(err, http.ErrServerClosed) {
|
||||
return err
|
||||
}
|
||||
|
||||
go t.logic.AfterAnnounce(context.Background(), req, resp)
|
||||
return nil
|
||||
}
|
||||
|
||||
// scrapeRoute parses and responds to a Scrape by using t.TrackerLogic.
|
||||
func (t *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||
var err error
|
||||
start := time.Now()
|
||||
defer recordResponseDuration("scrape", err, time.Since(start))
|
||||
func injectRouteParamsToContext(ctx context.Context, ps httprouter.Params) context.Context {
|
||||
rp := bittorrent.RouteParams{}
|
||||
for _, p := range ps {
|
||||
rp = append(rp, bittorrent.RouteParam{Key: p.Key, Value: p.Value})
|
||||
}
|
||||
return context.WithValue(ctx, bittorrent.RouteParamsKey, rp)
|
||||
}
|
||||
|
||||
req, err := ParseScrape(r)
|
||||
// announceRoute parses and responds to an Announce.
|
||||
func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
|
||||
var err error
|
||||
var start time.Time
|
||||
if f.EnableRequestTiming {
|
||||
start = time.Now()
|
||||
}
|
||||
var af *bittorrent.AddressFamily
|
||||
defer func() {
|
||||
if f.EnableRequestTiming {
|
||||
recordResponseDuration("announce", af, err, time.Since(start))
|
||||
} else {
|
||||
recordResponseDuration("announce", af, err, time.Duration(0))
|
||||
}
|
||||
}()
|
||||
|
||||
req, err := ParseAnnounce(r, f.ParseOptions)
|
||||
if err != nil {
|
||||
WriteError(w, err)
|
||||
_ = WriteError(w, err)
|
||||
return
|
||||
}
|
||||
af = new(bittorrent.AddressFamily)
|
||||
*af = req.IP.AddressFamily
|
||||
|
||||
ctx := injectRouteParamsToContext(context.Background(), ps)
|
||||
ctx, resp, err := f.logic.HandleAnnounce(ctx, req)
|
||||
if err != nil {
|
||||
_ = WriteError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
err = WriteAnnounceResponse(w, resp)
|
||||
if err != nil {
|
||||
_ = WriteError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
go f.logic.AfterAnnounce(ctx, req, resp)
|
||||
}
|
||||
|
||||
// scrapeRoute parses and responds to a Scrape.
|
||||
func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
|
||||
var err error
|
||||
var start time.Time
|
||||
if f.EnableRequestTiming {
|
||||
start = time.Now()
|
||||
}
|
||||
var af *bittorrent.AddressFamily
|
||||
defer func() {
|
||||
if f.EnableRequestTiming {
|
||||
recordResponseDuration("scrape", af, err, time.Since(start))
|
||||
} else {
|
||||
recordResponseDuration("scrape", af, err, time.Duration(0))
|
||||
}
|
||||
}()
|
||||
|
||||
req, err := ParseScrape(r, f.ParseOptions)
|
||||
if err != nil {
|
||||
_ = WriteError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
host, _, err := net.SplitHostPort(r.RemoteAddr)
|
||||
if err != nil {
|
||||
log.Errorln("http: unable to determine remote address for scrape:", err)
|
||||
WriteError(w, err)
|
||||
log.Error("http: unable to determine remote address for scrape", log.Err(err))
|
||||
_ = WriteError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
ip := net.ParseIP(host)
|
||||
ctx := context.WithValue(context.Background(), middleware.ScrapeIsIPv6Key, len(ip) == net.IPv6len)
|
||||
reqIP := net.ParseIP(host)
|
||||
if reqIP.To4() != nil {
|
||||
req.AddressFamily = bittorrent.IPv4
|
||||
} else if len(reqIP) == net.IPv6len { // implies reqIP.To4() == nil
|
||||
req.AddressFamily = bittorrent.IPv6
|
||||
} else {
|
||||
log.Error("http: invalid IP: neither v4 nor v6", log.Fields{"RemoteAddr": r.RemoteAddr})
|
||||
_ = WriteError(w, bittorrent.ErrInvalidIP)
|
||||
return
|
||||
}
|
||||
af = new(bittorrent.AddressFamily)
|
||||
*af = req.AddressFamily
|
||||
|
||||
resp, err := t.logic.HandleScrape(ctx, req)
|
||||
ctx := injectRouteParamsToContext(context.Background(), ps)
|
||||
ctx, resp, err := f.logic.HandleScrape(ctx, req)
|
||||
if err != nil {
|
||||
WriteError(w, err)
|
||||
_ = WriteError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
err = WriteScrapeResponse(w, resp)
|
||||
if err != nil {
|
||||
WriteError(w, err)
|
||||
_ = WriteError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
go t.logic.AfterScrape(context.Background(), req, resp)
|
||||
go f.logic.AfterScrape(ctx, req, resp)
|
||||
}
|
||||
|
|
|
@ -1,18 +1,35 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
)
|
||||
|
||||
// ParseAnnounce parses an bittorrent.AnnounceRequest from an http.Request.
|
||||
// ParseOptions is the configuration used to parse an Announce Request.
|
||||
//
|
||||
// If allowIPSpoofing is true, IPs provided via params will be used.
|
||||
// If realIPHeader is not empty string, the first value of the HTTP Header with
|
||||
// If AllowIPSpoofing is true, IPs provided via BitTorrent params will be used.
|
||||
// If RealIPHeader is not empty string, the value of the first HTTP Header with
|
||||
// that name will be used.
|
||||
func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (*bittorrent.AnnounceRequest, error) {
|
||||
type ParseOptions struct {
|
||||
AllowIPSpoofing bool `yaml:"allow_ip_spoofing"`
|
||||
RealIPHeader string `yaml:"real_ip_header"`
|
||||
MaxNumWant uint32 `yaml:"max_numwant"`
|
||||
DefaultNumWant uint32 `yaml:"default_numwant"`
|
||||
MaxScrapeInfoHashes uint32 `yaml:"max_scrape_infohashes"`
|
||||
}
|
||||
|
||||
// Default parser config constants.
|
||||
const (
|
||||
defaultMaxNumWant = 100
|
||||
defaultDefaultNumWant = 50
|
||||
defaultMaxScrapeInfoHashes = 50
|
||||
)
|
||||
|
||||
// ParseAnnounce parses an bittorrent.AnnounceRequest from an http.Request.
|
||||
func ParseAnnounce(r *http.Request, opts ParseOptions) (*bittorrent.AnnounceRequest, error) {
|
||||
qp, err := bittorrent.ParseURLData(r.RequestURI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -20,15 +37,23 @@ func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (
|
|||
|
||||
request := &bittorrent.AnnounceRequest{Params: qp}
|
||||
|
||||
eventStr, _ := qp.String("event")
|
||||
// Attempt to parse the event from the request.
|
||||
var eventStr string
|
||||
eventStr, request.EventProvided = qp.String("event")
|
||||
if request.EventProvided {
|
||||
request.Event, err = bittorrent.NewEvent(eventStr)
|
||||
if err != nil {
|
||||
return nil, bittorrent.ClientError("failed to provide valid client event")
|
||||
}
|
||||
} else {
|
||||
request.Event = bittorrent.None
|
||||
}
|
||||
|
||||
// Determine if the client expects a compact response.
|
||||
compactStr, _ := qp.String("compact")
|
||||
request.Compact = compactStr != "" && compactStr != "0"
|
||||
|
||||
// Parse the infohash from the request.
|
||||
infoHashes := qp.InfoHashes()
|
||||
if len(infoHashes) < 1 {
|
||||
return nil, bittorrent.ClientError("no info_hash parameter supplied")
|
||||
|
@ -38,6 +63,7 @@ func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (
|
|||
}
|
||||
request.InfoHash = infoHashes[0]
|
||||
|
||||
// Parse the PeerID from the request.
|
||||
peerID, ok := qp.String("peer_id")
|
||||
if !ok {
|
||||
return nil, bittorrent.ClientError("failed to parse parameter: peer_id")
|
||||
|
@ -47,48 +73,55 @@ func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (
|
|||
}
|
||||
request.Peer.ID = bittorrent.PeerIDFromString(peerID)
|
||||
|
||||
request.Left, err = qp.Uint64("left")
|
||||
// Determine the number of remaining bytes for the client.
|
||||
request.Left, err = qp.Uint("left", 64)
|
||||
if err != nil {
|
||||
return nil, bittorrent.ClientError("failed to parse parameter: left")
|
||||
}
|
||||
|
||||
request.Downloaded, err = qp.Uint64("downloaded")
|
||||
// Determine the number of bytes downloaded by the client.
|
||||
request.Downloaded, err = qp.Uint("downloaded", 64)
|
||||
if err != nil {
|
||||
return nil, bittorrent.ClientError("failed to parse parameter: downloaded")
|
||||
}
|
||||
|
||||
request.Uploaded, err = qp.Uint64("uploaded")
|
||||
// Determine the number of bytes shared by the client.
|
||||
request.Uploaded, err = qp.Uint("uploaded", 64)
|
||||
if err != nil {
|
||||
return nil, bittorrent.ClientError("failed to parse parameter: uploaded")
|
||||
}
|
||||
|
||||
numwant, err := qp.Uint64("numwant")
|
||||
if err != nil {
|
||||
// Determine the number of peers the client wants in the response.
|
||||
numwant, err := qp.Uint("numwant", 32)
|
||||
if err != nil && !errors.Is(err, bittorrent.ErrKeyNotFound) {
|
||||
return nil, bittorrent.ClientError("failed to parse parameter: numwant")
|
||||
}
|
||||
// If there were no errors, the user actually provided the numwant.
|
||||
request.NumWantProvided = err == nil
|
||||
request.NumWant = uint32(numwant)
|
||||
|
||||
port, err := qp.Uint64("port")
|
||||
// Parse the port where the client is listening.
|
||||
port, err := qp.Uint("port", 16)
|
||||
if err != nil {
|
||||
return nil, bittorrent.ClientError("failed to parse parameter: port")
|
||||
}
|
||||
request.Peer.Port = uint16(port)
|
||||
|
||||
request.Peer.IP = requestedIP(r, qp, realIPHeader, allowIPSpoofing)
|
||||
if request.Peer.IP == nil {
|
||||
// Parse the IP address where the client is listening.
|
||||
request.Peer.IP.IP, request.IPProvided = requestedIP(r, qp, opts)
|
||||
if request.Peer.IP.IP == nil {
|
||||
return nil, bittorrent.ClientError("failed to parse peer IP address")
|
||||
}
|
||||
|
||||
// Sanitize IPv4 addresses to 4 bytes.
|
||||
if ip := request.Peer.IP.To4(); ip != nil {
|
||||
request.Peer.IP = ip
|
||||
if err := bittorrent.SanitizeAnnounce(request, opts.MaxNumWant, opts.DefaultNumWant); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return request, nil
|
||||
}
|
||||
|
||||
// ParseScrape parses an bittorrent.ScrapeRequest from an http.Request.
|
||||
func ParseScrape(r *http.Request) (*bittorrent.ScrapeRequest, error) {
|
||||
func ParseScrape(r *http.Request, opts ParseOptions) (*bittorrent.ScrapeRequest, error) {
|
||||
qp, err := bittorrent.ParseURLData(r.RequestURI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -104,39 +137,35 @@ func ParseScrape(r *http.Request) (*bittorrent.ScrapeRequest, error) {
|
|||
Params: qp,
|
||||
}
|
||||
|
||||
if err := bittorrent.SanitizeScrape(request, opts.MaxScrapeInfoHashes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return request, nil
|
||||
}
|
||||
|
||||
// requestedIP determines the IP address for a BitTorrent client request.
|
||||
//
|
||||
// If allowIPSpoofing is true, IPs provided via params will be used.
|
||||
// If realIPHeader is not empty string, the first value of the HTTP Header with
|
||||
// that name will be used.
|
||||
func requestedIP(r *http.Request, p bittorrent.Params, realIPHeader string, allowIPSpoofing bool) net.IP {
|
||||
if allowIPSpoofing {
|
||||
func requestedIP(r *http.Request, p bittorrent.Params, opts ParseOptions) (ip net.IP, provided bool) {
|
||||
if opts.AllowIPSpoofing {
|
||||
if ipstr, ok := p.String("ip"); ok {
|
||||
ip := net.ParseIP(ipstr)
|
||||
return ip
|
||||
return net.ParseIP(ipstr), true
|
||||
}
|
||||
|
||||
if ipstr, ok := p.String("ipv4"); ok {
|
||||
ip := net.ParseIP(ipstr)
|
||||
return ip
|
||||
return net.ParseIP(ipstr), true
|
||||
}
|
||||
|
||||
if ipstr, ok := p.String("ipv6"); ok {
|
||||
ip := net.ParseIP(ipstr)
|
||||
return ip
|
||||
return net.ParseIP(ipstr), true
|
||||
}
|
||||
}
|
||||
|
||||
if realIPHeader != "" {
|
||||
if ips, ok := r.Header[realIPHeader]; ok && len(ips) > 0 {
|
||||
ip := net.ParseIP(ips[0])
|
||||
return ip
|
||||
if opts.RealIPHeader != "" {
|
||||
if ip := r.Header.Get(opts.RealIPHeader); ip != "" {
|
||||
return net.ParseIP(ip), false
|
||||
}
|
||||
}
|
||||
|
||||
host, _, _ := net.SplitHostPort(r.RemoteAddr)
|
||||
return net.ParseIP(host)
|
||||
return net.ParseIP(host), false
|
||||
}
|
||||
|
|
50
frontend/http/prometheus.go
Normal file
50
frontend/http/prometheus.go
Normal file
|
@ -0,0 +1,50 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(promResponseDurationMilliseconds)
|
||||
}
|
||||
|
||||
var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "chihaya_http_response_duration_milliseconds",
|
||||
Help: "The duration of time it takes to receive and write a response to an API request",
|
||||
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
|
||||
},
|
||||
[]string{"action", "address_family", "error"},
|
||||
)
|
||||
|
||||
// recordResponseDuration records the duration of time to respond to a Request
|
||||
// in milliseconds.
|
||||
func recordResponseDuration(action string, af *bittorrent.AddressFamily, err error, duration time.Duration) {
|
||||
var errString string
|
||||
if err != nil {
|
||||
var clientErr bittorrent.ClientError
|
||||
if errors.As(err, &clientErr) {
|
||||
errString = clientErr.Error()
|
||||
} else {
|
||||
errString = "internal error"
|
||||
}
|
||||
}
|
||||
|
||||
var afString string
|
||||
if af == nil {
|
||||
afString = "Unknown"
|
||||
} else if *af == bittorrent.IPv4 {
|
||||
afString = "IPv4"
|
||||
} else if *af == bittorrent.IPv6 {
|
||||
afString = "IPv6"
|
||||
}
|
||||
|
||||
promResponseDurationMilliseconds.
|
||||
WithLabelValues(action, afString, errString).
|
||||
Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
|
||||
}
|
|
@ -1,21 +1,22 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/frontend/http/bencode"
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
)
|
||||
|
||||
// WriteError communicates an error to a BitTorrent client over HTTP.
|
||||
func WriteError(w http.ResponseWriter, err error) error {
|
||||
message := "internal server error"
|
||||
if _, clientErr := err.(bittorrent.ClientError); clientErr {
|
||||
message = err.Error()
|
||||
var clientErr bittorrent.ClientError
|
||||
if errors.As(err, &clientErr) {
|
||||
message = clientErr.Error()
|
||||
} else {
|
||||
log.Errorf("http: internal error: %s", err)
|
||||
log.Error("http: internal error", log.Err(err))
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
@ -58,7 +59,7 @@ func WriteAnnounceResponse(w http.ResponseWriter, resp *bittorrent.AnnounceRespo
|
|||
}
|
||||
|
||||
// Add the peers to the dictionary.
|
||||
var peers []bencode.Dict
|
||||
peers := make([]bencode.Dict, 0, len(resp.IPv4Peers)+len(resp.IPv6Peers))
|
||||
for _, peer := range resp.IPv4Peers {
|
||||
peers = append(peers, dict(peer))
|
||||
}
|
||||
|
@ -74,8 +75,8 @@ func WriteAnnounceResponse(w http.ResponseWriter, resp *bittorrent.AnnounceRespo
|
|||
// client over HTTP.
|
||||
func WriteScrapeResponse(w http.ResponseWriter, resp *bittorrent.ScrapeResponse) error {
|
||||
filesDict := bencode.NewDict()
|
||||
for infohash, scrape := range resp.Files {
|
||||
filesDict[string(infohash[:])] = bencode.Dict{
|
||||
for _, scrape := range resp.Files {
|
||||
filesDict[string(scrape.InfoHash[:])] = bencode.Dict{
|
||||
"complete": scrape.Complete,
|
||||
"incomplete": scrape.Incomplete,
|
||||
}
|
||||
|
|
|
@ -1,16 +1,17 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
)
|
||||
|
||||
func TestWriteError(t *testing.T) {
|
||||
var table = []struct {
|
||||
table := []struct {
|
||||
reason, expected string
|
||||
}{
|
||||
{"hello world", "d14:failure reason11:hello worlde"},
|
||||
|
@ -18,16 +19,28 @@ func TestWriteError(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, tt := range table {
|
||||
t.Run(fmt.Sprintf("%s expecting %s", tt.reason, tt.expected), func(t *testing.T) {
|
||||
r := httptest.NewRecorder()
|
||||
err := WriteError(r, bittorrent.ClientError(tt.reason))
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, r.Body.String(), tt.expected)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, r.Body.String(), tt.expected)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteStatus(t *testing.T) {
|
||||
table := []struct {
|
||||
reason, expected string
|
||||
}{
|
||||
{"something is missing", "d14:failure reason20:something is missinge"},
|
||||
}
|
||||
|
||||
for _, tt := range table {
|
||||
t.Run(fmt.Sprintf("%s expecting %s", tt.reason, tt.expected), func(t *testing.T) {
|
||||
r := httptest.NewRecorder()
|
||||
err := WriteError(r, bittorrent.ClientError("something is missing"))
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, r.Body.String(), "d14:failure reason20:something is missinge")
|
||||
err := WriteError(r, bittorrent.ClientError(tt.reason))
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, r.Body.String(), tt.expected)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -11,24 +11,27 @@ type BytePool struct {
|
|||
func New(length int) *BytePool {
|
||||
var bp BytePool
|
||||
bp.Pool.New = func() interface{} {
|
||||
return make([]byte, length, length)
|
||||
b := make([]byte, length)
|
||||
return &b
|
||||
}
|
||||
return &bp
|
||||
}
|
||||
|
||||
// Get returns a byte slice from the pool.
|
||||
func (bp *BytePool) Get() []byte {
|
||||
return bp.Pool.Get().([]byte)
|
||||
func (bp *BytePool) Get() *[]byte {
|
||||
return bp.Pool.Get().(*[]byte)
|
||||
}
|
||||
|
||||
// Put returns a byte slice to the pool.
|
||||
func (bp *BytePool) Put(b []byte) {
|
||||
b = b[:cap(b)]
|
||||
func (bp *BytePool) Put(b *[]byte) {
|
||||
*b = (*b)[:cap(*b)]
|
||||
|
||||
// Zero out the bytes.
|
||||
// Apparently this specific expression is optimized by the compiler, see
|
||||
// github.com/golang/go/issues/5373.
|
||||
for i := range b {
|
||||
b[i] = 0
|
||||
// This specific expression is optimized by the compiler:
|
||||
// https://github.com/golang/go/issues/5373.
|
||||
for i := range *b {
|
||||
(*b)[i] = 0
|
||||
}
|
||||
|
||||
bp.Pool.Put(b)
|
||||
}
|
||||
|
|
|
@ -2,18 +2,77 @@ package udp
|
|||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
sha256 "github.com/minio/sha256-simd"
|
||||
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
)
|
||||
|
||||
// ttl is the number of seconds a connection ID should be valid according to
|
||||
// BEP 15.
|
||||
// ttl is the duration a connection ID should be valid according to BEP 15.
|
||||
const ttl = 2 * time.Minute
|
||||
|
||||
// NewConnectionID creates a new 8 byte connection identifier for UDP packets
|
||||
// as described by BEP 15.
|
||||
// NewConnectionID creates an 8-byte connection identifier for UDP packets as
|
||||
// described by BEP 15.
|
||||
// This is a wrapper around creating a new ConnectionIDGenerator and generating
|
||||
// an ID. It is recommended to use the generator for performance.
|
||||
func NewConnectionID(ip net.IP, now time.Time, key string) []byte {
|
||||
return NewConnectionIDGenerator(key).Generate(ip, now)
|
||||
}
|
||||
|
||||
// ValidConnectionID determines whether a connection identifier is legitimate.
|
||||
// This is a wrapper around creating a new ConnectionIDGenerator and validating
|
||||
// the ID. It is recommended to use the generator for performance.
|
||||
func ValidConnectionID(connectionID []byte, ip net.IP, now time.Time, maxClockSkew time.Duration, key string) bool {
|
||||
return NewConnectionIDGenerator(key).Validate(connectionID, ip, now, maxClockSkew)
|
||||
}
|
||||
|
||||
// A ConnectionIDGenerator is a reusable generator and validator for connection
|
||||
// IDs as described in BEP 15.
|
||||
// It is not thread safe, but is safe to be pooled and reused by other
|
||||
// goroutines. It manages its state itself, so it can be taken from and returned
|
||||
// to a pool without any cleanup.
|
||||
// After initial creation, it can generate connection IDs without allocating.
|
||||
// See Generate and Validate for usage notes and guarantees.
|
||||
type ConnectionIDGenerator struct {
|
||||
// mac is a keyed HMAC that can be reused for subsequent connection ID
|
||||
// generations.
|
||||
mac hash.Hash
|
||||
|
||||
// connID is an 8-byte slice that holds the generated connection ID after a
|
||||
// call to Generate.
|
||||
// It must not be referenced after the generator is returned to a pool.
|
||||
// It will be overwritten by subsequent calls to Generate.
|
||||
connID []byte
|
||||
|
||||
// scratch is a 32-byte slice that is used as a scratchpad for the generated
|
||||
// HMACs.
|
||||
scratch []byte
|
||||
}
|
||||
|
||||
// NewConnectionIDGenerator creates a new connection ID generator.
|
||||
func NewConnectionIDGenerator(key string) *ConnectionIDGenerator {
|
||||
return &ConnectionIDGenerator{
|
||||
mac: hmac.New(sha256.New, []byte(key)),
|
||||
connID: make([]byte, 8),
|
||||
scratch: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
|
||||
// reset resets the generator.
|
||||
// This is called by other methods of the generator, it's not necessary to call
|
||||
// it after getting a generator from a pool.
|
||||
func (g *ConnectionIDGenerator) reset() {
|
||||
g.mac.Reset()
|
||||
g.connID = g.connID[:8]
|
||||
g.scratch = g.scratch[:0]
|
||||
}
|
||||
|
||||
// Generate generates an 8-byte connection ID as described in BEP 15 for the
|
||||
// given IP and the current time.
|
||||
//
|
||||
// The first 4 bytes of the connection identifier is a unix timestamp and the
|
||||
// last 4 bytes are a truncated HMAC token created from the aforementioned
|
||||
|
@ -22,29 +81,36 @@ const ttl = 2 * time.Minute
|
|||
// Truncated HMAC is known to be safe for 2^(-n) where n is the size in bits
|
||||
// of the truncated HMAC token. In this use case we have 32 bits, thus a
|
||||
// forgery probability of approximately 1 in 4 billion.
|
||||
func NewConnectionID(ip net.IP, now time.Time, key string) []byte {
|
||||
buf := make([]byte, 8)
|
||||
binary.BigEndian.PutUint32(buf, uint32(now.UTC().Unix()))
|
||||
//
|
||||
// The generated ID is written to g.connID, which is also returned. g.connID
|
||||
// will be reused, so it must not be referenced after returning the generator
|
||||
// to a pool and will be overwritten be subsequent calls to Generate!
|
||||
func (g *ConnectionIDGenerator) Generate(ip net.IP, now time.Time) []byte {
|
||||
g.reset()
|
||||
|
||||
mac := hmac.New(sha256.New, []byte(key))
|
||||
mac.Write(buf[:4])
|
||||
mac.Write(ip)
|
||||
macBytes := mac.Sum(nil)[:4]
|
||||
copy(buf[4:], macBytes)
|
||||
binary.BigEndian.PutUint32(g.connID, uint32(now.Unix()))
|
||||
|
||||
return buf
|
||||
g.mac.Write(g.connID[:4])
|
||||
g.mac.Write(ip)
|
||||
g.scratch = g.mac.Sum(g.scratch)
|
||||
copy(g.connID[4:8], g.scratch[:4])
|
||||
|
||||
log.Debug("generated connection ID", log.Fields{"ip": ip, "now": now, "connID": g.connID})
|
||||
return g.connID
|
||||
}
|
||||
|
||||
// ValidConnectionID determines whether a connection identifier is legitimate.
|
||||
func ValidConnectionID(connectionID []byte, ip net.IP, now time.Time, maxClockSkew time.Duration, key string) bool {
|
||||
// Validate validates the given connection ID for an IP and the current time.
|
||||
func (g *ConnectionIDGenerator) Validate(connectionID []byte, ip net.IP, now time.Time, maxClockSkew time.Duration) bool {
|
||||
ts := time.Unix(int64(binary.BigEndian.Uint32(connectionID[:4])), 0)
|
||||
log.Debug("validating connection ID", log.Fields{"connID": connectionID, "ip": ip, "ts": ts, "now": now})
|
||||
if now.After(ts.Add(ttl)) || ts.After(now.Add(maxClockSkew)) {
|
||||
return false
|
||||
}
|
||||
|
||||
mac := hmac.New(sha256.New, []byte(key))
|
||||
mac.Write(connectionID[:4])
|
||||
mac.Write(ip)
|
||||
expectedMAC := mac.Sum(nil)[:4]
|
||||
return hmac.Equal(expectedMAC, connectionID[4:])
|
||||
g.reset()
|
||||
|
||||
g.mac.Write(connectionID[:4])
|
||||
g.mac.Write(ip)
|
||||
g.scratch = g.mac.Sum(g.scratch)
|
||||
return hmac.Equal(g.scratch[:4], connectionID[4:])
|
||||
}
|
||||
|
|
|
@ -1,9 +1,18 @@
|
|||
package udp
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
sha256 "github.com/minio/sha256-simd"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
)
|
||||
|
||||
var golden = []struct {
|
||||
|
@ -18,12 +27,167 @@ var golden = []struct {
|
|||
{0, 0, "[::]", "", true},
|
||||
}
|
||||
|
||||
// simpleNewConnectionID generates a new connection ID the explicit way.
|
||||
// This is used to verify correct behaviour of the generator.
|
||||
func simpleNewConnectionID(ip net.IP, now time.Time, key string) []byte {
|
||||
buf := make([]byte, 8)
|
||||
binary.BigEndian.PutUint32(buf, uint32(now.Unix()))
|
||||
|
||||
mac := hmac.New(sha256.New, []byte(key))
|
||||
mac.Write(buf[:4])
|
||||
mac.Write(ip)
|
||||
macBytes := mac.Sum(nil)[:4]
|
||||
copy(buf[4:], macBytes)
|
||||
|
||||
// this is just in here because logging impacts performance and we benchmark
|
||||
// this version too.
|
||||
log.Debug("manually generated connection ID", log.Fields{"ip": ip, "now": now, "connID": buf})
|
||||
return buf
|
||||
}
|
||||
|
||||
func TestVerification(t *testing.T) {
|
||||
for _, tt := range golden {
|
||||
t.Run(fmt.Sprintf("%s created at %d verified at %d", tt.ip, tt.createdAt, tt.now), func(t *testing.T) {
|
||||
cid := NewConnectionID(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0), tt.key)
|
||||
got := ValidConnectionID(cid, net.ParseIP(tt.ip), time.Unix(tt.now, 0), time.Minute, tt.key)
|
||||
if got != tt.valid {
|
||||
t.Errorf("expected validity: %t got validity: %t", tt.valid, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGeneration(t *testing.T) {
|
||||
for _, tt := range golden {
|
||||
t.Run(fmt.Sprintf("%s created at %d", tt.ip, tt.createdAt), func(t *testing.T) {
|
||||
want := simpleNewConnectionID(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0), tt.key)
|
||||
got := NewConnectionID(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0), tt.key)
|
||||
require.Equal(t, want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReuseGeneratorGenerate(t *testing.T) {
|
||||
for _, tt := range golden {
|
||||
t.Run(fmt.Sprintf("%s created at %d", tt.ip, tt.createdAt), func(t *testing.T) {
|
||||
cid := NewConnectionID(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0), tt.key)
|
||||
require.Len(t, cid, 8)
|
||||
|
||||
gen := NewConnectionIDGenerator(tt.key)
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
connID := gen.Generate(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0))
|
||||
require.Equal(t, cid, connID)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReuseGeneratorValidate(t *testing.T) {
|
||||
for _, tt := range golden {
|
||||
t.Run(fmt.Sprintf("%s created at %d verified at %d", tt.ip, tt.createdAt, tt.now), func(t *testing.T) {
|
||||
gen := NewConnectionIDGenerator(tt.key)
|
||||
cid := gen.Generate(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0))
|
||||
for i := 0; i < 3; i++ {
|
||||
got := gen.Validate(cid, net.ParseIP(tt.ip), time.Unix(tt.now, 0), time.Minute)
|
||||
if got != tt.valid {
|
||||
t.Errorf("expected validity: %t got validity: %t", tt.valid, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSimpleNewConnectionID(b *testing.B) {
|
||||
ip := net.ParseIP("127.0.0.1")
|
||||
key := "some random string that is hopefully at least this long"
|
||||
createdAt := time.Now()
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
sum := int64(0)
|
||||
|
||||
for pb.Next() {
|
||||
cid := simpleNewConnectionID(ip, createdAt, key)
|
||||
sum += int64(cid[7])
|
||||
}
|
||||
|
||||
_ = sum
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkNewConnectionID(b *testing.B) {
|
||||
ip := net.ParseIP("127.0.0.1")
|
||||
key := "some random string that is hopefully at least this long"
|
||||
createdAt := time.Now()
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
sum := int64(0)
|
||||
|
||||
for pb.Next() {
|
||||
cid := NewConnectionID(ip, createdAt, key)
|
||||
sum += int64(cid[7])
|
||||
}
|
||||
|
||||
_ = sum
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkConnectionIDGenerator_Generate(b *testing.B) {
|
||||
ip := net.ParseIP("127.0.0.1")
|
||||
key := "some random string that is hopefully at least this long"
|
||||
createdAt := time.Now()
|
||||
|
||||
pool := &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return NewConnectionIDGenerator(key)
|
||||
},
|
||||
}
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
sum := int64(0)
|
||||
for pb.Next() {
|
||||
gen := pool.Get().(*ConnectionIDGenerator)
|
||||
cid := gen.Generate(ip, createdAt)
|
||||
sum += int64(cid[7])
|
||||
pool.Put(gen)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkValidConnectionID(b *testing.B) {
|
||||
ip := net.ParseIP("127.0.0.1")
|
||||
key := "some random string that is hopefully at least this long"
|
||||
createdAt := time.Now()
|
||||
cid := NewConnectionID(ip, createdAt, key)
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if !ValidConnectionID(cid, ip, createdAt, 10*time.Second, key) {
|
||||
b.FailNow()
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkConnectionIDGenerator_Validate(b *testing.B) {
|
||||
ip := net.ParseIP("127.0.0.1")
|
||||
key := "some random string that is hopefully at least this long"
|
||||
createdAt := time.Now()
|
||||
cid := NewConnectionID(ip, createdAt, key)
|
||||
|
||||
pool := &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return NewConnectionIDGenerator(key)
|
||||
},
|
||||
}
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
gen := pool.Get().(*ConnectionIDGenerator)
|
||||
if !gen.Validate(cid, ip, createdAt, 10*time.Second) {
|
||||
b.FailNow()
|
||||
}
|
||||
pool.Put(gen)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
@ -6,44 +6,22 @@ import (
|
|||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/frontend"
|
||||
"github.com/chihaya/chihaya/frontend/udp/bytepool"
|
||||
"github.com/chihaya/chihaya/middleware"
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
"github.com/chihaya/chihaya/pkg/stop"
|
||||
"github.com/chihaya/chihaya/pkg/timecache"
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(promResponseDurationMilliseconds)
|
||||
recordResponseDuration("action", nil, time.Second)
|
||||
}
|
||||
|
||||
var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "chihaya_udp_response_duration_milliseconds",
|
||||
Help: "The duration of time it takes to receive and write a response to an API request",
|
||||
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
|
||||
},
|
||||
[]string{"action", "error"},
|
||||
)
|
||||
|
||||
// recordResponseDuration records the duration of time to respond to a UDP
|
||||
// Request in milliseconds .
|
||||
func recordResponseDuration(action string, err error, duration time.Duration) {
|
||||
var errString string
|
||||
if err != nil {
|
||||
errString = err.Error()
|
||||
}
|
||||
|
||||
promResponseDurationMilliseconds.
|
||||
WithLabelValues(action, errString).
|
||||
Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
|
||||
}
|
||||
var allowedGeneratedPrivateKeyRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890")
|
||||
|
||||
// Config represents all of the configurable options for a UDP BitTorrent
|
||||
// Tracker.
|
||||
|
@ -51,7 +29,71 @@ type Config struct {
|
|||
Addr string `yaml:"addr"`
|
||||
PrivateKey string `yaml:"private_key"`
|
||||
MaxClockSkew time.Duration `yaml:"max_clock_skew"`
|
||||
AllowIPSpoofing bool `yaml:"allow_ip_spoofing"`
|
||||
EnableRequestTiming bool `yaml:"enable_request_timing"`
|
||||
ParseOptions `yaml:",inline"`
|
||||
}
|
||||
|
||||
// LogFields renders the current config as a set of Logrus fields.
|
||||
func (cfg Config) LogFields() log.Fields {
|
||||
return log.Fields{
|
||||
"addr": cfg.Addr,
|
||||
"privateKey": cfg.PrivateKey,
|
||||
"maxClockSkew": cfg.MaxClockSkew,
|
||||
"enableRequestTiming": cfg.EnableRequestTiming,
|
||||
"allowIPSpoofing": cfg.AllowIPSpoofing,
|
||||
"maxNumWant": cfg.MaxNumWant,
|
||||
"defaultNumWant": cfg.DefaultNumWant,
|
||||
"maxScrapeInfoHashes": cfg.MaxScrapeInfoHashes,
|
||||
}
|
||||
}
|
||||
|
||||
// Validate sanity checks values set in a config and returns a new config with
|
||||
// default values replacing anything that is invalid.
|
||||
//
|
||||
// This function warns to the logger when a value is changed.
|
||||
func (cfg Config) Validate() Config {
|
||||
validcfg := cfg
|
||||
|
||||
// Generate a private key if one isn't provided by the user.
|
||||
if cfg.PrivateKey == "" {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
pkeyRunes := make([]rune, 64)
|
||||
for i := range pkeyRunes {
|
||||
pkeyRunes[i] = allowedGeneratedPrivateKeyRunes[rand.Intn(len(allowedGeneratedPrivateKeyRunes))]
|
||||
}
|
||||
validcfg.PrivateKey = string(pkeyRunes)
|
||||
|
||||
log.Warn("UDP private key was not provided, using generated key", log.Fields{"key": validcfg.PrivateKey})
|
||||
}
|
||||
|
||||
if cfg.MaxNumWant <= 0 {
|
||||
validcfg.MaxNumWant = defaultMaxNumWant
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": "udp.MaxNumWant",
|
||||
"provided": cfg.MaxNumWant,
|
||||
"default": validcfg.MaxNumWant,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.DefaultNumWant <= 0 {
|
||||
validcfg.DefaultNumWant = defaultDefaultNumWant
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": "udp.DefaultNumWant",
|
||||
"provided": cfg.DefaultNumWant,
|
||||
"default": validcfg.DefaultNumWant,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.MaxScrapeInfoHashes <= 0 {
|
||||
validcfg.MaxScrapeInfoHashes = defaultMaxScrapeInfoHashes
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": "udp.MaxScrapeInfoHashes",
|
||||
"provided": cfg.MaxScrapeInfoHashes,
|
||||
"default": validcfg.MaxScrapeInfoHashes,
|
||||
})
|
||||
}
|
||||
|
||||
return validcfg
|
||||
}
|
||||
|
||||
// Frontend holds the state of a UDP BitTorrent Frontend.
|
||||
|
@ -60,57 +102,94 @@ type Frontend struct {
|
|||
closing chan struct{}
|
||||
wg sync.WaitGroup
|
||||
|
||||
genPool *sync.Pool
|
||||
|
||||
logic frontend.TrackerLogic
|
||||
Config
|
||||
}
|
||||
|
||||
// NewFrontend allocates a new instance of a Frontend.
|
||||
func NewFrontend(logic frontend.TrackerLogic, cfg Config) *Frontend {
|
||||
return &Frontend{
|
||||
// NewFrontend creates a new instance of an UDP Frontend that asynchronously
|
||||
// serves requests.
|
||||
func NewFrontend(logic frontend.TrackerLogic, provided Config) (*Frontend, error) {
|
||||
cfg := provided.Validate()
|
||||
|
||||
f := &Frontend{
|
||||
closing: make(chan struct{}),
|
||||
logic: logic,
|
||||
Config: cfg,
|
||||
genPool: &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return NewConnectionIDGenerator(cfg.PrivateKey)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if err := f.listen(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go func() {
|
||||
if err := f.serve(); err != nil {
|
||||
log.Fatal("failed while serving udp", log.Err(err))
|
||||
}
|
||||
}()
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Stop provides a thread-safe way to shutdown a currently running Frontend.
|
||||
func (t *Frontend) Stop() {
|
||||
func (t *Frontend) Stop() stop.Result {
|
||||
select {
|
||||
case <-t.closing:
|
||||
return stop.AlreadyStopped
|
||||
default:
|
||||
}
|
||||
|
||||
c := make(stop.Channel)
|
||||
go func() {
|
||||
close(t.closing)
|
||||
t.socket.SetReadDeadline(time.Now())
|
||||
_ = t.socket.SetReadDeadline(time.Now())
|
||||
t.wg.Wait()
|
||||
c.Done(t.socket.Close())
|
||||
}()
|
||||
|
||||
return c.Result()
|
||||
}
|
||||
|
||||
// ListenAndServe listens on the UDP network address t.Addr and blocks serving
|
||||
// BitTorrent requests until t.Stop() is called or an error is returned.
|
||||
func (t *Frontend) ListenAndServe() error {
|
||||
// listen resolves the address and binds the server socket.
|
||||
func (t *Frontend) listen() error {
|
||||
udpAddr, err := net.ResolveUDPAddr("udp", t.Addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.socket, err = net.ListenUDP("udp", udpAddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer t.socket.Close()
|
||||
}
|
||||
|
||||
// serve blocks while listening and serving UDP BitTorrent requests
|
||||
// until Stop() is called or an error is returned.
|
||||
func (t *Frontend) serve() error {
|
||||
pool := bytepool.New(2048)
|
||||
|
||||
t.wg.Add(1)
|
||||
defer t.wg.Done()
|
||||
|
||||
for {
|
||||
// Check to see if we need to shutdown.
|
||||
select {
|
||||
case <-t.closing:
|
||||
log.Debug("udp serve() received shutdown signal")
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
// Read a UDP packet into a reusable buffer.
|
||||
buffer := pool.Get()
|
||||
t.socket.SetReadDeadline(time.Now().Add(time.Second))
|
||||
n, addr, err := t.socket.ReadFromUDP(buffer)
|
||||
n, addr, err := t.socket.ReadFromUDP(*buffer)
|
||||
if err != nil {
|
||||
pool.Put(buffer)
|
||||
if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
|
||||
var netErr net.Error
|
||||
if errors.As(err, &netErr); netErr.Temporary() {
|
||||
// A temporary failure is not fatal; just pretend it never happened.
|
||||
continue
|
||||
}
|
||||
|
@ -133,13 +212,20 @@ func (t *Frontend) ListenAndServe() error {
|
|||
}
|
||||
|
||||
// Handle the request.
|
||||
start := time.Now()
|
||||
action, err := t.handleRequest(
|
||||
var start time.Time
|
||||
if t.EnableRequestTiming {
|
||||
start = time.Now()
|
||||
}
|
||||
action, af, err := t.handleRequest(
|
||||
// Make sure the IP is copied, not referenced.
|
||||
Request{buffer[:n], append([]byte{}, addr.IP...)},
|
||||
Request{(*buffer)[:n], append([]byte{}, addr.IP...)},
|
||||
ResponseWriter{t.socket, addr},
|
||||
)
|
||||
recordResponseDuration(action, err, time.Since(start))
|
||||
if t.EnableRequestTiming {
|
||||
recordResponseDuration(action, af, err, time.Since(start))
|
||||
} else {
|
||||
recordResponseDuration(action, af, err, time.Duration(0))
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
@ -159,12 +245,12 @@ type ResponseWriter struct {
|
|||
|
||||
// Write implements the io.Writer interface for a ResponseWriter.
|
||||
func (w ResponseWriter) Write(b []byte) (int, error) {
|
||||
w.socket.WriteToUDP(b, w.addr)
|
||||
_, _ = w.socket.WriteToUDP(b, w.addr)
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// handleRequest parses and responds to a UDP Request.
|
||||
func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string, err error) {
|
||||
func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string, af *bittorrent.AddressFamily, err error) {
|
||||
if len(r.Packet) < 16 {
|
||||
// Malformed, no client packets are less than 16 bytes.
|
||||
// We explicitly return nothing in case this is a DoS attempt.
|
||||
|
@ -177,9 +263,13 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string
|
|||
actionID := binary.BigEndian.Uint32(r.Packet[8:12])
|
||||
txID := r.Packet[12:16]
|
||||
|
||||
// get a connection ID generator/validator from the pool.
|
||||
gen := t.genPool.Get().(*ConnectionIDGenerator)
|
||||
defer t.genPool.Put(gen)
|
||||
|
||||
// If this isn't requesting a new connection ID and the connection ID is
|
||||
// invalid, then fail.
|
||||
if actionID != connectActionID && !ValidConnectionID(connID, r.IP, time.Now(), t.MaxClockSkew, t.PrivateKey) {
|
||||
if actionID != connectActionID && !gen.Validate(connID, r.IP, timecache.Now(), t.MaxClockSkew) {
|
||||
err = errBadConnectionID
|
||||
WriteError(w, txID, err)
|
||||
return
|
||||
|
@ -195,43 +285,66 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string
|
|||
return
|
||||
}
|
||||
|
||||
WriteConnectionID(w, txID, NewConnectionID(r.IP, time.Now(), t.PrivateKey))
|
||||
af = new(bittorrent.AddressFamily)
|
||||
if r.IP.To4() != nil {
|
||||
*af = bittorrent.IPv4
|
||||
} else if len(r.IP) == net.IPv6len { // implies r.IP.To4() == nil
|
||||
*af = bittorrent.IPv6
|
||||
} else {
|
||||
// Should never happen - we got the IP straight from the UDP packet.
|
||||
panic(fmt.Sprintf("udp: invalid IP: neither v4 nor v6, IP: %#v", r.IP))
|
||||
}
|
||||
|
||||
WriteConnectionID(w, txID, gen.Generate(r.IP, timecache.Now()))
|
||||
|
||||
case announceActionID, announceV6ActionID:
|
||||
actionName = "announce"
|
||||
|
||||
var req *bittorrent.AnnounceRequest
|
||||
req, err = ParseAnnounce(r, t.AllowIPSpoofing, actionID == announceV6ActionID)
|
||||
req, err = ParseAnnounce(r, actionID == announceV6ActionID, t.ParseOptions)
|
||||
if err != nil {
|
||||
WriteError(w, txID, err)
|
||||
return
|
||||
}
|
||||
af = new(bittorrent.AddressFamily)
|
||||
*af = req.IP.AddressFamily
|
||||
|
||||
var ctx context.Context
|
||||
var resp *bittorrent.AnnounceResponse
|
||||
resp, err = t.logic.HandleAnnounce(context.Background(), req)
|
||||
ctx, resp, err = t.logic.HandleAnnounce(context.Background(), req)
|
||||
if err != nil {
|
||||
WriteError(w, txID, err)
|
||||
return
|
||||
}
|
||||
|
||||
WriteAnnounce(w, txID, resp, actionID == announceV6ActionID)
|
||||
WriteAnnounce(w, txID, resp, actionID == announceV6ActionID, req.IP.AddressFamily == bittorrent.IPv6)
|
||||
|
||||
go t.logic.AfterAnnounce(context.Background(), req, resp)
|
||||
go t.logic.AfterAnnounce(ctx, req, resp)
|
||||
|
||||
case scrapeActionID:
|
||||
actionName = "scrape"
|
||||
|
||||
var req *bittorrent.ScrapeRequest
|
||||
req, err = ParseScrape(r)
|
||||
req, err = ParseScrape(r, t.ParseOptions)
|
||||
if err != nil {
|
||||
WriteError(w, txID, err)
|
||||
return
|
||||
}
|
||||
|
||||
ctx := context.WithValue(context.Background(), middleware.ScrapeIsIPv6Key, len(r.IP) == net.IPv6len)
|
||||
if r.IP.To4() != nil {
|
||||
req.AddressFamily = bittorrent.IPv4
|
||||
} else if len(r.IP) == net.IPv6len { // implies r.IP.To4() == nil
|
||||
req.AddressFamily = bittorrent.IPv6
|
||||
} else {
|
||||
// Should never happen - we got the IP straight from the UDP packet.
|
||||
panic(fmt.Sprintf("udp: invalid IP: neither v4 nor v6, IP: %#v", r.IP))
|
||||
}
|
||||
af = new(bittorrent.AddressFamily)
|
||||
*af = req.AddressFamily
|
||||
|
||||
var ctx context.Context
|
||||
var resp *bittorrent.ScrapeResponse
|
||||
resp, err = t.logic.HandleScrape(ctx, req)
|
||||
ctx, resp, err = t.logic.HandleScrape(context.Background(), req)
|
||||
if err != nil {
|
||||
WriteError(w, txID, err)
|
||||
return
|
||||
|
@ -239,7 +352,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string
|
|||
|
||||
WriteScrape(w, txID, resp)
|
||||
|
||||
go t.logic.AfterScrape(context.Background(), req, resp)
|
||||
go t.logic.AfterScrape(ctx, req, resp)
|
||||
|
||||
default:
|
||||
err = errUnknownAction
|
||||
|
|
28
frontend/udp/frontend_test.go
Normal file
28
frontend/udp/frontend_test.go
Normal file
|
@ -0,0 +1,28 @@
|
|||
package udp_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/chihaya/chihaya/frontend/udp"
|
||||
"github.com/chihaya/chihaya/middleware"
|
||||
"github.com/chihaya/chihaya/storage"
|
||||
_ "github.com/chihaya/chihaya/storage/memory"
|
||||
)
|
||||
|
||||
func TestStartStopRaceIssue437(t *testing.T) {
|
||||
ps, err := storage.NewPeerStore("memory", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var responseConfig middleware.ResponseConfig
|
||||
lgc := middleware.NewLogic(responseConfig, ps, nil, nil)
|
||||
fe, err := udp.NewFrontend(lgc, udp.Config{Addr: "127.0.0.1:0"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
errC := fe.Stop()
|
||||
errs := <-errC
|
||||
if len(errs) != 0 {
|
||||
t.Fatal(errs[0])
|
||||
}
|
||||
}
|
|
@ -15,24 +15,23 @@ const (
|
|||
announceActionID
|
||||
scrapeActionID
|
||||
errorActionID
|
||||
// action == 4 is the "old" IPv6 action used by opentracker, with a packet
|
||||
// format specified at
|
||||
// https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
|
||||
announceV6ActionID
|
||||
)
|
||||
|
||||
// Option-Types as described in BEP 41 and BEP 45.
|
||||
const (
|
||||
optionEndOfOptions byte = 0x0
|
||||
optionNOP = 0x1
|
||||
optionURLData = 0x2
|
||||
optionNOP byte = 0x1
|
||||
optionURLData byte = 0x2
|
||||
)
|
||||
|
||||
var (
|
||||
// initialConnectionID is the magic initial connection ID specified by BEP 15.
|
||||
initialConnectionID = []byte{0, 0, 0x04, 0x17, 0x27, 0x10, 0x19, 0x80}
|
||||
|
||||
// emptyIPs are the value of an IP field that has been left blank.
|
||||
emptyIPv4 = []byte{0, 0, 0, 0}
|
||||
emptyIPv6 = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
|
||||
// eventIDs map values described in BEP 15 to Events.
|
||||
eventIDs = []bittorrent.Event{
|
||||
bittorrent.None,
|
||||
|
@ -49,16 +48,31 @@ var (
|
|||
errUnknownOptionType = bittorrent.ClientError("unknown option type")
|
||||
)
|
||||
|
||||
// ParseOptions is the configuration used to parse an Announce Request.
|
||||
//
|
||||
// If AllowIPSpoofing is true, IPs provided via params will be used.
|
||||
type ParseOptions struct {
|
||||
AllowIPSpoofing bool `yaml:"allow_ip_spoofing"`
|
||||
MaxNumWant uint32 `yaml:"max_numwant"`
|
||||
DefaultNumWant uint32 `yaml:"default_numwant"`
|
||||
MaxScrapeInfoHashes uint32 `yaml:"max_scrape_infohashes"`
|
||||
}
|
||||
|
||||
// Default parser config constants.
|
||||
const (
|
||||
defaultMaxNumWant = 100
|
||||
defaultDefaultNumWant = 50
|
||||
defaultMaxScrapeInfoHashes = 50
|
||||
)
|
||||
|
||||
// ParseAnnounce parses an AnnounceRequest from a UDP request.
|
||||
//
|
||||
// If allowIPSpoofing is true, IPs provided via params will be used.
|
||||
//
|
||||
// If v6 is true the announce will be parsed as an IPv6 announce "the
|
||||
// opentracker way", see
|
||||
// http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
|
||||
func ParseAnnounce(r Request, allowIPSpoofing, v6 bool) (*bittorrent.AnnounceRequest, error) {
|
||||
// If v6Action is true, the announce is parsed the
|
||||
// "old opentracker way":
|
||||
// https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
|
||||
func ParseAnnounce(r Request, v6Action bool, opts ParseOptions) (*bittorrent.AnnounceRequest, error) {
|
||||
ipEnd := 84 + net.IPv4len
|
||||
if v6 {
|
||||
if v6Action {
|
||||
ipEnd = 84 + net.IPv6len
|
||||
}
|
||||
|
||||
|
@ -78,12 +92,14 @@ func ParseAnnounce(r Request, allowIPSpoofing, v6 bool) (*bittorrent.AnnounceReq
|
|||
}
|
||||
|
||||
ip := r.IP
|
||||
ipProvided := false
|
||||
ipbytes := r.Packet[84:ipEnd]
|
||||
if allowIPSpoofing {
|
||||
if opts.AllowIPSpoofing {
|
||||
// Make sure the bytes are copied to a new slice.
|
||||
copy(ip, net.IP(ipbytes))
|
||||
ipProvided = true
|
||||
}
|
||||
if !allowIPSpoofing && r.IP == nil {
|
||||
if !opts.AllowIPSpoofing && r.IP == nil {
|
||||
// We have no IP address to fallback on.
|
||||
return nil, errMalformedIP
|
||||
}
|
||||
|
@ -96,20 +112,29 @@ func ParseAnnounce(r Request, allowIPSpoofing, v6 bool) (*bittorrent.AnnounceReq
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return &bittorrent.AnnounceRequest{
|
||||
request := &bittorrent.AnnounceRequest{
|
||||
Event: eventIDs[eventID],
|
||||
InfoHash: bittorrent.InfoHashFromBytes(infohash),
|
||||
NumWant: uint32(numWant),
|
||||
NumWant: numWant,
|
||||
Left: left,
|
||||
Downloaded: downloaded,
|
||||
Uploaded: uploaded,
|
||||
IPProvided: ipProvided,
|
||||
NumWantProvided: true,
|
||||
EventProvided: true,
|
||||
Peer: bittorrent.Peer{
|
||||
ID: bittorrent.PeerIDFromBytes(peerID),
|
||||
IP: ip,
|
||||
IP: bittorrent.IP{IP: ip},
|
||||
Port: port,
|
||||
},
|
||||
Params: params,
|
||||
}, nil
|
||||
}
|
||||
|
||||
if err := bittorrent.SanitizeAnnounce(request, opts.MaxNumWant, opts.DefaultNumWant); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return request, nil
|
||||
}
|
||||
|
||||
type buffer struct {
|
||||
|
@ -136,7 +161,7 @@ func handleOptionalParameters(packet []byte) (bittorrent.Params, error) {
|
|||
return bittorrent.ParseURLData("")
|
||||
}
|
||||
|
||||
var buf = newBuffer()
|
||||
buf := newBuffer()
|
||||
defer buf.free()
|
||||
|
||||
for i := 0; i < len(packet); {
|
||||
|
@ -174,7 +199,7 @@ func handleOptionalParameters(packet []byte) (bittorrent.Params, error) {
|
|||
}
|
||||
|
||||
// ParseScrape parses a ScrapeRequest from a UDP request.
|
||||
func ParseScrape(r Request) (*bittorrent.ScrapeRequest, error) {
|
||||
func ParseScrape(r Request, opts ParseOptions) (*bittorrent.ScrapeRequest, error) {
|
||||
// If a scrape isn't at least 36 bytes long, it's malformed.
|
||||
if len(r.Packet) < 36 {
|
||||
return nil, errMalformedPacket
|
||||
|
@ -194,7 +219,11 @@ func ParseScrape(r Request) (*bittorrent.ScrapeRequest, error) {
|
|||
r.Packet = r.Packet[20:]
|
||||
}
|
||||
|
||||
return &bittorrent.ScrapeRequest{
|
||||
InfoHashes: infohashes,
|
||||
}, nil
|
||||
// Sanitize the request.
|
||||
request := &bittorrent.ScrapeRequest{InfoHashes: infohashes}
|
||||
if err := bittorrent.SanitizeScrape(request, opts.MaxScrapeInfoHashes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return request, nil
|
||||
}
|
||||
|
|
|
@ -1,6 +1,10 @@
|
|||
package udp
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var table = []struct {
|
||||
data []byte
|
||||
|
@ -45,27 +49,29 @@ var table = []struct {
|
|||
}
|
||||
|
||||
func TestHandleOptionalParameters(t *testing.T) {
|
||||
for _, testCase := range table {
|
||||
params, err := handleOptionalParameters(testCase.data)
|
||||
if err != testCase.err {
|
||||
if testCase.err == nil {
|
||||
t.Fatalf("expected no parsing error for %x but got %s", testCase.data, err)
|
||||
for _, tt := range table {
|
||||
t.Run(fmt.Sprintf("%#v as %#v", tt.data, tt.values), func(t *testing.T) {
|
||||
params, err := handleOptionalParameters(tt.data)
|
||||
if !errors.Is(err, tt.err) {
|
||||
if tt.err == nil {
|
||||
t.Fatalf("expected no parsing error for %x but got %s", tt.data, err)
|
||||
} else {
|
||||
t.Fatalf("expected parsing error for %x", testCase.data)
|
||||
t.Fatalf("expected parsing error for %x", tt.data)
|
||||
}
|
||||
}
|
||||
if testCase.values != nil {
|
||||
if tt.values != nil {
|
||||
if params == nil {
|
||||
t.Fatalf("expected values %v for %x", testCase.values, testCase.data)
|
||||
t.Fatalf("expected values %v for %x", tt.values, tt.data)
|
||||
} else {
|
||||
for key, want := range testCase.values {
|
||||
for key, want := range tt.values {
|
||||
if got, ok := params.String(key); !ok {
|
||||
t.Fatalf("params missing entry %s for data %x", key, testCase.data)
|
||||
t.Fatalf("params missing entry %s for data %x", key, tt.data)
|
||||
} else if got != want {
|
||||
t.Fatalf("expected param %s=%s, but was %s for data %x", key, want, got, testCase.data)
|
||||
t.Fatalf("expected param %s=%s, but was %s for data %x", key, want, got, tt.data)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
50
frontend/udp/prometheus.go
Normal file
50
frontend/udp/prometheus.go
Normal file
|
@ -0,0 +1,50 @@
|
|||
package udp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(promResponseDurationMilliseconds)
|
||||
}
|
||||
|
||||
var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "chihaya_udp_response_duration_milliseconds",
|
||||
Help: "The duration of time it takes to receive and write a response to an API request",
|
||||
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
|
||||
},
|
||||
[]string{"action", "address_family", "error"},
|
||||
)
|
||||
|
||||
// recordResponseDuration records the duration of time to respond to a UDP
|
||||
// Request in milliseconds.
|
||||
func recordResponseDuration(action string, af *bittorrent.AddressFamily, err error, duration time.Duration) {
|
||||
var errString string
|
||||
if err != nil {
|
||||
var clientErr bittorrent.ClientError
|
||||
if errors.As(err, &clientErr) {
|
||||
errString = clientErr.Error()
|
||||
} else {
|
||||
errString = "internal error"
|
||||
}
|
||||
}
|
||||
|
||||
var afString string
|
||||
if af == nil {
|
||||
afString = "Unknown"
|
||||
} else if *af == bittorrent.IPv4 {
|
||||
afString = "IPv4"
|
||||
} else if *af == bittorrent.IPv6 {
|
||||
afString = "IPv6"
|
||||
}
|
||||
|
||||
promResponseDurationMilliseconds.
|
||||
WithLabelValues(action, afString, errString).
|
||||
Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
|
||||
}
|
|
@ -2,6 +2,7 @@ package udp
|
|||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
@ -12,45 +13,47 @@ import (
|
|||
// WriteError writes the failure reason as a null-terminated string.
|
||||
func WriteError(w io.Writer, txID []byte, err error) {
|
||||
// If the client wasn't at fault, acknowledge it.
|
||||
if _, ok := err.(bittorrent.ClientError); !ok {
|
||||
err = fmt.Errorf("internal error occurred: %s", err.Error())
|
||||
var clientErr bittorrent.ClientError
|
||||
if !errors.As(err, &clientErr) {
|
||||
err = fmt.Errorf("internal error occurred: %w", err)
|
||||
}
|
||||
|
||||
buf := newBuffer()
|
||||
writeHeader(buf, txID, errorActionID)
|
||||
buf.WriteString(err.Error())
|
||||
buf.WriteRune('\000')
|
||||
w.Write(buf.Bytes())
|
||||
_, _ = w.Write(buf.Bytes())
|
||||
buf.free()
|
||||
}
|
||||
|
||||
// WriteAnnounce encodes an announce response according to BEP 15.
|
||||
// The peers returned will be resp.IPv6Peers or resp.IPv4Peers, depending on
|
||||
// whether v6 is set. The action ID will be 4, according to
|
||||
// http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/.
|
||||
func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse, v6 bool) {
|
||||
// whether v6Peers is set.
|
||||
// If v6Action is set, the action will be 4, according to
|
||||
// https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
|
||||
func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse, v6Action, v6Peers bool) {
|
||||
buf := newBuffer()
|
||||
|
||||
if v6 {
|
||||
if v6Action {
|
||||
writeHeader(buf, txID, announceV6ActionID)
|
||||
} else {
|
||||
writeHeader(buf, txID, announceActionID)
|
||||
}
|
||||
binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
|
||||
binary.Write(buf, binary.BigEndian, resp.Incomplete)
|
||||
binary.Write(buf, binary.BigEndian, resp.Complete)
|
||||
_ = binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
|
||||
_ = binary.Write(buf, binary.BigEndian, resp.Incomplete)
|
||||
_ = binary.Write(buf, binary.BigEndian, resp.Complete)
|
||||
|
||||
peers := resp.IPv4Peers
|
||||
if v6 {
|
||||
if v6Peers {
|
||||
peers = resp.IPv6Peers
|
||||
}
|
||||
|
||||
for _, peer := range peers {
|
||||
buf.Write(peer.IP)
|
||||
binary.Write(buf, binary.BigEndian, peer.Port)
|
||||
buf.Write(peer.IP.IP)
|
||||
_ = binary.Write(buf, binary.BigEndian, peer.Port)
|
||||
}
|
||||
|
||||
w.Write(buf.Bytes())
|
||||
_, _ = w.Write(buf.Bytes())
|
||||
buf.free()
|
||||
}
|
||||
|
||||
|
@ -61,12 +64,12 @@ func WriteScrape(w io.Writer, txID []byte, resp *bittorrent.ScrapeResponse) {
|
|||
writeHeader(buf, txID, scrapeActionID)
|
||||
|
||||
for _, scrape := range resp.Files {
|
||||
binary.Write(buf, binary.BigEndian, scrape.Complete)
|
||||
binary.Write(buf, binary.BigEndian, scrape.Snatches)
|
||||
binary.Write(buf, binary.BigEndian, scrape.Incomplete)
|
||||
_ = binary.Write(buf, binary.BigEndian, scrape.Complete)
|
||||
_ = binary.Write(buf, binary.BigEndian, scrape.Snatches)
|
||||
_ = binary.Write(buf, binary.BigEndian, scrape.Incomplete)
|
||||
}
|
||||
|
||||
w.Write(buf.Bytes())
|
||||
_, _ = w.Write(buf.Bytes())
|
||||
buf.free()
|
||||
}
|
||||
|
||||
|
@ -77,13 +80,13 @@ func WriteConnectionID(w io.Writer, txID, connID []byte) {
|
|||
writeHeader(buf, txID, connectActionID)
|
||||
buf.Write(connID)
|
||||
|
||||
w.Write(buf.Bytes())
|
||||
_, _ = w.Write(buf.Bytes())
|
||||
buf.free()
|
||||
}
|
||||
|
||||
// writeHeader writes the action and transaction ID to the provided response
|
||||
// buffer.
|
||||
func writeHeader(w io.Writer, txID []byte, action uint32) {
|
||||
binary.Write(w, binary.BigEndian, action)
|
||||
w.Write(txID)
|
||||
_ = binary.Write(w, binary.BigEndian, action)
|
||||
_, _ = w.Write(txID)
|
||||
}
|
||||
|
|
70
glide.lock
generated
70
glide.lock
generated
|
@ -1,70 +0,0 @@
|
|||
hash: fe839da75efcf365317b1b5eb04bfa15cd1db10265f4947b8aff78932bf4622e
|
||||
updated: 2016-09-05T18:13:39.020799284-04:00
|
||||
imports:
|
||||
- name: github.com/beorn7/perks
|
||||
version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
|
||||
subpackages:
|
||||
- quantile
|
||||
- name: github.com/golang/protobuf
|
||||
version: 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a
|
||||
subpackages:
|
||||
- proto
|
||||
- name: github.com/inconshreveable/mousetrap
|
||||
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
|
||||
- name: github.com/julienschmidt/httprouter
|
||||
version: 8c199fb6259ffc1af525cc3ad52ee60ba8359669
|
||||
- name: github.com/matttproud/golang_protobuf_extensions
|
||||
version: c12348ce28de40eed0136aa2b644d0ee0650e56c
|
||||
subpackages:
|
||||
- pbutil
|
||||
- name: github.com/mendsley/gojwk
|
||||
version: 4d5ec6e58103388d6cb0d7d72bc72649be4f0504
|
||||
- name: github.com/prometheus/client_golang
|
||||
version: c5b7fccd204277076155f10851dad72b76a49317
|
||||
subpackages:
|
||||
- prometheus
|
||||
- name: github.com/prometheus/client_model
|
||||
version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6
|
||||
subpackages:
|
||||
- go
|
||||
- name: github.com/prometheus/common
|
||||
version: 616e90af75cc300730196d04f3676f838d70414f
|
||||
subpackages:
|
||||
- expfmt
|
||||
- internal/bitbucket.org/ww/goautoneg
|
||||
- model
|
||||
- name: github.com/prometheus/procfs
|
||||
version: abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
|
||||
- name: github.com/SermoDigital/jose
|
||||
version: 389fea327ef076853db8fae03a0f38e30e6092ab
|
||||
subpackages:
|
||||
- crypto
|
||||
- jws
|
||||
- jwt
|
||||
- name: github.com/Sirupsen/logrus
|
||||
version: 4b6ea7319e214d98c938f12692336f7ca9348d6b
|
||||
- name: github.com/spf13/cobra
|
||||
version: 9c28e4bbd74e5c3ed7aacbc552b2cab7cfdfe744
|
||||
- name: github.com/spf13/pflag
|
||||
version: 103ce5cd2042f2fe629c1957abb64ab3e7f50235
|
||||
- name: github.com/tylerb/graceful
|
||||
version: 50a48b6e73fcc75b45e22c05b79629a67c79e938
|
||||
- name: golang.org/x/sys
|
||||
version: a646d33e2ee3172a661fc09bca23bb4889a41bc8
|
||||
subpackages:
|
||||
- unix
|
||||
- name: gopkg.in/yaml.v2
|
||||
version: e4d366fc3c7938e2958e662b4258c7a89e1f0e3e
|
||||
testImports:
|
||||
- name: github.com/davecgh/go-spew
|
||||
version: 6cf5744a041a0022271cefed95ba843f6d87fd51
|
||||
subpackages:
|
||||
- spew
|
||||
- name: github.com/pmezard/go-difflib
|
||||
version: 792786c7400a136282c1664665ae0a8db921c6c2
|
||||
subpackages:
|
||||
- difflib
|
||||
- name: github.com/stretchr/testify
|
||||
version: f390dcf405f7b83c997eac1b06768bb9f44dec18
|
||||
subpackages:
|
||||
- assert
|
26
glide.yaml
26
glide.yaml
|
@ -1,26 +0,0 @@
|
|||
package: github.com/chihaya/chihaya
|
||||
import:
|
||||
- package: github.com/SermoDigital/jose
|
||||
version: ~1.0.0
|
||||
subpackages:
|
||||
- crypto
|
||||
- jws
|
||||
- jwt
|
||||
- package: github.com/Sirupsen/logrus
|
||||
version: ~0.10.0
|
||||
- package: github.com/julienschmidt/httprouter
|
||||
version: ~1.1.0
|
||||
- package: github.com/mendsley/gojwk
|
||||
- package: github.com/prometheus/client_golang
|
||||
version: ~0.8.0
|
||||
subpackages:
|
||||
- prometheus
|
||||
- package: github.com/spf13/cobra
|
||||
- package: github.com/tylerb/graceful
|
||||
version: ~1.2.13
|
||||
- package: gopkg.in/yaml.v2
|
||||
testImport:
|
||||
- package: github.com/stretchr/testify
|
||||
version: ~1.1.3
|
||||
subpackages:
|
||||
- assert
|
28
go.mod
Normal file
28
go.mod
Normal file
|
@ -0,0 +1,28 @@
|
|||
module github.com/chihaya/chihaya
|
||||
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/SermoDigital/jose v0.9.2-0.20180104203859-803625baeddc
|
||||
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 // indirect
|
||||
github.com/alicebob/miniredis v2.5.0+incompatible
|
||||
github.com/anacrolix/dht/v2 v2.15.1 // indirect
|
||||
github.com/anacrolix/missinggo/v2 v2.5.3 // indirect
|
||||
github.com/anacrolix/torrent v1.40.0
|
||||
github.com/go-redsync/redsync/v4 v4.5.0
|
||||
github.com/gomodule/redigo v1.8.8
|
||||
github.com/julienschmidt/httprouter v1.3.0
|
||||
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
|
||||
github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103
|
||||
github.com/minio/sha256-simd v1.0.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.11.0
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/spf13/cobra v1.3.0
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
|
@ -5,11 +5,35 @@ package clientapproval
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/middleware"
|
||||
)
|
||||
|
||||
// Name is the name by which this middleware is registered with Chihaya.
|
||||
const Name = "client approval"
|
||||
|
||||
func init() {
|
||||
middleware.RegisterDriver(Name, driver{})
|
||||
}
|
||||
|
||||
var _ middleware.Driver = driver{}
|
||||
|
||||
type driver struct{}
|
||||
|
||||
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
|
||||
var cfg Config
|
||||
err := yaml.Unmarshal(optionBytes, &cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
|
||||
}
|
||||
|
||||
return NewHook(cfg)
|
||||
}
|
||||
|
||||
// ErrClientUnapproved is the error returned when a client's PeerID is invalid.
|
||||
var ErrClientUnapproved = bittorrent.ClientError("unapproved client")
|
||||
|
||||
|
@ -32,6 +56,10 @@ func NewHook(cfg Config) (middleware.Hook, error) {
|
|||
unapproved: make(map[bittorrent.ClientID]struct{}),
|
||||
}
|
||||
|
||||
if len(cfg.Whitelist) > 0 && len(cfg.Blacklist) > 0 {
|
||||
return nil, fmt.Errorf("using both whitelist and blacklist is invalid")
|
||||
}
|
||||
|
||||
for _, cidString := range cfg.Whitelist {
|
||||
cidBytes := []byte(cidString)
|
||||
if len(cidBytes) != 6 {
|
||||
|
|
75
middleware/clientapproval/clientapproval_test.go
Normal file
75
middleware/clientapproval/clientapproval_test.go
Normal file
|
@ -0,0 +1,75 @@
|
|||
package clientapproval
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
)
|
||||
|
||||
var cases = []struct {
|
||||
cfg Config
|
||||
peerID string
|
||||
approved bool
|
||||
}{
|
||||
// Client ID is whitelisted
|
||||
{
|
||||
Config{
|
||||
Whitelist: []string{"010203"},
|
||||
},
|
||||
"01020304050607080900",
|
||||
true,
|
||||
},
|
||||
// Client ID is not whitelisted
|
||||
{
|
||||
Config{
|
||||
Whitelist: []string{"010203"},
|
||||
},
|
||||
"10203040506070809000",
|
||||
false,
|
||||
},
|
||||
// Client ID is not blacklisted
|
||||
{
|
||||
Config{
|
||||
Blacklist: []string{"010203"},
|
||||
},
|
||||
"00000000001234567890",
|
||||
true,
|
||||
},
|
||||
// Client ID is blacklisted
|
||||
{
|
||||
Config{
|
||||
Blacklist: []string{"123456"},
|
||||
},
|
||||
"12345678900000000000",
|
||||
false,
|
||||
},
|
||||
}
|
||||
|
||||
func TestHandleAnnounce(t *testing.T) {
|
||||
for _, tt := range cases {
|
||||
t.Run(fmt.Sprintf("testing peerid %s", tt.peerID), func(t *testing.T) {
|
||||
h, err := NewHook(tt.cfg)
|
||||
require.Nil(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
req := &bittorrent.AnnounceRequest{}
|
||||
resp := &bittorrent.AnnounceResponse{}
|
||||
|
||||
peerid := bittorrent.PeerIDFromString(tt.peerID)
|
||||
|
||||
req.Peer.ID = peerid
|
||||
|
||||
nctx, err := h.HandleAnnounce(ctx, req, resp)
|
||||
require.Equal(t, ctx, nctx)
|
||||
if tt.approved == true {
|
||||
require.NotEqual(t, err, ErrClientUnapproved)
|
||||
} else {
|
||||
require.Equal(t, err, ErrClientUnapproved)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
84
middleware/fixedpeer/fixedpeer.go
Normal file
84
middleware/fixedpeer/fixedpeer.go
Normal file
|
@ -0,0 +1,84 @@
|
|||
// Package fixedpeers implements a Hook that
|
||||
//appends a fixed peer to every Announce request
|
||||
package fixedpeers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/middleware"
|
||||
)
|
||||
|
||||
// Name is the name by which this middleware is registered with Chihaya.
|
||||
const Name = "fixed peers"
|
||||
|
||||
func init() {
|
||||
middleware.RegisterDriver(Name, driver{})
|
||||
}
|
||||
|
||||
var _ middleware.Driver = driver{}
|
||||
|
||||
type driver struct{}
|
||||
|
||||
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
|
||||
var cfg Config
|
||||
err := yaml.Unmarshal(optionBytes, &cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
|
||||
}
|
||||
|
||||
return NewHook(cfg)
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
FixedPeers []string `yaml:"fixed_peers"`
|
||||
}
|
||||
|
||||
type hook struct {
|
||||
peers []bittorrent.Peer
|
||||
}
|
||||
|
||||
// NewHook returns an instance of the torrent approval middleware.
|
||||
func NewHook(cfg Config) (middleware.Hook, error) {
|
||||
var peers []bittorrent.Peer
|
||||
for _, peerString := range cfg.FixedPeers {
|
||||
parts := strings.Split(peerString, ":")
|
||||
port, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ip := net.ParseIP(parts[0]).To4()
|
||||
if ip == nil {
|
||||
panic("Invalid ip4 on fixed_peers")
|
||||
}
|
||||
peers = append(peers,
|
||||
bittorrent.Peer{
|
||||
ID: bittorrent.PeerID{0},
|
||||
Port: uint16(port),
|
||||
IP: bittorrent.IP{IP: ip},
|
||||
})
|
||||
}
|
||||
h := &hook{
|
||||
peers: peers,
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
|
||||
for _, peer := range h.peers {
|
||||
resp.IPv4Peers = append(resp.IPv4Peers, peer)
|
||||
resp.Complete += 1
|
||||
}
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
|
||||
// Scrapes don't require any protection.
|
||||
return ctx, nil
|
||||
}
|
47
middleware/fixedpeer/fixedpeer_test.go
Normal file
47
middleware/fixedpeer/fixedpeer_test.go
Normal file
|
@ -0,0 +1,47 @@
|
|||
package fixedpeers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
)
|
||||
|
||||
func TestAppendFixedPeer(t *testing.T) {
|
||||
conf := Config{
|
||||
FixedPeers: []string{"8.8.8.8:4040", "1.1.1.1:111"},
|
||||
}
|
||||
h, err := NewHook(conf)
|
||||
require.Nil(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
req := &bittorrent.AnnounceRequest{}
|
||||
resp := &bittorrent.AnnounceResponse{}
|
||||
|
||||
hashbytes, err := hex.DecodeString("3000000000000000000000000000000000000000")
|
||||
require.Nil(t, err)
|
||||
|
||||
hashinfo := bittorrent.InfoHashFromBytes(hashbytes)
|
||||
|
||||
req.InfoHash = hashinfo
|
||||
|
||||
nctx, err := h.HandleAnnounce(ctx, req, resp)
|
||||
require.Equal(t, ctx, nctx)
|
||||
peers := []bittorrent.Peer{
|
||||
bittorrent.Peer{
|
||||
ID: bittorrent.PeerID{0},
|
||||
Port: 4040,
|
||||
IP: bittorrent.IP{net.ParseIP("8.8.8.8"), bittorrent.IPv4},
|
||||
},
|
||||
bittorrent.Peer{
|
||||
ID: bittorrent.PeerID{0},
|
||||
Port: 111,
|
||||
IP: bittorrent.IP{net.ParseIP("1.1.1.1"), bittorrent.IPv4},
|
||||
},
|
||||
}
|
||||
require.Equal(t, peers, resp.IPv4Peers)
|
||||
}
|
|
@ -3,14 +3,15 @@ package middleware
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/storage"
|
||||
)
|
||||
|
||||
// Hook abstracts the concept of anything that needs to interact with a
|
||||
// BitTorrent client's request and response to a BitTorrent tracker.
|
||||
// PreHooks and PostHooks both use the same interface.
|
||||
//
|
||||
// A Hook can implement stop.Stopper if clean shutdown is required.
|
||||
type Hook interface {
|
||||
HandleAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) (context.Context, error)
|
||||
HandleScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) (context.Context, error)
|
||||
|
@ -34,14 +35,16 @@ func (h *swarmInteractionHook) HandleAnnounce(ctx context.Context, req *bittorre
|
|||
}
|
||||
|
||||
switch {
|
||||
case req.Port < 100:
|
||||
return ctx, nil
|
||||
case req.Event == bittorrent.Stopped:
|
||||
err = h.store.DeleteSeeder(req.InfoHash, req.Peer)
|
||||
if err != nil && err != storage.ErrResourceDoesNotExist {
|
||||
if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
|
||||
return ctx, err
|
||||
}
|
||||
|
||||
err = h.store.DeleteLeecher(req.InfoHash, req.Peer)
|
||||
if err != nil && err != storage.ErrResourceDoesNotExist {
|
||||
if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
|
||||
return ctx, err
|
||||
}
|
||||
case req.Event == bittorrent.Completed:
|
||||
|
@ -67,9 +70,6 @@ func (h *swarmInteractionHook) HandleScrape(ctx context.Context, _ *bittorrent.S
|
|||
return ctx, nil
|
||||
}
|
||||
|
||||
// ErrInvalidIP indicates an invalid IP for an Announce.
|
||||
var ErrInvalidIP = errors.New("invalid IP")
|
||||
|
||||
type skipResponseHook struct{}
|
||||
|
||||
// SkipResponseHookKey is a key for the context of an Announce or Scrape to
|
||||
|
@ -97,9 +97,9 @@ func (h *responseHook) HandleAnnounce(ctx context.Context, req *bittorrent.Annou
|
|||
}
|
||||
|
||||
// Add the Scrape data to the response.
|
||||
s := h.store.ScrapeSwarm(req.InfoHash, len(req.IP) == net.IPv6len)
|
||||
resp.Incomplete = s.Incomplete
|
||||
resp.Complete = s.Complete
|
||||
s := h.store.ScrapeSwarm(req.InfoHash, req.IP.AddressFamily)
|
||||
resp.Incomplete += s.Incomplete
|
||||
resp.Complete += s.Complete
|
||||
|
||||
err = h.appendPeers(req, resp)
|
||||
return ctx, err
|
||||
|
@ -108,7 +108,7 @@ func (h *responseHook) HandleAnnounce(ctx context.Context, req *bittorrent.Annou
|
|||
func (h *responseHook) appendPeers(req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error {
|
||||
seeding := req.Left == 0
|
||||
peers, err := h.store.AnnouncePeers(req.InfoHash, seeding, int(req.NumWant), req.Peer)
|
||||
if err != nil && err != storage.ErrResourceDoesNotExist {
|
||||
if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -123,13 +123,13 @@ func (h *responseHook) appendPeers(req *bittorrent.AnnounceRequest, resp *bittor
|
|||
peers = append(peers, req.Peer)
|
||||
}
|
||||
|
||||
switch len(req.IP) {
|
||||
case net.IPv4len:
|
||||
resp.IPv4Peers = peers
|
||||
case net.IPv6len:
|
||||
resp.IPv6Peers = peers
|
||||
switch req.IP.AddressFamily {
|
||||
case bittorrent.IPv4:
|
||||
resp.IPv4Peers = append(resp.IPv4Peers, peers...)
|
||||
case bittorrent.IPv6:
|
||||
resp.IPv6Peers = append(resp.IPv6Peers, peers...)
|
||||
default:
|
||||
panic("peer IP is not IPv4 or IPv6 length")
|
||||
panic("attempted to append peer that is neither IPv4 nor IPv6")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -140,10 +140,8 @@ func (h *responseHook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeR
|
|||
return ctx, nil
|
||||
}
|
||||
|
||||
v6, _ := ctx.Value(ScrapeIsIPv6Key).(bool)
|
||||
|
||||
for _, infoHash := range req.InfoHashes {
|
||||
resp.Files[infoHash] = h.store.ScrapeSwarm(infoHash, v6)
|
||||
resp.Files = append(resp.Files, h.store.ScrapeSwarm(infoHash, req.AddressFamily))
|
||||
}
|
||||
|
||||
return ctx, nil
|
||||
|
|
|
@ -9,23 +9,47 @@ package jwt
|
|||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
jc "github.com/SermoDigital/jose/crypto"
|
||||
"github.com/SermoDigital/jose/jws"
|
||||
"github.com/SermoDigital/jose/jwt"
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/mendsley/gojwk"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/middleware"
|
||||
"github.com/chihaya/chihaya/stopper"
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
"github.com/chihaya/chihaya/pkg/stop"
|
||||
)
|
||||
|
||||
// Name is the name by which this middleware is registered with Chihaya.
|
||||
const Name = "jwt"
|
||||
|
||||
func init() {
|
||||
middleware.RegisterDriver(Name, driver{})
|
||||
}
|
||||
|
||||
var _ middleware.Driver = driver{}
|
||||
|
||||
type driver struct{}
|
||||
|
||||
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
|
||||
var cfg Config
|
||||
err := yaml.Unmarshal(optionBytes, &cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
|
||||
}
|
||||
|
||||
return NewHook(cfg)
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrMissingJWT is returned when a JWT is missing from a request.
|
||||
ErrMissingJWT = bittorrent.ClientError("unapproved request: missing jwt")
|
||||
|
@ -43,6 +67,16 @@ type Config struct {
|
|||
JWKUpdateInterval time.Duration `yaml:"jwk_set_update_interval"`
|
||||
}
|
||||
|
||||
// LogFields implements log.Fielder for a Config.
|
||||
func (cfg Config) LogFields() log.Fields {
|
||||
return log.Fields{
|
||||
"issuer": cfg.Issuer,
|
||||
"audience": cfg.Audience,
|
||||
"JWKSetURL": cfg.JWKSetURL,
|
||||
"JWKUpdateInterval": cfg.JWKUpdateInterval,
|
||||
}
|
||||
}
|
||||
|
||||
type hook struct {
|
||||
cfg Config
|
||||
publicKeys map[string]crypto.PublicKey
|
||||
|
@ -51,7 +85,7 @@ type hook struct {
|
|||
|
||||
// NewHook returns an instance of the JWT middleware.
|
||||
func NewHook(cfg Config) (middleware.Hook, error) {
|
||||
log.Debugf("creating new JWT middleware with config: %#v", cfg)
|
||||
log.Debug("creating new JWT middleware", cfg)
|
||||
h := &hook{
|
||||
cfg: cfg,
|
||||
publicKeys: map[string]crypto.PublicKey{},
|
||||
|
@ -59,8 +93,7 @@ func NewHook(cfg Config) (middleware.Hook, error) {
|
|||
}
|
||||
|
||||
log.Debug("performing initial fetch of JWKs")
|
||||
err := h.updateKeys()
|
||||
if err != nil {
|
||||
if err := h.updateKeys(); err != nil {
|
||||
return nil, errors.New("failed to fetch initial JWK Set: " + err.Error())
|
||||
}
|
||||
|
||||
|
@ -71,7 +104,7 @@ func NewHook(cfg Config) (middleware.Hook, error) {
|
|||
return
|
||||
case <-time.After(cfg.JWKUpdateInterval):
|
||||
log.Debug("performing fetch of JWKs")
|
||||
h.updateKeys()
|
||||
_ = h.updateKeys()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -82,7 +115,7 @@ func NewHook(cfg Config) (middleware.Hook, error) {
|
|||
func (h *hook) updateKeys() error {
|
||||
resp, err := http.Get(h.cfg.JWKSetURL)
|
||||
if err != nil {
|
||||
log.Errorln("failed to fetch JWK Set: " + err.Error())
|
||||
log.Error("failed to fetch JWK Set", log.Err(err))
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -90,7 +123,7 @@ func (h *hook) updateKeys() error {
|
|||
err = json.NewDecoder(resp.Body).Decode(&parsedJWKs)
|
||||
if err != nil {
|
||||
resp.Body.Close()
|
||||
log.Errorln("failed to decode JWK JSON: " + err.Error())
|
||||
log.Error("failed to decode JWK JSON", log.Err(err))
|
||||
return err
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
@ -99,7 +132,7 @@ func (h *hook) updateKeys() error {
|
|||
for _, parsedJWK := range parsedJWKs.Keys {
|
||||
publicKey, err := parsedJWK.DecodePublicKey()
|
||||
if err != nil {
|
||||
log.Errorln("failed to decode JWK into public key: " + err.Error())
|
||||
log.Error("failed to decode JWK into public key", log.Err(err))
|
||||
return err
|
||||
}
|
||||
keys[parsedJWK.Kid] = publicKey
|
||||
|
@ -110,19 +143,19 @@ func (h *hook) updateKeys() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (h *hook) Stop() <-chan error {
|
||||
func (h *hook) Stop() stop.Result {
|
||||
log.Debug("attempting to shutdown JWT middleware")
|
||||
select {
|
||||
case <-h.closing:
|
||||
return stopper.AlreadyStopped
|
||||
return stop.AlreadyStopped
|
||||
default:
|
||||
}
|
||||
c := make(chan error)
|
||||
c := make(stop.Channel)
|
||||
go func() {
|
||||
close(h.closing)
|
||||
close(c)
|
||||
c.Done()
|
||||
}()
|
||||
return c
|
||||
return c.Result()
|
||||
}
|
||||
|
||||
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
|
||||
|
@ -155,52 +188,64 @@ func validateJWT(ih bittorrent.InfoHash, jwtBytes []byte, cfgIss, cfgAud string,
|
|||
|
||||
claims := parsedJWT.Claims()
|
||||
if iss, ok := claims.Issuer(); !ok || iss != cfgIss {
|
||||
log.Debug("unequal or missing issuer when validating JWT", log.Fields{
|
||||
"exists": ok,
|
||||
"claim": iss,
|
||||
"config": cfgIss,
|
||||
})
|
||||
return jwt.ErrInvalidISSClaim
|
||||
}
|
||||
|
||||
if aud, ok := claims.Audience(); !ok || !validAudience(aud, cfgAud) {
|
||||
if auds, ok := claims.Audience(); !ok || !in(cfgAud, auds) {
|
||||
log.Debug("unequal or missing audience when validating JWT", log.Fields{
|
||||
"exists": ok,
|
||||
"claim": strings.Join(auds, ","),
|
||||
"config": cfgAud,
|
||||
})
|
||||
return jwt.ErrInvalidAUDClaim
|
||||
}
|
||||
|
||||
if ihClaim, ok := claims.Get("infohash").(string); !ok || !validInfoHash(ihClaim, ih) {
|
||||
ihHex := hex.EncodeToString(ih[:])
|
||||
if ihClaim, ok := claims.Get("infohash").(string); !ok || ihClaim != ihHex {
|
||||
log.Debug("unequal or missing infohash when validating JWT", log.Fields{
|
||||
"exists": ok,
|
||||
"claim": ihClaim,
|
||||
"request": ihHex,
|
||||
})
|
||||
return errors.New("claim \"infohash\" is invalid")
|
||||
}
|
||||
|
||||
parsedJWS := parsedJWT.(jws.JWS)
|
||||
kid, ok := parsedJWS.Protected().Get("kid").(string)
|
||||
if !ok {
|
||||
log.Debug("missing kid when validating JWT", log.Fields{
|
||||
"exists": ok,
|
||||
"claim": kid,
|
||||
})
|
||||
return errors.New("invalid kid")
|
||||
}
|
||||
publicKey, ok := publicKeys[kid]
|
||||
if !ok {
|
||||
log.Debug("missing public key forkid when validating JWT", log.Fields{
|
||||
"kid": kid,
|
||||
})
|
||||
return errors.New("signed by unknown kid")
|
||||
}
|
||||
|
||||
return parsedJWS.Verify(publicKey, jc.SigningMethodRS256)
|
||||
}
|
||||
|
||||
func validAudience(aud []string, cfgAud string) bool {
|
||||
for _, a := range aud {
|
||||
if a == cfgAud {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func validInfoHash(claim string, ih bittorrent.InfoHash) bool {
|
||||
if len(claim) == 20 && bittorrent.InfoHashFromString(claim) == ih {
|
||||
return true
|
||||
}
|
||||
|
||||
unescapedClaim, err := url.QueryUnescape(claim)
|
||||
err = parsedJWS.Verify(publicKey, jc.SigningMethodRS256)
|
||||
if err != nil {
|
||||
return false
|
||||
log.Debug("failed to verify signature of JWT", log.Err(err))
|
||||
return err
|
||||
}
|
||||
|
||||
if len(unescapedClaim) == 20 && bittorrent.InfoHashFromString(unescapedClaim) == ih {
|
||||
return nil
|
||||
}
|
||||
|
||||
func in(x string, xs []string) bool {
|
||||
for _, y := range xs {
|
||||
if x == y {
|
||||
return true
|
||||
}
|
||||
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
|
125
middleware/logic.go
Normal file
125
middleware/logic.go
Normal file
|
@ -0,0 +1,125 @@
|
|||
package middleware
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/frontend"
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
"github.com/chihaya/chihaya/pkg/stop"
|
||||
"github.com/chihaya/chihaya/storage"
|
||||
)
|
||||
|
||||
// ResponseConfig holds the configuration used for the actual response.
|
||||
//
|
||||
// TODO(jzelinskie): Evaluate whether we would like to make this optional.
|
||||
// We can make Chihaya extensible enough that you can program a new response
|
||||
// generator at the cost of making it possible for users to create config that
|
||||
// won't compose a functional tracker.
|
||||
type ResponseConfig struct {
|
||||
AnnounceInterval time.Duration `yaml:"announce_interval"`
|
||||
MinAnnounceInterval time.Duration `yaml:"min_announce_interval"`
|
||||
}
|
||||
|
||||
var _ frontend.TrackerLogic = &Logic{}
|
||||
|
||||
// NewLogic creates a new instance of a TrackerLogic that executes the provided
|
||||
// middleware hooks.
|
||||
func NewLogic(cfg ResponseConfig, peerStore storage.PeerStore, preHooks, postHooks []Hook) *Logic {
|
||||
return &Logic{
|
||||
announceInterval: cfg.AnnounceInterval,
|
||||
minAnnounceInterval: cfg.MinAnnounceInterval,
|
||||
peerStore: peerStore,
|
||||
preHooks: append(preHooks, &responseHook{store: peerStore}),
|
||||
postHooks: append(postHooks, &swarmInteractionHook{store: peerStore}),
|
||||
}
|
||||
}
|
||||
|
||||
// Logic is an implementation of the TrackerLogic that functions by
|
||||
// executing a series of middleware hooks.
|
||||
type Logic struct {
|
||||
announceInterval time.Duration
|
||||
minAnnounceInterval time.Duration
|
||||
peerStore storage.PeerStore
|
||||
preHooks []Hook
|
||||
postHooks []Hook
|
||||
}
|
||||
|
||||
// HandleAnnounce generates a response for an Announce.
|
||||
func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (_ context.Context, resp *bittorrent.AnnounceResponse, err error) {
|
||||
resp = &bittorrent.AnnounceResponse{
|
||||
Interval: l.announceInterval,
|
||||
MinInterval: l.minAnnounceInterval,
|
||||
Compact: req.Compact,
|
||||
}
|
||||
for _, h := range l.preHooks {
|
||||
if ctx, err = h.HandleAnnounce(ctx, req, resp); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
log.Debug("generated announce response", resp)
|
||||
return ctx, resp, nil
|
||||
}
|
||||
|
||||
// AfterAnnounce does something with the results of an Announce after it has
|
||||
// been completed.
|
||||
func (l *Logic) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) {
|
||||
var err error
|
||||
for _, h := range l.postHooks {
|
||||
if ctx, err = h.HandleAnnounce(ctx, req, resp); err != nil {
|
||||
log.Error("post-announce hooks failed", log.Err(err))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// HandleScrape generates a response for a Scrape.
|
||||
func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) (_ context.Context, resp *bittorrent.ScrapeResponse, err error) {
|
||||
resp = &bittorrent.ScrapeResponse{
|
||||
Files: make([]bittorrent.Scrape, 0, len(req.InfoHashes)),
|
||||
}
|
||||
for _, h := range l.preHooks {
|
||||
if ctx, err = h.HandleScrape(ctx, req, resp); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
log.Debug("generated scrape response", resp)
|
||||
return ctx, resp, nil
|
||||
}
|
||||
|
||||
// AfterScrape does something with the results of a Scrape after it has been
|
||||
// completed.
|
||||
func (l *Logic) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) {
|
||||
var err error
|
||||
for _, h := range l.postHooks {
|
||||
if ctx, err = h.HandleScrape(ctx, req, resp); err != nil {
|
||||
log.Error("post-scrape hooks failed", log.Err(err))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops the Logic.
|
||||
//
|
||||
// This stops any hooks that implement stop.Stopper.
|
||||
func (l *Logic) Stop() stop.Result {
|
||||
stopGroup := stop.NewGroup()
|
||||
for _, hook := range l.preHooks {
|
||||
stoppable, ok := hook.(stop.Stopper)
|
||||
if ok {
|
||||
stopGroup.Add(stoppable)
|
||||
}
|
||||
}
|
||||
|
||||
for _, hook := range l.postHooks {
|
||||
stoppable, ok := hook.(stop.Stopper)
|
||||
if ok {
|
||||
stopGroup.Add(stoppable)
|
||||
}
|
||||
}
|
||||
|
||||
return stopGroup.Stop()
|
||||
}
|
83
middleware/logic_test.go
Normal file
83
middleware/logic_test.go
Normal file
|
@ -0,0 +1,83 @@
|
|||
package middleware
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
)
|
||||
|
||||
// nopHook is a Hook to measure the overhead of a no-operation Hook through
|
||||
// benchmarks.
|
||||
type nopHook struct{}
|
||||
|
||||
func (h *nopHook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
func (h *nopHook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
type hookList []Hook
|
||||
|
||||
func (hooks hookList) handleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (resp *bittorrent.AnnounceResponse, err error) {
|
||||
resp = &bittorrent.AnnounceResponse{
|
||||
Interval: 60,
|
||||
MinInterval: 60,
|
||||
Compact: true,
|
||||
}
|
||||
|
||||
for _, h := range []Hook(hooks) {
|
||||
if ctx, err = h.HandleAnnounce(ctx, req, resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func benchHookListV4(b *testing.B, hooks hookList) {
|
||||
req := &bittorrent.AnnounceRequest{Peer: bittorrent.Peer{IP: bittorrent.IP{IP: net.ParseIP("1.2.3.4"), AddressFamily: bittorrent.IPv4}}}
|
||||
benchHookList(b, hooks, req)
|
||||
}
|
||||
|
||||
func benchHookListV6(b *testing.B, hooks hookList) {
|
||||
req := &bittorrent.AnnounceRequest{Peer: bittorrent.Peer{IP: bittorrent.IP{IP: net.ParseIP("fc00::0001"), AddressFamily: bittorrent.IPv6}}}
|
||||
benchHookList(b, hooks, req)
|
||||
}
|
||||
|
||||
func benchHookList(b *testing.B, hooks hookList, req *bittorrent.AnnounceRequest) {
|
||||
ctx := context.Background()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
resp, err := hooks.handleAnnounce(ctx, req)
|
||||
require.Nil(b, err)
|
||||
require.NotNil(b, resp)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkHookOverhead(b *testing.B) {
|
||||
b.Run("none-v4", func(b *testing.B) {
|
||||
benchHookListV4(b, hookList{})
|
||||
})
|
||||
|
||||
b.Run("none-v6", func(b *testing.B) {
|
||||
benchHookListV6(b, hookList{})
|
||||
})
|
||||
|
||||
var nopHooks hookList
|
||||
for i := 1; i < 4; i++ {
|
||||
nopHooks = append(nopHooks, &nopHook{})
|
||||
b.Run(fmt.Sprintf("%dnop-v4", i), func(b *testing.B) {
|
||||
benchHookListV4(b, nopHooks)
|
||||
})
|
||||
b.Run(fmt.Sprintf("%dnop-v6", i), func(b *testing.B) {
|
||||
benchHookListV6(b, nopHooks)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -3,117 +3,92 @@
|
|||
package middleware
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/frontend"
|
||||
"github.com/chihaya/chihaya/stopper"
|
||||
"github.com/chihaya/chihaya/storage"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// Config holds the configuration common across all middleware.
|
||||
type Config struct {
|
||||
AnnounceInterval time.Duration `yaml:"announce_interval"`
|
||||
}
|
||||
var (
|
||||
driversM sync.RWMutex
|
||||
drivers = make(map[string]Driver)
|
||||
|
||||
var _ frontend.TrackerLogic = &Logic{}
|
||||
// ErrDriverDoesNotExist is the error returned by NewMiddleware when a
|
||||
// middleware driver with that name does not exist.
|
||||
ErrDriverDoesNotExist = errors.New("middleware driver with that name does not exist")
|
||||
)
|
||||
|
||||
// NewLogic creates a new instance of a TrackerLogic that executes the provided
|
||||
// middleware hooks.
|
||||
func NewLogic(cfg Config, peerStore storage.PeerStore, preHooks, postHooks []Hook) *Logic {
|
||||
l := &Logic{
|
||||
announceInterval: cfg.AnnounceInterval,
|
||||
peerStore: peerStore,
|
||||
preHooks: append(preHooks, &responseHook{store: peerStore}),
|
||||
postHooks: append(postHooks, &swarmInteractionHook{store: peerStore}),
|
||||
}
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// Logic is an implementation of the TrackerLogic that functions by
|
||||
// executing a series of middleware hooks.
|
||||
type Logic struct {
|
||||
announceInterval time.Duration
|
||||
peerStore storage.PeerStore
|
||||
preHooks []Hook
|
||||
postHooks []Hook
|
||||
}
|
||||
|
||||
// HandleAnnounce generates a response for an Announce.
|
||||
func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (resp *bittorrent.AnnounceResponse, err error) {
|
||||
resp = &bittorrent.AnnounceResponse{
|
||||
Interval: l.announceInterval,
|
||||
MinInterval: l.announceInterval,
|
||||
Compact: req.Compact,
|
||||
}
|
||||
for _, h := range l.preHooks {
|
||||
if ctx, err = h.HandleAnnounce(ctx, req, resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// AfterAnnounce does something with the results of an Announce after it has
|
||||
// been completed.
|
||||
func (l *Logic) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) {
|
||||
var err error
|
||||
for _, h := range l.postHooks {
|
||||
if ctx, err = h.HandleAnnounce(ctx, req, resp); err != nil {
|
||||
log.Errorln("chihaya: post-announce hooks failed:", err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// HandleScrape generates a response for a Scrape.
|
||||
func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) (resp *bittorrent.ScrapeResponse, err error) {
|
||||
resp = &bittorrent.ScrapeResponse{
|
||||
Files: make(map[bittorrent.InfoHash]bittorrent.Scrape),
|
||||
}
|
||||
for _, h := range l.preHooks {
|
||||
if ctx, err = h.HandleScrape(ctx, req, resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// AfterScrape does something with the results of a Scrape after it has been
|
||||
// completed.
|
||||
func (l *Logic) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) {
|
||||
var err error
|
||||
for _, h := range l.postHooks {
|
||||
if ctx, err = h.HandleScrape(ctx, req, resp); err != nil {
|
||||
log.Errorln("chihaya: post-scrape hooks failed:", err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops the Logic.
|
||||
// Driver is the interface used to initialize a new type of middleware.
|
||||
//
|
||||
// This stops any hooks that implement stopper.Stopper.
|
||||
func (l *Logic) Stop() []error {
|
||||
stopGroup := stopper.NewStopGroup()
|
||||
for _, hook := range l.preHooks {
|
||||
stoppable, ok := hook.(stopper.Stopper)
|
||||
if ok {
|
||||
stopGroup.Add(stoppable)
|
||||
}
|
||||
}
|
||||
|
||||
for _, hook := range l.postHooks {
|
||||
stoppable, ok := hook.(stopper.Stopper)
|
||||
if ok {
|
||||
stopGroup.Add(stoppable)
|
||||
}
|
||||
}
|
||||
|
||||
return stopGroup.Stop()
|
||||
// The options parameter is YAML encoded bytes that should be unmarshalled into
|
||||
// the hook's custom configuration.
|
||||
type Driver interface {
|
||||
NewHook(options []byte) (Hook, error)
|
||||
}
|
||||
|
||||
// RegisterDriver makes a Driver available by the provided name.
|
||||
//
|
||||
// If called twice with the same name, the name is blank, or if the provided
|
||||
// Driver is nil, this function panics.
|
||||
func RegisterDriver(name string, d Driver) {
|
||||
if name == "" {
|
||||
panic("middleware: could not register a Driver with an empty name")
|
||||
}
|
||||
if d == nil {
|
||||
panic("middleware: could not register a nil Driver")
|
||||
}
|
||||
|
||||
driversM.Lock()
|
||||
defer driversM.Unlock()
|
||||
|
||||
if _, dup := drivers[name]; dup {
|
||||
panic("middleware: RegisterDriver called twice for " + name)
|
||||
}
|
||||
|
||||
drivers[name] = d
|
||||
}
|
||||
|
||||
// New attempts to initialize a new middleware instance from the
|
||||
// list of registered Drivers.
|
||||
//
|
||||
// If a driver does not exist, returns ErrDriverDoesNotExist.
|
||||
func New(name string, optionBytes []byte) (Hook, error) {
|
||||
driversM.RLock()
|
||||
defer driversM.RUnlock()
|
||||
|
||||
var d Driver
|
||||
d, ok := drivers[name]
|
||||
if !ok {
|
||||
return nil, ErrDriverDoesNotExist
|
||||
}
|
||||
|
||||
return d.NewHook(optionBytes)
|
||||
}
|
||||
|
||||
// HookConfig is the generic configuration format used for all registered Hooks.
|
||||
type HookConfig struct {
|
||||
Name string `yaml:"name"`
|
||||
Options map[string]interface{} `yaml:"options"`
|
||||
}
|
||||
|
||||
// HooksFromHookConfigs is a utility function for initializing Hooks in bulk.
|
||||
func HooksFromHookConfigs(cfgs []HookConfig) (hooks []Hook, err error) {
|
||||
for _, cfg := range cfgs {
|
||||
// Marshal the options back into bytes.
|
||||
var optionBytes []byte
|
||||
optionBytes, err = yaml.Marshal(cfg.Options)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var h Hook
|
||||
h, err = New(cfg.Name, optionBytes)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
hooks = append(hooks, h)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
|
17
middleware/pkg/random/entropy.go
Normal file
17
middleware/pkg/random/entropy.go
Normal file
|
@ -0,0 +1,17 @@
|
|||
package random
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
)
|
||||
|
||||
// DeriveEntropyFromRequest generates 2*64 bits of pseudo random state from an
// AnnounceRequest.
//
// Calling DeriveEntropyFromRequest multiple times yields the same values.
func DeriveEntropyFromRequest(req *bittorrent.AnnounceRequest) (uint64, uint64) {
	// Fold the infohash and the peer ID into one uint64 each by summing
	// their first two big-endian 8-byte words. Collisions are acceptable:
	// this seeds a PRNG, it is not a cryptographic hash.
	v0 := binary.BigEndian.Uint64(req.InfoHash[:8]) + binary.BigEndian.Uint64(req.InfoHash[8:16])
	v1 := binary.BigEndian.Uint64(req.Peer.ID[:8]) + binary.BigEndian.Uint64(req.Peer.ID[8:16])
	return v0, v1
}
|
28
middleware/pkg/random/xorshift.go
Normal file
28
middleware/pkg/random/xorshift.go
Normal file
|
@ -0,0 +1,28 @@
|
|||
// Package random implements the XORShift PRNG and a way to derive random state
|
||||
// from an AnnounceRequest.
|
||||
package random
|
||||
|
||||
// GenerateAndAdvance applies XORShift128Plus on s0 and s1, returning
// the new states newS0, newS1 and a pseudo-random number v.
func GenerateAndAdvance(s0, s1 uint64) (v, newS0, newS1 uint64) {
	v = s0 + s1
	newS0 = s1
	s0 ^= (s0 << 23)
	newS1 = s0 ^ s1 ^ (s0 >> 18) ^ (s1 >> 5)
	return
}

// Intn generates an int k that satisfies k >= 0 && k < n.
// n must be > 0.
// It returns the generated k and the new state of the generator.
func Intn(s0, s1 uint64, n int) (int, uint64, uint64) {
	if n <= 0 {
		panic("invalid n <= 0")
	}
	v, newS0, newS1 := GenerateAndAdvance(s0, s1)
	// Clear the sign bit rather than negating: the previous
	// `if k < 0 { k = -k }` broke when v had only the top bit set, because
	// -math.MinInt64 overflows back to math.MinInt64, making k % n negative
	// and violating the documented k >= 0 contract.
	k := int(v &^ (1 << 63))
	return k % n, newS0, newS1
}
|
38
middleware/pkg/random/xorshift_test.go
Normal file
38
middleware/pkg/random/xorshift_test.go
Normal file
|
@ -0,0 +1,38 @@
|
|||
package random
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestIntn(t *testing.T) {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
s0, s1 := rand.Uint64(), rand.Uint64()
|
||||
var k int
|
||||
for i := 0; i < 10000; i++ {
|
||||
k, s0, s1 = Intn(s0, s1, 10)
|
||||
require.True(t, k >= 0, "Intn() must be >= 0")
|
||||
require.True(t, k < 10, "Intn(k) must be < k")
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkAdvanceXORShift128Plus measures one generator advance per iteration.
func BenchmarkAdvanceXORShift128Plus(b *testing.B) {
	s0, s1 := rand.Uint64(), rand.Uint64()
	var v uint64
	for i := 0; i < b.N; i++ {
		v, s0, s1 = GenerateAndAdvance(s0, s1)
	}
	// Sink the results so the loop body is not optimized away.
	_, _, _ = v, s0, s1
}
|
||||
|
||||
// BenchmarkIntn measures one bounded draw (advance + reduction) per iteration.
func BenchmarkIntn(b *testing.B) {
	s0, s1 := rand.Uint64(), rand.Uint64()
	var v int
	for i := 0; i < b.N; i++ {
		v, s0, s1 = Intn(s0, s1, 1000)
	}
	// Sink the results so the loop body is not optimized away.
	_, _, _ = v, s0, s1
}
|
109
middleware/torrentapproval/torrentapproval.go
Normal file
109
middleware/torrentapproval/torrentapproval.go
Normal file
|
@ -0,0 +1,109 @@
|
|||
// Package torrentapproval implements a Hook that fails an Announce based on a
|
||||
// whitelist or blacklist of torrent hash.
|
||||
package torrentapproval
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/middleware"
|
||||
)
|
||||
|
||||
// Name is the name by which this middleware is registered with Chihaya.
const Name = "torrent approval"

// Register the driver at package load so config files can refer to it by Name.
func init() {
	middleware.RegisterDriver(Name, driver{})
}

// Compile-time check that driver satisfies middleware.Driver.
var _ middleware.Driver = driver{}

// driver constructs torrent-approval hooks from YAML options.
type driver struct{}
|
||||
|
||||
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
|
||||
var cfg Config
|
||||
err := yaml.Unmarshal(optionBytes, &cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
|
||||
}
|
||||
|
||||
return NewHook(cfg)
|
||||
}
|
||||
|
||||
// ErrTorrentUnapproved is the error returned when a torrent hash is invalid.
var ErrTorrentUnapproved = bittorrent.ClientError("unapproved torrent")

// Config represents all the values required by this middleware to validate
// torrents based on their hash value.
//
// Whitelist and Blacklist are mutually exclusive (enforced by NewHook); each
// entry is a 40-character hex-encoded infohash.
type Config struct {
	Whitelist []string `yaml:"whitelist"`
	Blacklist []string `yaml:"blacklist"`
}
|
||||
|
||||
// hook holds the pre-parsed approval sets. At most one of the two maps is
// populated, since NewHook rejects configs that set both lists.
type hook struct {
	approved   map[bittorrent.InfoHash]struct{}
	unapproved map[bittorrent.InfoHash]struct{}
}
|
||||
|
||||
// NewHook returns an instance of the torrent approval middleware.
|
||||
func NewHook(cfg Config) (middleware.Hook, error) {
|
||||
h := &hook{
|
||||
approved: make(map[bittorrent.InfoHash]struct{}),
|
||||
unapproved: make(map[bittorrent.InfoHash]struct{}),
|
||||
}
|
||||
|
||||
if len(cfg.Whitelist) > 0 && len(cfg.Blacklist) > 0 {
|
||||
return nil, fmt.Errorf("using both whitelist and blacklist is invalid")
|
||||
}
|
||||
|
||||
for _, hashString := range cfg.Whitelist {
|
||||
hashinfo, err := hex.DecodeString(hashString)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("whitelist : invalid hash %s", hashString)
|
||||
}
|
||||
if len(hashinfo) != 20 {
|
||||
return nil, fmt.Errorf("whitelist : hash %s is not 20 byes", hashString)
|
||||
}
|
||||
h.approved[bittorrent.InfoHashFromBytes(hashinfo)] = struct{}{}
|
||||
}
|
||||
|
||||
for _, hashString := range cfg.Blacklist {
|
||||
hashinfo, err := hex.DecodeString(hashString)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("blacklist : invalid hash %s", hashString)
|
||||
}
|
||||
if len(hashinfo) != 20 {
|
||||
return nil, fmt.Errorf("blacklist : hash %s is not 20 byes", hashString)
|
||||
}
|
||||
h.unapproved[bittorrent.InfoHashFromBytes(hashinfo)] = struct{}{}
|
||||
}
|
||||
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
|
||||
infohash := req.InfoHash
|
||||
|
||||
if len(h.approved) > 0 {
|
||||
if _, found := h.approved[infohash]; !found {
|
||||
return ctx, ErrTorrentUnapproved
|
||||
}
|
||||
}
|
||||
|
||||
if len(h.unapproved) > 0 {
|
||||
if _, found := h.unapproved[infohash]; found {
|
||||
return ctx, ErrTorrentUnapproved
|
||||
}
|
||||
}
|
||||
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
// HandleScrape implements middleware.Hook; scrapes pass through unmodified.
func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
	// Scrapes don't require any protection.
	return ctx, nil
}
|
79
middleware/torrentapproval/torrentapproval_test.go
Normal file
79
middleware/torrentapproval/torrentapproval_test.go
Normal file
|
@ -0,0 +1,79 @@
|
|||
package torrentapproval
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
)
|
||||
|
||||
// cases drives TestHandleAnnounce: each entry pairs a Config with an announce
// infohash and whether that announce should be allowed through.
var cases = []struct {
	cfg      Config
	ih       string // 40-char hex infohash announced
	approved bool   // expected outcome
}{
	// Infohash is whitelisted
	{
		Config{
			Whitelist: []string{"3532cf2d327fad8448c075b4cb42c8136964a435"},
		},
		"3532cf2d327fad8448c075b4cb42c8136964a435",
		true,
	},
	// Infohash is not whitelisted
	{
		Config{
			Whitelist: []string{"3532cf2d327fad8448c075b4cb42c8136964a435"},
		},
		"4532cf2d327fad8448c075b4cb42c8136964a435",
		false,
	},
	// Infohash is not blacklisted
	{
		Config{
			Blacklist: []string{"3532cf2d327fad8448c075b4cb42c8136964a435"},
		},
		"4532cf2d327fad8448c075b4cb42c8136964a435",
		true,
	},
	// Infohash is blacklisted
	{
		Config{
			Blacklist: []string{"3532cf2d327fad8448c075b4cb42c8136964a435"},
		},
		"3532cf2d327fad8448c075b4cb42c8136964a435",
		false,
	},
}
|
||||
|
||||
// TestHandleAnnounce runs every entry in cases through a freshly constructed
// hook and checks the accept/reject decision.
func TestHandleAnnounce(t *testing.T) {
	for _, tt := range cases {
		t.Run(fmt.Sprintf("testing hash %s", tt.ih), func(t *testing.T) {
			h, err := NewHook(tt.cfg)
			require.Nil(t, err)

			ctx := context.Background()
			req := &bittorrent.AnnounceRequest{}
			resp := &bittorrent.AnnounceResponse{}

			hashbytes, err := hex.DecodeString(tt.ih)
			require.Nil(t, err)

			hashinfo := bittorrent.InfoHashFromBytes(hashbytes)

			req.InfoHash = hashinfo

			nctx, err := h.HandleAnnounce(ctx, req, resp)
			// The context must be passed through untouched.
			require.Equal(t, ctx, nctx)
			if tt.approved == true {
				require.NotEqual(t, err, ErrTorrentUnapproved)
			} else {
				require.Equal(t, err, ErrTorrentUnapproved)
			}
		})
	}
}
|
115
middleware/varinterval/varinterval.go
Normal file
115
middleware/varinterval/varinterval.go
Normal file
|
@ -0,0 +1,115 @@
|
|||
package varinterval
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/middleware"
|
||||
"github.com/chihaya/chihaya/middleware/pkg/random"
|
||||
)
|
||||
|
||||
// Name is the name by which this middleware is registered with Chihaya.
const Name = "interval variation"

// Register the driver at package load so config files can refer to it by Name.
func init() {
	middleware.RegisterDriver(Name, driver{})
}

// Compile-time check that driver satisfies middleware.Driver.
var _ middleware.Driver = driver{}

// driver constructs varinterval hooks from YAML options.
type driver struct{}
|
||||
|
||||
// NewHook decodes the YAML options into a Config and builds the hook from it.
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
	var cfg Config
	err := yaml.Unmarshal(optionBytes, &cfg)
	if err != nil {
		return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
	}

	return NewHook(cfg)
}
|
||||
|
||||
// ErrInvalidModifyResponseProbability is returned for a config with an invalid
// ModifyResponseProbability.
var ErrInvalidModifyResponseProbability = errors.New("invalid modify_response_probability")

// ErrInvalidMaxIncreaseDelta is returned for a config with an invalid
// MaxIncreaseDelta.
var ErrInvalidMaxIncreaseDelta = errors.New("invalid max_increase_delta")

// Config represents the configuration for the varinterval middleware.
type Config struct {
	// ModifyResponseProbability is the probability by which a response will
	// be modified. Must be in (0, 1] (see checkConfig).
	ModifyResponseProbability float32 `yaml:"modify_response_probability"`

	// MaxIncreaseDelta is the amount of seconds that will be added at most.
	// Must be > 0 (see checkConfig).
	MaxIncreaseDelta int `yaml:"max_increase_delta"`

	// ModifyMinInterval specifies whether min_interval should be increased
	// as well.
	ModifyMinInterval bool `yaml:"modify_min_interval"`
}
|
||||
|
||||
func checkConfig(cfg Config) error {
|
||||
if cfg.ModifyResponseProbability <= 0 || cfg.ModifyResponseProbability > 1 {
|
||||
return ErrInvalidModifyResponseProbability
|
||||
}
|
||||
|
||||
if cfg.MaxIncreaseDelta <= 0 {
|
||||
return ErrInvalidMaxIncreaseDelta
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// hook randomizes announce intervals according to cfg.
type hook struct {
	cfg Config
	// NOTE(review): this mutex is never locked by the methods visible in
	// this file; confirm whether it is still required.
	sync.Mutex
}
|
||||
|
||||
// NewHook creates a middleware to randomly modify the announce interval from
|
||||
// the given config.
|
||||
func NewHook(cfg Config) (middleware.Hook, error) {
|
||||
if err := checkConfig(cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
h := &hook{
|
||||
cfg: cfg,
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
|
||||
s0, s1 := random.DeriveEntropyFromRequest(req)
|
||||
// Generate a probability p < 1.0.
|
||||
v, s0, s1 := random.Intn(s0, s1, 1<<24)
|
||||
p := float32(v) / (1 << 24)
|
||||
if h.cfg.ModifyResponseProbability == 1 || p < h.cfg.ModifyResponseProbability {
|
||||
// Generate the increase delta.
|
||||
v, _, _ = random.Intn(s0, s1, h.cfg.MaxIncreaseDelta)
|
||||
deltaDuration := time.Duration(v+1) * time.Second
|
||||
|
||||
resp.Interval += deltaDuration
|
||||
|
||||
if h.cfg.ModifyMinInterval {
|
||||
resp.MinInterval += deltaDuration
|
||||
}
|
||||
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
// HandleScrape implements middleware.Hook; scrapes pass through unmodified.
func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
	// Scrapes are not altered.
	return ctx, nil
}
|
61
middleware/varinterval/varinterval_test.go
Normal file
61
middleware/varinterval/varinterval_test.go
Normal file
|
@ -0,0 +1,61 @@
|
|||
package varinterval
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
)
|
||||
|
||||
// configTests drives TestCheckConfig. Fields are positional:
// {ModifyResponseProbability, MaxIncreaseDelta, ModifyMinInterval}.
var configTests = []struct {
	cfg      Config
	expected error
}{
	{
		cfg:      Config{0.5, 60, true},
		expected: nil,
	}, {
		// Probability exactly 1 is valid (always modify).
		cfg:      Config{1.0, 60, true},
		expected: nil,
	}, {
		// Probability must be strictly positive.
		cfg:      Config{0.0, 60, true},
		expected: ErrInvalidModifyResponseProbability,
	}, {
		cfg:      Config{1.1, 60, true},
		expected: ErrInvalidModifyResponseProbability,
	}, {
		// Delta must be strictly positive.
		cfg:      Config{0.5, 0, true},
		expected: ErrInvalidMaxIncreaseDelta,
	}, {
		cfg:      Config{0.5, -10, true},
		expected: ErrInvalidMaxIncreaseDelta,
	},
}
|
||||
|
||||
func TestCheckConfig(t *testing.T) {
|
||||
for _, tt := range configTests {
|
||||
t.Run(fmt.Sprintf("%#v", tt.cfg), func(t *testing.T) {
|
||||
got := checkConfig(tt.cfg)
|
||||
require.Equal(t, tt.expected, got, "", tt.cfg)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestHandleAnnounce uses probability 1.0, so the hook must always modify the
// response; both intervals are asserted to have grown from zero.
func TestHandleAnnounce(t *testing.T) {
	h, err := NewHook(Config{1.0, 10, true})
	require.Nil(t, err)
	require.NotNil(t, h)

	ctx := context.Background()
	req := &bittorrent.AnnounceRequest{}
	resp := &bittorrent.AnnounceResponse{}

	nCtx, err := h.HandleAnnounce(ctx, req, resp)
	require.Nil(t, err)
	require.Equal(t, ctx, nCtx)
	require.True(t, resp.Interval > 0, "interval should have been increased")
	require.True(t, resp.MinInterval > 0, "min_interval should have been increased")
}
|
134
pkg/log/log.go
Normal file
134
pkg/log/log.go
Normal file
|
@ -0,0 +1,134 @@
|
|||
// Package log adds a thin wrapper around logrus to improve non-debug logging
|
||||
// performance.
|
||||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
	// l is the package-wide logrus logger all helpers below write to.
	l = logrus.New()
	// debug gates the Debug helper; toggled via SetDebug.
	debug = false
)
|
||||
|
||||
// SetDebug controls debug logging.
|
||||
func SetDebug(to bool) {
|
||||
debug = to
|
||||
l.Level = logrus.DebugLevel
|
||||
}
|
||||
|
||||
// SetFormatter sets the formatter of the package-level logger.
func SetFormatter(to logrus.Formatter) {
	l.Formatter = to
}

// SetOutput sets the output writer of the package-level logger.
func SetOutput(to io.Writer) {
	l.Out = to
}
|
||||
|
||||
// Fields is a map of logging fields.
type Fields map[string]interface{}

// LogFields implements Fielder for Fields by returning the receiver itself.
func (f Fields) LogFields() Fields {
	return f
}

// A Fielder provides Fields via the LogFields method.
type Fielder interface {
	LogFields() Fields
}
|
||||
|
||||
// err is a wrapper around an error.
type err struct {
	e error
}

// LogFields provides Fields for logging: the error message and the error's
// dynamic Go type.
func (e err) LogFields() Fields {
	return Fields{
		"error": e.e.Error(),
		"type":  fmt.Sprintf("%T", e.e),
	}
}

// Err is a wrapper around errors that implements Fielder.
func Err(e error) Fielder {
	return err{e}
}
|
||||
|
||||
// mergeFielders merges the Fields of multiple Fielders.
|
||||
// Fields from the first Fielder will be used unchanged, Fields from subsequent
|
||||
// Fielders will be prefixed with "%d.", starting from 1.
|
||||
//
|
||||
// must be called with len(fielders) > 0
|
||||
func mergeFielders(fielders ...Fielder) logrus.Fields {
|
||||
if fielders[0] == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
fields := fielders[0].LogFields()
|
||||
for i := 1; i < len(fielders); i++ {
|
||||
if fielders[i] == nil {
|
||||
continue
|
||||
}
|
||||
prefix := fmt.Sprint(i, ".")
|
||||
ff := fielders[i].LogFields()
|
||||
for k, v := range ff {
|
||||
fields[prefix+k] = v
|
||||
}
|
||||
}
|
||||
|
||||
return logrus.Fields(fields)
|
||||
}
|
||||
|
||||
// Debug logs at the debug level if debug logging is enabled.
func Debug(v interface{}, fielders ...Fielder) {
	// Gate on the flag first so disabled debug logging pays no
	// field-merging cost.
	if debug {
		if len(fielders) != 0 {
			l.WithFields(mergeFielders(fielders...)).Debug(v)
		} else {
			l.Debug(v)
		}
	}
}
|
||||
|
||||
// Info logs at the info level.
func Info(v interface{}, fielders ...Fielder) {
	if len(fielders) != 0 {
		l.WithFields(mergeFielders(fielders...)).Info(v)
	} else {
		l.Info(v)
	}
}
|
||||
|
||||
// Warn logs at the warning level.
func Warn(v interface{}, fielders ...Fielder) {
	if len(fielders) != 0 {
		l.WithFields(mergeFielders(fielders...)).Warn(v)
	} else {
		l.Warn(v)
	}
}
|
||||
|
||||
// Error logs at the error level.
func Error(v interface{}, fielders ...Fielder) {
	if len(fielders) != 0 {
		l.WithFields(mergeFielders(fielders...)).Error(v)
	} else {
		l.Error(v)
	}
}
|
||||
|
||||
// Fatal logs at the fatal level and exits with a status code != 0.
func Fatal(v interface{}, fielders ...Fielder) {
	if len(fielders) != 0 {
		l.WithFields(mergeFielders(fielders...)).Fatal(v)
	} else {
		l.Fatal(v)
	}
}
|
59
pkg/metrics/server.go
Normal file
59
pkg/metrics/server.go
Normal file
|
@ -0,0 +1,59 @@
|
|||
// Package metrics implements a standalone HTTP server for serving pprof
|
||||
// profiles and Prometheus metrics.
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/http/pprof"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
"github.com/chihaya/chihaya/pkg/stop"
|
||||
)
|
||||
|
||||
// Server represents a standalone HTTP server for serving a Prometheus metrics
// endpoint (plus pprof handlers; see NewServer).
type Server struct {
	srv *http.Server
}
|
||||
|
||||
// Stop shuts down the server.
//
// Stop returns immediately; the returned Result receives the outcome of the
// graceful http.Server.Shutdown once it completes.
func (s *Server) Stop() stop.Result {
	c := make(stop.Channel)
	go func() {
		c.Done(s.srv.Shutdown(context.Background()))
	}()

	return c.Result()
}
|
||||
|
||||
// NewServer creates a new instance of a Prometheus server that asynchronously
|
||||
// serves requests.
|
||||
func NewServer(addr string) *Server {
|
||||
mux := http.NewServeMux()
|
||||
|
||||
mux.Handle("/metrics", promhttp.Handler())
|
||||
mux.HandleFunc("/debug/pprof/", pprof.Index)
|
||||
mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
|
||||
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
|
||||
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
|
||||
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
|
||||
|
||||
s := &Server{
|
||||
srv: &http.Server{
|
||||
Addr: addr,
|
||||
Handler: mux,
|
||||
},
|
||||
}
|
||||
|
||||
go func() {
|
||||
if err := s.srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
|
||||
log.Fatal("failed while serving prometheus", log.Err(err))
|
||||
}
|
||||
}()
|
||||
|
||||
return s
|
||||
}
|
125
pkg/stop/stop.go
Normal file
125
pkg/stop/stop.go
Normal file
|
@ -0,0 +1,125 @@
|
|||
// Package stop implements a pattern for shutting down a group of processes.
|
||||
package stop
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Channel is used to return zero or more errors asynchronously. Call Done()
// once to pass errors to the Channel.
type Channel chan []error

// Result is a receive-only version of Channel. Call Wait() once to receive any
// returned errors.
type Result <-chan []error

// Done adds zero or more errors to the Channel and closes it, indicating the
// caller has finished stopping. It should be called exactly once.
//
// nil errors are dropped. The previous implementation only inspected
// errs[0], so a call like Done(nil, err) silently discarded err.
func (ch Channel) Done(errs ...error) {
	nonNil := make([]error, 0, len(errs))
	for _, e := range errs {
		if e != nil {
			nonNil = append(nonNil, e)
		}
	}
	if len(nonNil) > 0 {
		// The send happens before close so Wait observes the errors.
		ch <- nonNil
	}
	close(ch)
}

// Result converts a Channel to a Result.
func (ch Channel) Result() <-chan []error {
	return ch
}

// Wait blocks until Done() is called on the underlying Channel and returns any
// errors. It should be called exactly once.
func (r Result) Wait() []error {
	return <-r
}
|
||||
|
||||
// AlreadyStopped is a closed error channel to be used by Funcs when
// an element was already stopped.
var AlreadyStopped Result

// AlreadyStoppedFunc is a Func that returns AlreadyStopped.
var AlreadyStoppedFunc = func() Result { return AlreadyStopped }

// Build AlreadyStopped as an already-closed channel: Wait on it returns
// immediately with no errors.
func init() {
	closeMe := make(Channel)
	close(closeMe)
	AlreadyStopped = closeMe.Result()
}
|
||||
|
||||
// Stopper is an interface that allows a clean shutdown.
type Stopper interface {
	// Stop returns a channel that indicates whether the stop was
	// successful.
	//
	// The channel can either return one error or be closed.
	// Closing the channel signals a clean shutdown.
	// Stop() should return immediately and perform the actual shutdown in a
	// separate goroutine.
	Stop() Result
}

// Func is a function that can be used to provide a clean shutdown.
type Func func() Result
|
||||
|
||||
// Group is a collection of Stoppers that can be stopped all at once.
type Group struct {
	// stoppables holds the Stop functions of every registered member.
	stoppables []Func
	sync.Mutex // guards stoppables
}

// NewGroup allocates a new Group.
func NewGroup() *Group {
	return &Group{
		stoppables: make([]Func, 0),
	}
}
|
||||
|
||||
// Add appends a Stopper to the Group.
func (cg *Group) Add(toAdd Stopper) {
	cg.Lock()
	defer cg.Unlock()

	// Store the bound method value; it closes over toAdd.
	cg.stoppables = append(cg.stoppables, toAdd.Stop)
}

// AddFunc appends a Func to the Group.
func (cg *Group) AddFunc(toAddFunc Func) {
	cg.Lock()
	defer cg.Unlock()

	cg.stoppables = append(cg.stoppables, toAddFunc)
}
|
||||
|
||||
// Stop stops all members of the Group.
//
// Stopping will be done in a concurrent fashion.
// The slice of errors returned contains all errors returned by stopping the
// members.
func (cg *Group) Stop() Result {
	cg.Lock()
	defer cg.Unlock()

	whenDone := make(Channel)

	// Kick off every member's shutdown first so they all stop concurrently.
	waitChannels := make([]Result, 0, len(cg.stoppables))
	for _, toStop := range cg.stoppables {
		waitFor := toStop()
		if waitFor == nil {
			// A nil channel would make the collector below block forever;
			// fail loudly instead.
			panic("received a nil chan from Stop")
		}
		waitChannels = append(waitChannels, waitFor)
	}

	// Collect the per-member results in the background and publish the
	// aggregate on the returned channel.
	go func() {
		var errors []error
		for _, waitForMe := range waitChannels {
			childErrors := waitForMe.Wait()
			if len(childErrors) > 0 {
				errors = append(errors, childErrors...)
			}
		}
		whenDone.Done(errors...)
	}()

	return whenDone.Result()
}
|
127
pkg/timecache/timecache.go
Normal file
127
pkg/timecache/timecache.go
Normal file
|
@ -0,0 +1,127 @@
|
|||
// Package timecache provides a cache for the system clock, to avoid calls to
|
||||
// time.Now().
|
||||
// The time is stored as one int64 which holds the number of nanoseconds since
|
||||
// the Unix Epoch. The value is accessed using atomic primitives, without
|
||||
// locking.
|
||||
// The package runs a global singleton TimeCache that is is updated every
|
||||
// second.
|
||||
package timecache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// t is the global TimeCache.
|
||||
var t *TimeCache
|
||||
|
||||
func init() {
|
||||
t = &TimeCache{
|
||||
clock: time.Now().UnixNano(),
|
||||
closed: make(chan struct{}),
|
||||
running: make(chan struct{}),
|
||||
}
|
||||
|
||||
go t.Run(1 * time.Second)
|
||||
}
|
||||
|
||||
// A TimeCache is a cache for the current system time.
// The cached time has nanosecond precision.
type TimeCache struct {
	// clock saves the current time's nanoseconds since the Epoch.
	// Must be accessed atomically.
	clock int64

	closed  chan struct{} // closed by Stop; terminates Run's loop
	running chan struct{} // closed by Run; guards against a second Run
	m       sync.Mutex    // serializes the Run/Stop state transitions
}
|
||||
|
||||
// New returns a new TimeCache instance.
// The TimeCache must be started (Run) to update the time; until then it
// serves the construction-time clock value.
func New() *TimeCache {
	return &TimeCache{
		clock:   time.Now().UnixNano(),
		closed:  make(chan struct{}),
		running: make(chan struct{}),
	}
}
|
||||
|
||||
// Run runs the TimeCache, updating the cached clock value once every interval
|
||||
// and blocks until Stop is called.
|
||||
func (t *TimeCache) Run(interval time.Duration) {
|
||||
t.m.Lock()
|
||||
select {
|
||||
case <-t.running:
|
||||
panic("Run called multiple times")
|
||||
default:
|
||||
}
|
||||
close(t.running)
|
||||
t.m.Unlock()
|
||||
|
||||
tick := time.NewTicker(interval)
|
||||
defer tick.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-t.closed:
|
||||
tick.Stop()
|
||||
return
|
||||
case now := <-tick.C:
|
||||
atomic.StoreInt64(&t.clock, now.UnixNano())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops the TimeCache.
// The cached time remains valid but will not be updated anymore.
// A TimeCache can not be restarted. Construct a new one instead.
// Calling Stop again is a no-op.
func (t *TimeCache) Stop() {
	t.m.Lock()
	defer t.m.Unlock()

	// If closed is already closed, a second close would panic; the select
	// falls through to close only when it is still open.
	select {
	case <-t.closed:
		return
	default:
	}
	close(t.closed)
}
|
||||
|
||||
// Now returns the cached time as a time.Time value.
func (t *TimeCache) Now() time.Time {
	return time.Unix(0, atomic.LoadInt64(&t.clock))
}

// NowUnixNano returns the cached time as nanoseconds since the Unix Epoch.
func (t *TimeCache) NowUnixNano() int64 {
	return atomic.LoadInt64(&t.clock)
}

// NowUnix returns the cached time as seconds since the Unix Epoch.
func (t *TimeCache) NowUnix() int64 {
	// Adopted from time.Unix: floor-divide nanoseconds into seconds so
	// pre-epoch (negative) timestamps still round toward minus infinity.
	nsec := atomic.LoadInt64(&t.clock)
	sec := nsec / 1e9
	nsec -= sec * 1e9
	if nsec < 0 {
		sec--
	}
	return sec
}
|
||||
|
||||
// Now calls Now on the global TimeCache instance.
func Now() time.Time {
	return t.Now()
}

// NowUnixNano calls NowUnixNano on the global TimeCache instance.
func NowUnixNano() int64 {
	return t.NowUnixNano()
}

// NowUnix calls NowUnix on the global TimeCache instance.
func NowUnix() int64 {
	return t.NowUnix()
}
|
148
pkg/timecache/timecache_test.go
Normal file
148
pkg/timecache/timecache_test.go
Normal file
|
@ -0,0 +1,148 @@
|
|||
package timecache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
c := New()
|
||||
require.NotNil(t, c)
|
||||
|
||||
now := c.Now()
|
||||
require.False(t, now.IsZero())
|
||||
|
||||
nsec := c.NowUnixNano()
|
||||
require.NotEqual(t, 0, nsec)
|
||||
|
||||
sec := c.NowUnix()
|
||||
require.NotEqual(t, 0, sec)
|
||||
}
|
||||
|
||||
// TestRunStop verifies that Stop terminates a running TimeCache and that Run
// returns afterwards.
func TestRunStop(t *testing.T) {
	c := New()
	require.NotNil(t, c)

	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		c.Run(1 * time.Second)
	}()

	c.Stop()

	// Block until Run observes the closed channel and returns.
	wg.Wait()
}
|
||||
|
||||
// TestMultipleStop verifies that a second Stop is a harmless no-op (it must
// not panic on a double close).
func TestMultipleStop(t *testing.T) {
	c := New()
	require.NotNil(t, c)

	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		c.Run(1 * time.Second)
	}()

	c.Stop()
	c.Stop()

	wg.Wait()
}
|
||||
|
||||
// doBenchmark runs f in parallel against a running TimeCache and tears the
// cache down once the benchmark completes.
func doBenchmark(b *testing.B, f func(tc *TimeCache) func(*testing.PB)) {
	tc := New()
	require.NotNil(b, tc)
	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		tc.Run(1 * time.Second)
	}()

	b.RunParallel(f(tc))

	tc.Stop()
	wg.Wait()
}
|
||||
|
||||
// BenchmarkNow measures parallel reads of the cached time as time.Time.
func BenchmarkNow(b *testing.B) {
	doBenchmark(b, func(tc *TimeCache) func(pb *testing.PB) {
		return func(pb *testing.PB) {
			var now time.Time
			for pb.Next() {
				now = tc.Now()
			}
			_ = now // sink so the call is not optimized away
		}
	})
}

// BenchmarkNowUnix measures parallel reads of the cached time in seconds.
func BenchmarkNowUnix(b *testing.B) {
	doBenchmark(b, func(tc *TimeCache) func(pb *testing.PB) {
		return func(pb *testing.PB) {
			var now int64
			for pb.Next() {
				now = tc.NowUnix()
			}
			_ = now // sink so the call is not optimized away
		}
	})
}

// BenchmarkNowUnixNano measures parallel reads of the cached time in
// nanoseconds.
func BenchmarkNowUnixNano(b *testing.B) {
	doBenchmark(b, func(tc *TimeCache) func(pb *testing.PB) {
		return func(pb *testing.PB) {
			var now int64
			for pb.Next() {
				now = tc.NowUnixNano()
			}
			_ = now // sink so the call is not optimized away
		}
	})
}
|
||||
|
||||
// BenchmarkNowGlobal measures the package-level Now wrapper.
func BenchmarkNowGlobal(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		var now time.Time
		for pb.Next() {
			now = Now()
		}
		_ = now // sink so the call is not optimized away
	})
}

// BenchmarkNowUnixGlobal measures the package-level NowUnix wrapper.
func BenchmarkNowUnixGlobal(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		var now int64
		for pb.Next() {
			now = NowUnix()
		}
		_ = now // sink so the call is not optimized away
	})
}

// BenchmarkNowUnixNanoGlobal measures the package-level NowUnixNano wrapper.
func BenchmarkNowUnixNanoGlobal(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		var now int64
		for pb.Next() {
			now = NowUnixNano()
		}
		_ = now // sink so the call is not optimized away
	})
}

// BenchmarkTimeNow is the uncached time.Now baseline for comparison against
// the cached variants above.
func BenchmarkTimeNow(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		var now time.Time
		for pb.Next() {
			now = time.Now()
		}
		_ = now // sink so the call is not optimized away
	})
}
|
|
@ -1,97 +0,0 @@
|
|||
package stopper
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// AlreadyStopped is a closed error channel to be used by Funcs when
// an element was already stopped.
var AlreadyStopped <-chan error

// AlreadyStoppedFunc is a Func that returns AlreadyStopped.
var AlreadyStoppedFunc = func() <-chan error { return AlreadyStopped }

// Close AlreadyStopped once at load time; receives from it then complete
// immediately with no error.
func init() {
	closeMe := make(chan error)
	close(closeMe)
	AlreadyStopped = closeMe
}
||||
|
||||
// Stopper is an interface that allows a clean shutdown.
|
||||
type Stopper interface {
|
||||
// Stop returns a channel that indicates whether the stop was
|
||||
// successful.
|
||||
// The channel can either return one error or be closed. Closing the
|
||||
// channel signals a clean shutdown.
|
||||
// The Stop function should return immediately and perform the actual
|
||||
// shutdown in a separate goroutine.
|
||||
Stop() <-chan error
|
||||
}
|
||||
|
||||
// StopGroup is a group that can be stopped.
|
||||
type StopGroup struct {
|
||||
stoppables []Func
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// Func is a function that can be used to provide a clean shutdown.
|
||||
type Func func() <-chan error
|
||||
|
||||
// NewStopGroup creates a new StopGroup.
|
||||
func NewStopGroup() *StopGroup {
|
||||
return &StopGroup{
|
||||
stoppables: make([]Func, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Add adds a Stopper to the StopGroup.
|
||||
// On the next call to Stop(), the Stopper will be stopped.
|
||||
func (cg *StopGroup) Add(toAdd Stopper) {
|
||||
cg.Lock()
|
||||
defer cg.Unlock()
|
||||
|
||||
cg.stoppables = append(cg.stoppables, toAdd.Stop)
|
||||
}
|
||||
|
||||
// AddFunc adds a Func to the StopGroup.
|
||||
// On the next call to Stop(), the Func will be called.
|
||||
func (cg *StopGroup) AddFunc(toAddFunc Func) {
|
||||
cg.Lock()
|
||||
defer cg.Unlock()
|
||||
|
||||
cg.stoppables = append(cg.stoppables, toAddFunc)
|
||||
}
|
||||
|
||||
// Stop stops all members of the StopGroup.
|
||||
// Stopping will be done in a concurrent fashion.
|
||||
// The slice of errors returned contains all errors returned by stopping the
|
||||
// members.
|
||||
func (cg *StopGroup) Stop() []error {
|
||||
cg.Lock()
|
||||
defer cg.Unlock()
|
||||
|
||||
var errors []error
|
||||
whenDone := make(chan struct{})
|
||||
|
||||
waitChannels := make([]<-chan error, 0, len(cg.stoppables))
|
||||
for _, toStop := range cg.stoppables {
|
||||
waitFor := toStop()
|
||||
if waitFor == nil {
|
||||
panic("received a nil chan from Stop")
|
||||
}
|
||||
waitChannels = append(waitChannels, waitFor)
|
||||
}
|
||||
|
||||
go func() {
|
||||
for _, waitForMe := range waitChannels {
|
||||
err := <-waitForMe
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
close(whenDone)
|
||||
}()
|
||||
|
||||
<-whenDone
|
||||
return errors
|
||||
}
|
|
@ -1,61 +1,167 @@
|
|||
// Package memory implements the storage interface for a Chihaya
|
||||
// BitTorrent tracker keeping peer data in memory.
|
||||
package memory
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math"
|
||||
"net"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
"github.com/chihaya/chihaya/pkg/stop"
|
||||
"github.com/chihaya/chihaya/pkg/timecache"
|
||||
"github.com/chihaya/chihaya/storage"
|
||||
)
|
||||
|
||||
// ErrInvalidGCInterval is returned for a GarbageCollectionInterval that is
|
||||
// less than or equal to zero.
|
||||
var ErrInvalidGCInterval = errors.New("invalid garbage collection interval")
|
||||
// Name is the name by which this peer store is registered with Chihaya.
|
||||
const Name = "memory"
|
||||
|
||||
// Default config constants.
|
||||
const (
|
||||
defaultShardCount = 1024
|
||||
defaultPrometheusReportingInterval = time.Second * 1
|
||||
defaultGarbageCollectionInterval = time.Minute * 3
|
||||
defaultPeerLifetime = time.Minute * 30
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Register the storage driver.
|
||||
storage.RegisterDriver(Name, driver{})
|
||||
}
|
||||
|
||||
type driver struct{}
|
||||
|
||||
func (d driver) NewPeerStore(icfg interface{}) (storage.PeerStore, error) {
|
||||
// Marshal the config back into bytes.
|
||||
bytes, err := yaml.Marshal(icfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Unmarshal the bytes into the proper config type.
|
||||
var cfg Config
|
||||
err = yaml.Unmarshal(bytes, &cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return New(cfg)
|
||||
}
|
||||
|
||||
// Config holds the configuration of a memory PeerStore.
|
||||
type Config struct {
|
||||
GarbageCollectionInterval time.Duration `yaml:"gc_interval"`
|
||||
PrometheusReportingInterval time.Duration `yaml:"prometheus_reporting_interval"`
|
||||
PeerLifetime time.Duration `yaml:"peer_lifetime"`
|
||||
ShardCount int `yaml:"shard_count"`
|
||||
MaxNumWant int `yaml:"max_numwant"`
|
||||
}
|
||||
|
||||
// New creates a new PeerStore backed by memory.
|
||||
func New(cfg Config) (storage.PeerStore, error) {
|
||||
shardCount := 1
|
||||
if cfg.ShardCount > 0 {
|
||||
shardCount = cfg.ShardCount
|
||||
// LogFields renders the current config as a set of Logrus fields.
|
||||
func (cfg Config) LogFields() log.Fields {
|
||||
return log.Fields{
|
||||
"name": Name,
|
||||
"gcInterval": cfg.GarbageCollectionInterval,
|
||||
"promReportInterval": cfg.PrometheusReportingInterval,
|
||||
"peerLifetime": cfg.PeerLifetime,
|
||||
"shardCount": cfg.ShardCount,
|
||||
}
|
||||
}
|
||||
|
||||
// Validate sanity checks values set in a config and returns a new config with
|
||||
// default values replacing anything that is invalid.
|
||||
//
|
||||
// This function warns to the logger when a value is changed.
|
||||
func (cfg Config) Validate() Config {
|
||||
validcfg := cfg
|
||||
|
||||
if cfg.ShardCount <= 0 || cfg.ShardCount > (math.MaxInt/2) {
|
||||
validcfg.ShardCount = defaultShardCount
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": Name + ".ShardCount",
|
||||
"provided": cfg.ShardCount,
|
||||
"default": validcfg.ShardCount,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.GarbageCollectionInterval <= 0 {
|
||||
return nil, ErrInvalidGCInterval
|
||||
validcfg.GarbageCollectionInterval = defaultGarbageCollectionInterval
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": Name + ".GarbageCollectionInterval",
|
||||
"provided": cfg.GarbageCollectionInterval,
|
||||
"default": validcfg.GarbageCollectionInterval,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.PrometheusReportingInterval <= 0 {
|
||||
validcfg.PrometheusReportingInterval = defaultPrometheusReportingInterval
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": Name + ".PrometheusReportingInterval",
|
||||
"provided": cfg.PrometheusReportingInterval,
|
||||
"default": validcfg.PrometheusReportingInterval,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.PeerLifetime <= 0 {
|
||||
validcfg.PeerLifetime = defaultPeerLifetime
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": Name + ".PeerLifetime",
|
||||
"provided": cfg.PeerLifetime,
|
||||
"default": validcfg.PeerLifetime,
|
||||
})
|
||||
}
|
||||
|
||||
return validcfg
|
||||
}
|
||||
|
||||
// New creates a new PeerStore backed by memory.
|
||||
func New(provided Config) (storage.PeerStore, error) {
|
||||
cfg := provided.Validate()
|
||||
ps := &peerStore{
|
||||
shards: make([]*peerShard, shardCount*2),
|
||||
cfg: cfg,
|
||||
shards: make([]*peerShard, cfg.ShardCount*2),
|
||||
closed: make(chan struct{}),
|
||||
maxNumWant: cfg.MaxNumWant,
|
||||
}
|
||||
|
||||
for i := 0; i < shardCount*2; i++ {
|
||||
for i := 0; i < cfg.ShardCount*2; i++ {
|
||||
ps.shards[i] = &peerShard{swarms: make(map[bittorrent.InfoHash]swarm)}
|
||||
}
|
||||
|
||||
// Start a goroutine for garbage collection.
|
||||
ps.wg.Add(1)
|
||||
go func() {
|
||||
defer ps.wg.Done()
|
||||
for {
|
||||
select {
|
||||
case <-ps.closed:
|
||||
return
|
||||
case <-time.After(cfg.GarbageCollectionInterval):
|
||||
before := time.Now().Add(-cfg.PeerLifetime)
|
||||
log.Debugln("memory: purging peers with no announces since", before)
|
||||
ps.collectGarbage(before)
|
||||
log.Debug("storage: purging peers with no announces since", log.Fields{"before": before})
|
||||
_ = ps.collectGarbage(before)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Start a goroutine for reporting statistics to Prometheus.
|
||||
ps.wg.Add(1)
|
||||
go func() {
|
||||
defer ps.wg.Done()
|
||||
t := time.NewTicker(cfg.PrometheusReportingInterval)
|
||||
for {
|
||||
select {
|
||||
case <-ps.closed:
|
||||
t.Stop()
|
||||
return
|
||||
case <-t.C:
|
||||
before := time.Now()
|
||||
ps.populateProm()
|
||||
log.Debug("storage: populateProm() finished", log.Fields{"timeTaken": time.Since(before)})
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -65,8 +171,38 @@ func New(cfg Config) (storage.PeerStore, error) {
|
|||
|
||||
type serializedPeer string
|
||||
|
||||
func newPeerKey(p bittorrent.Peer) serializedPeer {
|
||||
b := make([]byte, 20+2+len(p.IP.IP))
|
||||
copy(b[:20], p.ID[:])
|
||||
binary.BigEndian.PutUint16(b[20:22], p.Port)
|
||||
copy(b[22:], p.IP.IP)
|
||||
|
||||
return serializedPeer(b)
|
||||
}
|
||||
|
||||
func decodePeerKey(pk serializedPeer) bittorrent.Peer {
|
||||
peer := bittorrent.Peer{
|
||||
ID: bittorrent.PeerIDFromString(string(pk[:20])),
|
||||
Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
|
||||
IP: bittorrent.IP{IP: net.IP(pk[22:])},
|
||||
}
|
||||
|
||||
if ip := peer.IP.To4(); ip != nil {
|
||||
peer.IP.IP = ip
|
||||
peer.IP.AddressFamily = bittorrent.IPv4
|
||||
} else if len(peer.IP.IP) == net.IPv6len { // implies toReturn.IP.To4() == nil
|
||||
peer.IP.AddressFamily = bittorrent.IPv6
|
||||
} else {
|
||||
panic("IP is neither v4 nor v6")
|
||||
}
|
||||
|
||||
return peer
|
||||
}
|
||||
|
||||
type peerShard struct {
|
||||
swarms map[bittorrent.InfoHash]swarm
|
||||
numSeeders uint64
|
||||
numLeechers uint64
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
|
@ -77,51 +213,63 @@ type swarm struct {
|
|||
}
|
||||
|
||||
type peerStore struct {
|
||||
cfg Config
|
||||
shards []*peerShard
|
||||
|
||||
closed chan struct{}
|
||||
maxNumWant int
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
var _ storage.PeerStore = &peerStore{}
|
||||
|
||||
func (s *peerStore) shardIndex(infoHash bittorrent.InfoHash, v6 bool) uint32 {
|
||||
// populateProm aggregates metrics over all shards and then posts them to
|
||||
// prometheus.
|
||||
func (ps *peerStore) populateProm() {
|
||||
var numInfohashes, numSeeders, numLeechers uint64
|
||||
|
||||
for _, s := range ps.shards {
|
||||
s.RLock()
|
||||
numInfohashes += uint64(len(s.swarms))
|
||||
numSeeders += s.numSeeders
|
||||
numLeechers += s.numLeechers
|
||||
s.RUnlock()
|
||||
}
|
||||
|
||||
storage.PromInfohashesCount.Set(float64(numInfohashes))
|
||||
storage.PromSeedersCount.Set(float64(numSeeders))
|
||||
storage.PromLeechersCount.Set(float64(numLeechers))
|
||||
}
|
||||
|
||||
// recordGCDuration records the duration of a GC sweep.
|
||||
func recordGCDuration(duration time.Duration) {
|
||||
storage.PromGCDurationMilliseconds.Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
|
||||
}
|
||||
|
||||
func (ps *peerStore) getClock() int64 {
|
||||
return timecache.NowUnixNano()
|
||||
}
|
||||
|
||||
func (ps *peerStore) shardIndex(infoHash bittorrent.InfoHash, af bittorrent.AddressFamily) uint32 {
|
||||
// There are twice the amount of shards specified by the user, the first
|
||||
// half is dedicated to IPv4 swarms and the second half is dedicated to
|
||||
// IPv6 swarms.
|
||||
idx := binary.BigEndian.Uint32(infoHash[:4]) % (uint32(len(s.shards)) / 2)
|
||||
if v6 {
|
||||
idx += uint32(len(s.shards) / 2)
|
||||
idx := binary.BigEndian.Uint32(infoHash[:4]) % (uint32(len(ps.shards)) / 2)
|
||||
if af == bittorrent.IPv6 {
|
||||
idx += uint32(len(ps.shards) / 2)
|
||||
}
|
||||
return idx
|
||||
}
|
||||
|
||||
func newPeerKey(p bittorrent.Peer) serializedPeer {
|
||||
b := make([]byte, 20+2+len(p.IP))
|
||||
copy(b[:20], p.ID[:])
|
||||
binary.BigEndian.PutUint16(b[20:22], p.Port)
|
||||
copy(b[22:], p.IP)
|
||||
|
||||
return serializedPeer(b)
|
||||
}
|
||||
|
||||
func decodePeerKey(pk serializedPeer) bittorrent.Peer {
|
||||
return bittorrent.Peer{
|
||||
ID: bittorrent.PeerIDFromString(string(pk[:20])),
|
||||
Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
|
||||
IP: net.IP(pk[22:]),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
|
||||
func (ps *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
|
||||
select {
|
||||
case <-s.closed:
|
||||
case <-ps.closed:
|
||||
panic("attempted to interact with stopped memory store")
|
||||
default:
|
||||
}
|
||||
|
||||
pk := newPeerKey(p)
|
||||
|
||||
shard := s.shards[s.shardIndex(ih, len(p.IP) == net.IPv6len)]
|
||||
shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
|
||||
shard.Lock()
|
||||
|
||||
if _, ok := shard.swarms[ih]; !ok {
|
||||
|
@ -131,22 +279,28 @@ func (s *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
|
|||
}
|
||||
}
|
||||
|
||||
shard.swarms[ih].seeders[pk] = time.Now().UnixNano()
|
||||
// If this peer isn't already a seeder, update the stats for the swarm.
|
||||
if _, ok := shard.swarms[ih].seeders[pk]; !ok {
|
||||
shard.numSeeders++
|
||||
}
|
||||
|
||||
// Update the peer in the swarm.
|
||||
shard.swarms[ih].seeders[pk] = ps.getClock()
|
||||
|
||||
shard.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
|
||||
func (ps *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
|
||||
select {
|
||||
case <-s.closed:
|
||||
case <-ps.closed:
|
||||
panic("attempted to interact with stopped memory store")
|
||||
default:
|
||||
}
|
||||
|
||||
pk := newPeerKey(p)
|
||||
|
||||
shard := s.shards[s.shardIndex(ih, len(p.IP) == net.IPv6len)]
|
||||
shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
|
||||
shard.Lock()
|
||||
|
||||
if _, ok := shard.swarms[ih]; !ok {
|
||||
|
@ -159,6 +313,7 @@ func (s *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) erro
|
|||
return storage.ErrResourceDoesNotExist
|
||||
}
|
||||
|
||||
shard.numSeeders--
|
||||
delete(shard.swarms[ih].seeders, pk)
|
||||
|
||||
if len(shard.swarms[ih].seeders)|len(shard.swarms[ih].leechers) == 0 {
|
||||
|
@ -169,16 +324,16 @@ func (s *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) erro
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
|
||||
func (ps *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
|
||||
select {
|
||||
case <-s.closed:
|
||||
case <-ps.closed:
|
||||
panic("attempted to interact with stopped memory store")
|
||||
default:
|
||||
}
|
||||
|
||||
pk := newPeerKey(p)
|
||||
|
||||
shard := s.shards[s.shardIndex(ih, len(p.IP) == net.IPv6len)]
|
||||
shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
|
||||
shard.Lock()
|
||||
|
||||
if _, ok := shard.swarms[ih]; !ok {
|
||||
|
@ -188,22 +343,28 @@ func (s *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error
|
|||
}
|
||||
}
|
||||
|
||||
shard.swarms[ih].leechers[pk] = time.Now().UnixNano()
|
||||
// If this peer isn't already a leecher, update the stats for the swarm.
|
||||
if _, ok := shard.swarms[ih].leechers[pk]; !ok {
|
||||
shard.numLeechers++
|
||||
}
|
||||
|
||||
// Update the peer in the swarm.
|
||||
shard.swarms[ih].leechers[pk] = ps.getClock()
|
||||
|
||||
shard.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
|
||||
func (ps *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
|
||||
select {
|
||||
case <-s.closed:
|
||||
case <-ps.closed:
|
||||
panic("attempted to interact with stopped memory store")
|
||||
default:
|
||||
}
|
||||
|
||||
pk := newPeerKey(p)
|
||||
|
||||
shard := s.shards[s.shardIndex(ih, len(p.IP) == net.IPv6len)]
|
||||
shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
|
||||
shard.Lock()
|
||||
|
||||
if _, ok := shard.swarms[ih]; !ok {
|
||||
|
@ -216,6 +377,7 @@ func (s *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) err
|
|||
return storage.ErrResourceDoesNotExist
|
||||
}
|
||||
|
||||
shard.numLeechers--
|
||||
delete(shard.swarms[ih].leechers, pk)
|
||||
|
||||
if len(shard.swarms[ih].seeders)|len(shard.swarms[ih].leechers) == 0 {
|
||||
|
@ -226,16 +388,16 @@ func (s *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) err
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
|
||||
func (ps *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
|
||||
select {
|
||||
case <-s.closed:
|
||||
case <-ps.closed:
|
||||
panic("attempted to interact with stopped memory store")
|
||||
default:
|
||||
}
|
||||
|
||||
pk := newPeerKey(p)
|
||||
|
||||
shard := s.shards[s.shardIndex(ih, len(p.IP) == net.IPv6len)]
|
||||
shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
|
||||
shard.Lock()
|
||||
|
||||
if _, ok := shard.swarms[ih]; !ok {
|
||||
|
@ -245,26 +407,32 @@ func (s *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) e
|
|||
}
|
||||
}
|
||||
|
||||
// If this peer is a leecher, update the stats for the swarm and remove them.
|
||||
if _, ok := shard.swarms[ih].leechers[pk]; ok {
|
||||
shard.numLeechers--
|
||||
delete(shard.swarms[ih].leechers, pk)
|
||||
}
|
||||
|
||||
shard.swarms[ih].seeders[pk] = time.Now().UnixNano()
|
||||
// If this peer isn't already a seeder, update the stats for the swarm.
|
||||
if _, ok := shard.swarms[ih].seeders[pk]; !ok {
|
||||
shard.numSeeders++
|
||||
}
|
||||
|
||||
// Update the peer in the swarm.
|
||||
shard.swarms[ih].seeders[pk] = ps.getClock()
|
||||
|
||||
shard.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant int, announcer bittorrent.Peer) (peers []bittorrent.Peer, err error) {
|
||||
func (ps *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant int, announcer bittorrent.Peer) (peers []bittorrent.Peer, err error) {
|
||||
select {
|
||||
case <-s.closed:
|
||||
case <-ps.closed:
|
||||
panic("attempted to interact with stopped memory store")
|
||||
default:
|
||||
}
|
||||
|
||||
if numWant > s.maxNumWant {
|
||||
numWant = s.maxNumWant
|
||||
}
|
||||
|
||||
shard := s.shards[s.shardIndex(ih, len(announcer.IP) == net.IPv6len)]
|
||||
shard := ps.shards[ps.shardIndex(ih, announcer.IP.AddressFamily)]
|
||||
shard.RLock()
|
||||
|
||||
if _, ok := shard.swarms[ih]; !ok {
|
||||
|
@ -275,41 +443,40 @@ func (s *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant i
|
|||
if seeder {
|
||||
// Append leechers as possible.
|
||||
leechers := shard.swarms[ih].leechers
|
||||
for p := range leechers {
|
||||
decodedPeer := decodePeerKey(p)
|
||||
for pk := range leechers {
|
||||
if numWant == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
peers = append(peers, decodedPeer)
|
||||
peers = append(peers, decodePeerKey(pk))
|
||||
numWant--
|
||||
}
|
||||
} else {
|
||||
// Append as many seeders as possible.
|
||||
seeders := shard.swarms[ih].seeders
|
||||
for p := range seeders {
|
||||
decodedPeer := decodePeerKey(p)
|
||||
for pk := range seeders {
|
||||
if numWant == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
peers = append(peers, decodedPeer)
|
||||
peers = append(peers, decodePeerKey(pk))
|
||||
numWant--
|
||||
}
|
||||
|
||||
// Append leechers until we reach numWant.
|
||||
leechers := shard.swarms[ih].leechers
|
||||
if numWant > 0 {
|
||||
for p := range leechers {
|
||||
decodedPeer := decodePeerKey(p)
|
||||
leechers := shard.swarms[ih].leechers
|
||||
announcerPK := newPeerKey(announcer)
|
||||
for pk := range leechers {
|
||||
if pk == announcerPK {
|
||||
continue
|
||||
}
|
||||
|
||||
if numWant == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if decodedPeer.Equal(announcer) {
|
||||
continue
|
||||
}
|
||||
peers = append(peers, decodedPeer)
|
||||
peers = append(peers, decodePeerKey(pk))
|
||||
numWant--
|
||||
}
|
||||
}
|
||||
|
@ -319,23 +486,25 @@ func (s *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant i
|
|||
return
|
||||
}
|
||||
|
||||
func (s *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, v6 bool) (resp bittorrent.Scrape) {
|
||||
func (ps *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, addressFamily bittorrent.AddressFamily) (resp bittorrent.Scrape) {
|
||||
select {
|
||||
case <-s.closed:
|
||||
case <-ps.closed:
|
||||
panic("attempted to interact with stopped memory store")
|
||||
default:
|
||||
}
|
||||
|
||||
shard := s.shards[s.shardIndex(ih, v6)]
|
||||
resp.InfoHash = ih
|
||||
shard := ps.shards[ps.shardIndex(ih, addressFamily)]
|
||||
shard.RLock()
|
||||
|
||||
if _, ok := shard.swarms[ih]; !ok {
|
||||
swarm, ok := shard.swarms[ih]
|
||||
if !ok {
|
||||
shard.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
resp.Incomplete = uint32(len(shard.swarms[ih].leechers))
|
||||
resp.Complete = uint32(len(shard.swarms[ih].seeders))
|
||||
resp.Incomplete = uint32(len(swarm.leechers))
|
||||
resp.Complete = uint32(len(swarm.seeders))
|
||||
shard.RUnlock()
|
||||
|
||||
return
|
||||
|
@ -346,15 +515,17 @@ func (s *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, v6 bool) (resp bittorren
|
|||
//
|
||||
// This function must be able to execute while other methods on this interface
|
||||
// are being executed in parallel.
|
||||
func (s *peerStore) collectGarbage(cutoff time.Time) error {
|
||||
func (ps *peerStore) collectGarbage(cutoff time.Time) error {
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped memory store")
|
||||
case <-ps.closed:
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
cutoffUnix := cutoff.UnixNano()
|
||||
for _, shard := range s.shards {
|
||||
start := time.Now()
|
||||
|
||||
for _, shard := range ps.shards {
|
||||
shard.RLock()
|
||||
var infohashes []bittorrent.InfoHash
|
||||
for ih := range shard.swarms {
|
||||
|
@ -374,12 +545,14 @@ func (s *peerStore) collectGarbage(cutoff time.Time) error {
|
|||
|
||||
for pk, mtime := range shard.swarms[ih].leechers {
|
||||
if mtime <= cutoffUnix {
|
||||
shard.numLeechers--
|
||||
delete(shard.swarms[ih].leechers, pk)
|
||||
}
|
||||
}
|
||||
|
||||
for pk, mtime := range shard.swarms[ih].seeders {
|
||||
if mtime <= cutoffUnix {
|
||||
shard.numSeeders--
|
||||
delete(shard.swarms[ih].seeders, pk)
|
||||
}
|
||||
}
|
||||
|
@ -395,19 +568,30 @@ func (s *peerStore) collectGarbage(cutoff time.Time) error {
|
|||
runtime.Gosched()
|
||||
}
|
||||
|
||||
recordGCDuration(time.Since(start))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *peerStore) Stop() <-chan error {
|
||||
toReturn := make(chan error)
|
||||
func (ps *peerStore) Stop() stop.Result {
|
||||
c := make(stop.Channel)
|
||||
go func() {
|
||||
shards := make([]*peerShard, len(s.shards))
|
||||
for i := 0; i < len(s.shards); i++ {
|
||||
close(ps.closed)
|
||||
ps.wg.Wait()
|
||||
|
||||
// Explicitly deallocate our storage.
|
||||
shards := make([]*peerShard, len(ps.shards))
|
||||
for i := 0; i < len(ps.shards); i++ {
|
||||
shards[i] = &peerShard{swarms: make(map[bittorrent.InfoHash]swarm)}
|
||||
}
|
||||
s.shards = shards
|
||||
close(s.closed)
|
||||
close(toReturn)
|
||||
ps.shards = shards
|
||||
|
||||
c.Done()
|
||||
}()
|
||||
return toReturn
|
||||
|
||||
return c.Result()
|
||||
}
|
||||
|
||||
func (ps *peerStore) LogFields() log.Fields {
|
||||
return ps.cfg.LogFields()
|
||||
}
|
||||
|
|
|
@ -2,20 +2,27 @@ package memory
|
|||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"time"
|
||||
|
||||
s "github.com/chihaya/chihaya/storage"
|
||||
)
|
||||
|
||||
func createNew() s.PeerStore {
|
||||
ps, err := New(Config{ShardCount: 1024, GarbageCollectionInterval: 10 * time.Minute})
|
||||
ps, err := New(Config{
|
||||
ShardCount: 1024,
|
||||
GarbageCollectionInterval: 10 * time.Minute,
|
||||
PrometheusReportingInterval: 10 * time.Minute,
|
||||
PeerLifetime: 30 * time.Minute,
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ps
|
||||
}
|
||||
|
||||
func TestPeerStore(t *testing.T) { s.TestPeerStore(t, createNew()) }
|
||||
|
||||
func BenchmarkNop(b *testing.B) { s.Nop(b, createNew()) }
|
||||
func BenchmarkPut(b *testing.B) { s.Put(b, createNew()) }
|
||||
func BenchmarkPut1k(b *testing.B) { s.Put1k(b, createNew()) }
|
||||
func BenchmarkPut1kInfohash(b *testing.B) { s.Put1kInfohash(b, createNew()) }
|
||||
|
@ -40,3 +47,5 @@ func BenchmarkAnnounceLeecher(b *testing.B) { s.AnnounceLeecher(b, cr
|
|||
func BenchmarkAnnounceLeecher1kInfohash(b *testing.B) { s.AnnounceLeecher1kInfohash(b, createNew()) }
|
||||
func BenchmarkAnnounceSeeder(b *testing.B) { s.AnnounceSeeder(b, createNew()) }
|
||||
func BenchmarkAnnounceSeeder1kInfohash(b *testing.B) { s.AnnounceSeeder1kInfohash(b, createNew()) }
|
||||
func BenchmarkScrapeSwarm(b *testing.B) { s.ScrapeSwarm(b, createNew()) }
|
||||
func BenchmarkScrapeSwarm1kInfohash(b *testing.B) { s.ScrapeSwarm1kInfohash(b, createNew()) }
|
||||
|
|
44
storage/prometheus.go
Normal file
44
storage/prometheus.go
Normal file
|
@ -0,0 +1,44 @@
|
|||
package storage
|
||||
|
||||
import "github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
func init() {
|
||||
// Register the metrics.
|
||||
prometheus.MustRegister(
|
||||
PromGCDurationMilliseconds,
|
||||
PromInfohashesCount,
|
||||
PromSeedersCount,
|
||||
PromLeechersCount,
|
||||
)
|
||||
}
|
||||
|
||||
var (
|
||||
// PromGCDurationMilliseconds is a histogram used by storage to record the
|
||||
// durations of execution time required for removing expired peers.
|
||||
PromGCDurationMilliseconds = prometheus.NewHistogram(prometheus.HistogramOpts{
|
||||
Name: "chihaya_storage_gc_duration_milliseconds",
|
||||
Help: "The time it takes to perform storage garbage collection",
|
||||
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
|
||||
})
|
||||
|
||||
// PromInfohashesCount is a gauge used to hold the current total amount of
|
||||
// unique swarms being tracked by a storage.
|
||||
PromInfohashesCount = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "chihaya_storage_infohashes_count",
|
||||
Help: "The number of Infohashes tracked",
|
||||
})
|
||||
|
||||
// PromSeedersCount is a gauge used to hold the current total amount of
|
||||
// unique seeders per swarm.
|
||||
PromSeedersCount = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "chihaya_storage_seeders_count",
|
||||
Help: "The number of seeders tracked",
|
||||
})
|
||||
|
||||
// PromLeechersCount is a gauge used to hold the current total amount of
|
||||
// unique leechers per swarm.
|
||||
PromLeechersCount = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "chihaya_storage_leechers_count",
|
||||
Help: "The number of leechers tracked",
|
||||
})
|
||||
)
|
829
storage/redis/peer_store.go
Normal file
829
storage/redis/peer_store.go
Normal file
|
@ -0,0 +1,829 @@
|
|||
// Package redis implements the storage interface for a Chihaya
|
||||
// BitTorrent tracker keeping peer data in redis with hash.
|
||||
// There two categories of hash:
|
||||
//
|
||||
// - IPv{4,6}_{L,S}_infohash
|
||||
// To save peers that hold the infohash, used for fast searching,
|
||||
// deleting, and timeout handling
|
||||
//
|
||||
// - IPv{4,6}
|
||||
// To save all the infohashes, used for garbage collection,
|
||||
// metrics aggregation and leecher graduation
|
||||
//
|
||||
// Tree keys are used to record the count of swarms, seeders
|
||||
// and leechers for each group (IPv4, IPv6).
|
||||
//
|
||||
// - IPv{4,6}_infohash_count
|
||||
// To record the number of infohashes.
|
||||
//
|
||||
// - IPv{4,6}_S_count
|
||||
// To record the number of seeders.
|
||||
//
|
||||
// - IPv{4,6}_L_count
|
||||
// To record the number of leechers.
|
||||
package redis
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"net"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gomodule/redigo/redis"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
"github.com/chihaya/chihaya/pkg/stop"
|
||||
"github.com/chihaya/chihaya/pkg/timecache"
|
||||
"github.com/chihaya/chihaya/storage"
|
||||
)
|
||||
|
||||
// Name is the name by which this peer store is registered with Chihaya.
|
||||
const Name = "redis"
|
||||
|
||||
// Default config constants.
|
||||
const (
|
||||
defaultPrometheusReportingInterval = time.Second * 1
|
||||
defaultGarbageCollectionInterval = time.Minute * 3
|
||||
defaultPeerLifetime = time.Minute * 30
|
||||
defaultRedisBroker = "redis://myRedis@127.0.0.1:6379/0"
|
||||
defaultRedisReadTimeout = time.Second * 15
|
||||
defaultRedisWriteTimeout = time.Second * 15
|
||||
defaultRedisConnectTimeout = time.Second * 15
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Register the storage driver.
|
||||
storage.RegisterDriver(Name, driver{})
|
||||
}
|
||||
|
||||
type driver struct{}
|
||||
|
||||
func (d driver) NewPeerStore(icfg interface{}) (storage.PeerStore, error) {
|
||||
// Marshal the config back into bytes.
|
||||
bytes, err := yaml.Marshal(icfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Unmarshal the bytes into the proper config type.
|
||||
var cfg Config
|
||||
err = yaml.Unmarshal(bytes, &cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return New(cfg)
|
||||
}
|
||||
|
||||
// Config holds the configuration of a redis PeerStore.
|
||||
type Config struct {
|
||||
GarbageCollectionInterval time.Duration `yaml:"gc_interval"`
|
||||
PrometheusReportingInterval time.Duration `yaml:"prometheus_reporting_interval"`
|
||||
PeerLifetime time.Duration `yaml:"peer_lifetime"`
|
||||
RedisBroker string `yaml:"redis_broker"`
|
||||
RedisReadTimeout time.Duration `yaml:"redis_read_timeout"`
|
||||
RedisWriteTimeout time.Duration `yaml:"redis_write_timeout"`
|
||||
RedisConnectTimeout time.Duration `yaml:"redis_connect_timeout"`
|
||||
}
|
||||
|
||||
// LogFields renders the current config as a set of Logrus fields.
|
||||
func (cfg Config) LogFields() log.Fields {
|
||||
return log.Fields{
|
||||
"name": Name,
|
||||
"gcInterval": cfg.GarbageCollectionInterval,
|
||||
"promReportInterval": cfg.PrometheusReportingInterval,
|
||||
"peerLifetime": cfg.PeerLifetime,
|
||||
"redisBroker": cfg.RedisBroker,
|
||||
"redisReadTimeout": cfg.RedisReadTimeout,
|
||||
"redisWriteTimeout": cfg.RedisWriteTimeout,
|
||||
"redisConnectTimeout": cfg.RedisConnectTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
// Validate sanity checks values set in a config and returns a new config with
|
||||
// default values replacing anything that is invalid.
|
||||
//
|
||||
// This function warns to the logger when a value is changed.
|
||||
func (cfg Config) Validate() Config {
|
||||
validcfg := cfg
|
||||
|
||||
if cfg.RedisBroker == "" {
|
||||
validcfg.RedisBroker = defaultRedisBroker
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": Name + ".RedisBroker",
|
||||
"provided": cfg.RedisBroker,
|
||||
"default": validcfg.RedisBroker,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.RedisReadTimeout <= 0 {
|
||||
validcfg.RedisReadTimeout = defaultRedisReadTimeout
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": Name + ".RedisReadTimeout",
|
||||
"provided": cfg.RedisReadTimeout,
|
||||
"default": validcfg.RedisReadTimeout,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.RedisWriteTimeout <= 0 {
|
||||
validcfg.RedisWriteTimeout = defaultRedisWriteTimeout
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": Name + ".RedisWriteTimeout",
|
||||
"provided": cfg.RedisWriteTimeout,
|
||||
"default": validcfg.RedisWriteTimeout,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.RedisConnectTimeout <= 0 {
|
||||
validcfg.RedisConnectTimeout = defaultRedisConnectTimeout
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": Name + ".RedisConnectTimeout",
|
||||
"provided": cfg.RedisConnectTimeout,
|
||||
"default": validcfg.RedisConnectTimeout,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.GarbageCollectionInterval <= 0 {
|
||||
validcfg.GarbageCollectionInterval = defaultGarbageCollectionInterval
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": Name + ".GarbageCollectionInterval",
|
||||
"provided": cfg.GarbageCollectionInterval,
|
||||
"default": validcfg.GarbageCollectionInterval,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.PrometheusReportingInterval <= 0 {
|
||||
validcfg.PrometheusReportingInterval = defaultPrometheusReportingInterval
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": Name + ".PrometheusReportingInterval",
|
||||
"provided": cfg.PrometheusReportingInterval,
|
||||
"default": validcfg.PrometheusReportingInterval,
|
||||
})
|
||||
}
|
||||
|
||||
if cfg.PeerLifetime <= 0 {
|
||||
validcfg.PeerLifetime = defaultPeerLifetime
|
||||
log.Warn("falling back to default configuration", log.Fields{
|
||||
"name": Name + ".PeerLifetime",
|
||||
"provided": cfg.PeerLifetime,
|
||||
"default": validcfg.PeerLifetime,
|
||||
})
|
||||
}
|
||||
|
||||
return validcfg
|
||||
}
|
||||
|
||||
// New creates a new PeerStore backed by redis.
|
||||
func New(provided Config) (storage.PeerStore, error) {
|
||||
cfg := provided.Validate()
|
||||
|
||||
u, err := parseRedisURL(cfg.RedisBroker)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ps := &peerStore{
|
||||
cfg: cfg,
|
||||
rb: newRedisBackend(&provided, u, ""),
|
||||
closed: make(chan struct{}),
|
||||
}
|
||||
|
||||
// Start a goroutine for garbage collection.
|
||||
ps.wg.Add(1)
|
||||
go func() {
|
||||
defer ps.wg.Done()
|
||||
for {
|
||||
select {
|
||||
case <-ps.closed:
|
||||
return
|
||||
case <-time.After(cfg.GarbageCollectionInterval):
|
||||
before := time.Now().Add(-cfg.PeerLifetime)
|
||||
log.Debug("storage: purging peers with no announces since", log.Fields{"before": before})
|
||||
if err = ps.collectGarbage(before); err != nil {
|
||||
log.Error("storage: collectGarbage error", log.Fields{"before": before, "error": err})
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Start a goroutine for reporting statistics to Prometheus.
|
||||
ps.wg.Add(1)
|
||||
go func() {
|
||||
defer ps.wg.Done()
|
||||
t := time.NewTicker(cfg.PrometheusReportingInterval)
|
||||
for {
|
||||
select {
|
||||
case <-ps.closed:
|
||||
t.Stop()
|
||||
return
|
||||
case <-t.C:
|
||||
before := time.Now()
|
||||
ps.populateProm()
|
||||
log.Debug("storage: populateProm() finished", log.Fields{"timeTaken": time.Since(before)})
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
// serializedPeer is a peer encoded by newPeerKey: 20-byte peer ID,
// 2-byte big-endian port, then the raw IP bytes. A string type is used
// so values are comparable and usable as redis hash fields.
type serializedPeer string
|
||||
|
||||
// newPeerKey serializes a peer as 20 bytes of peer ID, 2 bytes of
// big-endian port, then the raw IP bytes (whose length depends on the
// address family).
func newPeerKey(p bittorrent.Peer) serializedPeer {
	b := make([]byte, 20+2+len(p.IP.IP))
	copy(b[:20], p.ID[:])
	binary.BigEndian.PutUint16(b[20:22], p.Port)
	copy(b[22:], p.IP.IP)

	return serializedPeer(b)
}
|
||||
|
||||
// decodePeerKey is the inverse of newPeerKey. It normalizes IPv4
// addresses to 4-byte form and sets the address family; it panics when
// the trailing bytes are neither a valid IPv4 nor IPv6 address.
func decodePeerKey(pk serializedPeer) bittorrent.Peer {
	peer := bittorrent.Peer{
		ID:   bittorrent.PeerIDFromString(string(pk[:20])),
		Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
		IP:   bittorrent.IP{IP: net.IP(pk[22:])},
	}

	if ip := peer.IP.To4(); ip != nil {
		peer.IP.IP = ip
		peer.IP.AddressFamily = bittorrent.IPv4
	} else if len(peer.IP.IP) == net.IPv6len { // implies toReturn.IP.To4() == nil
		peer.IP.AddressFamily = bittorrent.IPv6
	} else {
		panic("IP is neither v4 nor v6")
	}

	return peer
}
|
||||
|
||||
// peerStore is the redis-backed storage.PeerStore implementation.
type peerStore struct {
	cfg Config
	rb  *redisBackend

	// closed is closed by Stop to signal the background goroutines;
	// wg tracks those goroutines so Stop can wait for them to exit.
	closed chan struct{}
	wg     sync.WaitGroup
}
|
||||
|
||||
// groups returns the address-family group names used as redis key
// prefixes; IPv4 and IPv6 swarms are kept fully separate.
func (ps *peerStore) groups() []string {
	return []string{bittorrent.IPv4.String(), bittorrent.IPv6.String()}
}
|
||||
|
||||
func (ps *peerStore) leecherInfohashKey(af, ih string) string {
|
||||
return af + "_L_" + ih
|
||||
}
|
||||
|
||||
func (ps *peerStore) seederInfohashKey(af, ih string) string {
|
||||
return af + "_S_" + ih
|
||||
}
|
||||
|
||||
func (ps *peerStore) infohashCountKey(af string) string {
|
||||
return af + "_infohash_count"
|
||||
}
|
||||
|
||||
func (ps *peerStore) seederCountKey(af string) string {
|
||||
return af + "_S_count"
|
||||
}
|
||||
|
||||
func (ps *peerStore) leecherCountKey(af string) string {
|
||||
return af + "_L_count"
|
||||
}
|
||||
|
||||
// populateProm aggregates metrics over all groups and then posts them to
// prometheus.
//
// A missing counter key yields redis.ErrNil, in which case the Int64
// conversion returns 0 and the zero is simply added; only other errors
// are logged.
func (ps *peerStore) populateProm() {
	var numInfohashes, numSeeders, numLeechers int64

	conn := ps.rb.open()
	defer conn.Close()

	for _, group := range ps.groups() {
		if n, err := redis.Int64(conn.Do("GET", ps.infohashCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
			log.Error("storage: GET counter failure", log.Fields{
				"key":   ps.infohashCountKey(group),
				"error": err,
			})
		} else {
			numInfohashes += n
		}
		if n, err := redis.Int64(conn.Do("GET", ps.seederCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
			log.Error("storage: GET counter failure", log.Fields{
				"key":   ps.seederCountKey(group),
				"error": err,
			})
		} else {
			numSeeders += n
		}
		if n, err := redis.Int64(conn.Do("GET", ps.leecherCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
			log.Error("storage: GET counter failure", log.Fields{
				"key":   ps.leecherCountKey(group),
				"error": err,
			})
		} else {
			numLeechers += n
		}
	}

	storage.PromInfohashesCount.Set(float64(numInfohashes))
	storage.PromSeedersCount.Set(float64(numSeeders))
	storage.PromLeechersCount.Set(float64(numLeechers))
}
|
||||
|
||||
// getClock returns the current time in Unix nanoseconds from the shared
// time cache; it is used to timestamp peer activity in redis.
func (ps *peerStore) getClock() int64 {
	return timecache.NowUnixNano()
}
|
||||
|
||||
// PutSeeder records p as a seeder of ih in the swarm matching p's
// address family, and increments the per-family seeder (and, for a new
// swarm, infohash) counters.
func (ps *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	addressFamily := p.IP.AddressFamily.String()
	log.Debug("storage: PutSeeder", log.Fields{
		"InfoHash": ih.String(),
		"Peer":     p,
	})

	// Panic on use-after-Stop; a non-blocking receive on the closed
	// channel detects shutdown.
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped redis store")
	default:
	}

	pk := newPeerKey(p)

	encodedSeederInfoHash := ps.seederInfohashKey(addressFamily, ih.String())
	ct := ps.getClock()

	conn := ps.rb.open()
	defer conn.Close()

	// Atomically stamp both the peer inside the swarm hash and the swarm
	// inside the address-family hash. HSET replies 1 for a new field.
	_ = conn.Send("MULTI")
	_ = conn.Send("HSET", encodedSeederInfoHash, pk, ct)
	_ = conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
	reply, err := redis.Int64s(conn.Do("EXEC"))
	if err != nil {
		return err
	}

	// pk is a new field.
	if reply[0] == 1 {
		_, err = conn.Do("INCR", ps.seederCountKey(addressFamily))
		if err != nil {
			return err
		}
	}
	// encodedSeederInfoHash is a new field.
	if reply[1] == 1 {
		_, err = conn.Do("INCR", ps.infohashCountKey(addressFamily))
		if err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// DeleteSeeder removes p as a seeder of ih and decrements the seeder
// counter. It returns storage.ErrResourceDoesNotExist when the peer was
// not present. The infohash entry in the address-family hash is
// deliberately left in place; collectGarbage removes it later.
func (ps *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	addressFamily := p.IP.AddressFamily.String()
	log.Debug("storage: DeleteSeeder", log.Fields{
		"InfoHash": ih.String(),
		"Peer":     p,
	})

	// Panic on use-after-Stop.
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped redis store")
	default:
	}

	pk := newPeerKey(p)

	conn := ps.rb.open()
	defer conn.Close()

	encodedSeederInfoHash := ps.seederInfohashKey(addressFamily, ih.String())

	delNum, err := redis.Int64(conn.Do("HDEL", encodedSeederInfoHash, pk))
	if err != nil {
		return err
	}
	if delNum == 0 {
		return storage.ErrResourceDoesNotExist
	}
	if _, err := conn.Do("DECR", ps.seederCountKey(addressFamily)); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// PutLeecher records p as a leecher of ih in the swarm matching p's
// address family and increments the leecher counter for a new peer.
// Unlike PutSeeder, the infohash counter is not incremented here; only
// seeder hashes contribute to that count.
func (ps *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	addressFamily := p.IP.AddressFamily.String()
	log.Debug("storage: PutLeecher", log.Fields{
		"InfoHash": ih.String(),
		"Peer":     p,
	})

	// Panic on use-after-Stop.
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped redis store")
	default:
	}

	// Update the peer in the swarm.
	encodedLeecherInfoHash := ps.leecherInfohashKey(addressFamily, ih.String())
	pk := newPeerKey(p)
	ct := ps.getClock()

	conn := ps.rb.open()
	defer conn.Close()

	// Atomically stamp both the peer inside the swarm hash and the swarm
	// inside the address-family hash. HSET replies 1 for a new field.
	_ = conn.Send("MULTI")
	_ = conn.Send("HSET", encodedLeecherInfoHash, pk, ct)
	_ = conn.Send("HSET", addressFamily, encodedLeecherInfoHash, ct)
	reply, err := redis.Int64s(conn.Do("EXEC"))
	if err != nil {
		return err
	}
	// pk is a new field.
	if reply[0] == 1 {
		_, err = conn.Do("INCR", ps.leecherCountKey(addressFamily))
		if err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// DeleteLeecher removes p as a leecher of ih and decrements the leecher
// counter. It returns storage.ErrResourceDoesNotExist when the peer was
// not present. The infohash entry in the address-family hash is
// deliberately left in place; collectGarbage removes it later.
func (ps *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	addressFamily := p.IP.AddressFamily.String()
	log.Debug("storage: DeleteLeecher", log.Fields{
		"InfoHash": ih.String(),
		"Peer":     p,
	})

	// Panic on use-after-Stop.
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped redis store")
	default:
	}

	conn := ps.rb.open()
	defer conn.Close()

	pk := newPeerKey(p)
	encodedLeecherInfoHash := ps.leecherInfohashKey(addressFamily, ih.String())

	delNum, err := redis.Int64(conn.Do("HDEL", encodedLeecherInfoHash, pk))
	if err != nil {
		return err
	}
	if delNum == 0 {
		return storage.ErrResourceDoesNotExist
	}
	if _, err := conn.Do("DECR", ps.leecherCountKey(addressFamily)); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// GraduateLeecher moves p from the leecher hash to the seeder hash of ih
// in a single transaction, then adjusts the leecher/seeder (and, for a
// newly-created seeder swarm, infohash) counters according to which
// fields actually changed.
func (ps *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	addressFamily := p.IP.AddressFamily.String()
	log.Debug("storage: GraduateLeecher", log.Fields{
		"InfoHash": ih.String(),
		"Peer":     p,
	})

	// Panic on use-after-Stop.
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped redis store")
	default:
	}

	encodedInfoHash := ih.String()
	encodedLeecherInfoHash := ps.leecherInfohashKey(addressFamily, encodedInfoHash)
	encodedSeederInfoHash := ps.seederInfohashKey(addressFamily, encodedInfoHash)
	pk := newPeerKey(p)
	ct := ps.getClock()

	conn := ps.rb.open()
	defer conn.Close()

	// reply[0]: HDEL returns 1 if the peer was a leecher.
	// reply[1]: HSET returns 1 if the peer is a new seeder.
	// reply[2]: HSET returns 1 if the seeder swarm is new.
	_ = conn.Send("MULTI")
	_ = conn.Send("HDEL", encodedLeecherInfoHash, pk)
	_ = conn.Send("HSET", encodedSeederInfoHash, pk, ct)
	_ = conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
	reply, err := redis.Int64s(conn.Do("EXEC"))
	if err != nil {
		return err
	}
	if reply[0] == 1 {
		_, err = conn.Do("DECR", ps.leecherCountKey(addressFamily))
		if err != nil {
			return err
		}
	}
	if reply[1] == 1 {
		_, err = conn.Do("INCR", ps.seederCountKey(addressFamily))
		if err != nil {
			return err
		}
	}
	if reply[2] == 1 {
		_, err = conn.Do("INCR", ps.infohashCountKey(addressFamily))
		if err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
func (ps *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant int, announcer bittorrent.Peer) (peers []bittorrent.Peer, err error) {
|
||||
addressFamily := announcer.IP.AddressFamily.String()
|
||||
log.Debug("storage: AnnouncePeers", log.Fields{
|
||||
"InfoHash": ih.String(),
|
||||
"seeder": seeder,
|
||||
"numWant": numWant,
|
||||
"Peer": announcer,
|
||||
})
|
||||
|
||||
select {
|
||||
case <-ps.closed:
|
||||
panic("attempted to interact with stopped redis store")
|
||||
default:
|
||||
}
|
||||
|
||||
encodedInfoHash := ih.String()
|
||||
encodedLeecherInfoHash := ps.leecherInfohashKey(addressFamily, encodedInfoHash)
|
||||
encodedSeederInfoHash := ps.seederInfohashKey(addressFamily, encodedInfoHash)
|
||||
|
||||
conn := ps.rb.open()
|
||||
defer conn.Close()
|
||||
|
||||
leechers, err := conn.Do("HKEYS", encodedLeecherInfoHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conLeechers := leechers.([]interface{})
|
||||
|
||||
seeders, err := conn.Do("HKEYS", encodedSeederInfoHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conSeeders := seeders.([]interface{})
|
||||
|
||||
if len(conLeechers) == 0 && len(conSeeders) == 0 {
|
||||
return nil, storage.ErrResourceDoesNotExist
|
||||
}
|
||||
|
||||
if seeder {
|
||||
// Append leechers as possible.
|
||||
for _, pk := range conLeechers {
|
||||
if numWant == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
peers = append(peers, decodePeerKey(serializedPeer(pk.([]byte))))
|
||||
numWant--
|
||||
}
|
||||
} else {
|
||||
// Append as many seeders as possible.
|
||||
for _, pk := range conSeeders {
|
||||
if numWant == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
peers = append(peers, decodePeerKey(serializedPeer(pk.([]byte))))
|
||||
numWant--
|
||||
}
|
||||
|
||||
// Append leechers until we reach numWant.
|
||||
if numWant > 0 {
|
||||
announcerPK := newPeerKey(announcer)
|
||||
for _, pk := range conLeechers {
|
||||
if pk == announcerPK {
|
||||
continue
|
||||
}
|
||||
|
||||
if numWant == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
peers = append(peers, decodePeerKey(serializedPeer(pk.([]byte))))
|
||||
numWant--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ScrapeSwarm reports the leecher (Incomplete) and seeder (Complete)
// counts of ih for the given address family via HLEN on the two swarm
// hashes. On a redis error it logs and returns a partially-filled
// response (InfoHash is always set).
func (ps *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, af bittorrent.AddressFamily) (resp bittorrent.Scrape) {
	// Panic on use-after-Stop.
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped redis store")
	default:
	}

	resp.InfoHash = ih
	addressFamily := af.String()
	encodedInfoHash := ih.String()
	encodedLeecherInfoHash := ps.leecherInfohashKey(addressFamily, encodedInfoHash)
	encodedSeederInfoHash := ps.seederInfohashKey(addressFamily, encodedInfoHash)

	conn := ps.rb.open()
	defer conn.Close()

	leechersLen, err := redis.Int64(conn.Do("HLEN", encodedLeecherInfoHash))
	if err != nil {
		log.Error("storage: Redis HLEN failure", log.Fields{
			"Hkey":  encodedLeecherInfoHash,
			"error": err,
		})
		return
	}

	seedersLen, err := redis.Int64(conn.Do("HLEN", encodedSeederInfoHash))
	if err != nil {
		log.Error("storage: Redis HLEN failure", log.Fields{
			"Hkey":  encodedSeederInfoHash,
			"error": err,
		})
		return
	}

	resp.Incomplete = uint32(leechersLen)
	resp.Complete = uint32(seedersLen)

	return
}
|
||||
|
||||
// collectGarbage deletes all Peers from the PeerStore which are older than the
// cutoff time.
//
// This function must be able to execute while other methods on this interface
// are being executed in parallel.
//
//   - The Delete(Seeder|Leecher) and GraduateLeecher methods never delete an
//     infohash key from an addressFamily hash. They also never decrement the
//     infohash counter.
//   - The Put(Seeder|Leecher) and GraduateLeecher methods only ever add infohash
//     keys to addressFamily hashes and increment the infohash counter.
//   - The only method that deletes from the addressFamily hashes is
//     collectGarbage, which also decrements the counters. That means that,
//     even if a Delete(Seeder|Leecher) call removes the last peer from a swarm,
//     the infohash counter is not changed and the infohash is left in the
//     addressFamily hash until it will be cleaned up by collectGarbage.
//   - collectGarbage must run regularly.
//   - A WATCH ... MULTI ... EXEC block fails, if between the WATCH and the 'EXEC'
//     any of the watched keys have changed. The location of the 'MULTI' doesn't
//     matter.
//
// We have to analyze four cases to prove our algorithm works. I'll characterize
// them by a tuple (number of peers in a swarm before WATCH, number of peers in
// the swarm during the transaction).
//
//  1. (0,0), the easy case: The swarm is empty, we watch the key, we execute
//     HLEN and find it empty. We remove it and decrement the counter. It stays
//     empty the entire time, the transaction goes through.
//  2. (1,n > 0): The swarm is not empty, we watch the key, we find it non-empty,
//     we unwatch the key. All good. No transaction is made, no transaction fails.
//  3. (0,1): We have to analyze this in two ways.
//     - If the change happens before the HLEN call, we will see that the swarm is
//     not empty and start no transaction.
//     - If the change happens after the HLEN, we will attempt a transaction and it
//     will fail. This is okay, the swarm is not empty, we will try cleaning it up
//     next time collectGarbage runs.
//  4. (1,0): Again, two ways:
//     - If the change happens before the HLEN, we will see an empty swarm. This
//     situation happens if a call to Delete(Seeder|Leecher) removed the last
//     peer asynchronously. We will attempt a transaction, but the transaction
//     will fail. This is okay, the infohash key will remain in the addressFamily
//     hash, we will attempt to clean it up the next time collectGarbage runs.
//     - If the change happens after the HLEN, we will not even attempt to make the
//     transaction. The infohash key will remain in the addressFamily hash and
//     we'll attempt to clean it up the next time collectGarbage runs.
func (ps *peerStore) collectGarbage(cutoff time.Time) error {
	// Silently no-op after Stop; the GC goroutine may race shutdown.
	select {
	case <-ps.closed:
		return nil
	default:
	}

	conn := ps.rb.open()
	defer conn.Close()

	cutoffUnix := cutoff.UnixNano()
	start := time.Now()

	for _, group := range ps.groups() {
		// list all infohashes in the group
		infohashesList, err := redis.Strings(conn.Do("HKEYS", group))
		if err != nil {
			return err
		}

		for _, ihStr := range infohashesList {
			// Swarm keys look like "IPv4_S_<ih>" / "IPv6_L_<ih>"; byte 5
			// distinguishes the seeder from the leecher hash.
			isSeeder := len(ihStr) > 5 && ihStr[5:6] == "S"

			// list all (peer, timeout) pairs for the ih
			ihList, err := redis.Strings(conn.Do("HGETALL", ihStr))
			if err != nil {
				return err
			}

			// HGETALL alternates field, value; remember the field (the
			// serialized peer) so it can be deleted when its value (the
			// last-announce timestamp) is expired.
			var pk serializedPeer
			var removedPeerCount int64
			for index, ihField := range ihList {
				if index%2 == 1 { // value
					mtime, err := strconv.ParseInt(ihField, 10, 64)
					if err != nil {
						return err
					}
					if mtime <= cutoffUnix {
						log.Debug("storage: deleting peer", log.Fields{
							"Peer": decodePeerKey(pk).String(),
						})
						ret, err := redis.Int64(conn.Do("HDEL", ihStr, pk))
						if err != nil {
							return err
						}

						removedPeerCount += ret
					}
				} else { // key
					pk = serializedPeer([]byte(ihField))
				}
			}
			// DECR seeder/leecher counter
			decrCounter := ps.leecherCountKey(group)
			if isSeeder {
				decrCounter = ps.seederCountKey(group)
			}
			if removedPeerCount > 0 {
				if _, err := conn.Do("DECRBY", decrCounter, removedPeerCount); err != nil {
					return err
				}
			}

			// use WATCH to avoid race condition
			// https://redis.io/topics/transactions
			_, err = conn.Do("WATCH", ihStr)
			if err != nil {
				return err
			}
			ihLen, err := redis.Int64(conn.Do("HLEN", ihStr))
			if err != nil {
				return err
			}
			if ihLen == 0 {
				// Empty hashes are not shown among existing keys,
				// in other words, it's removed automatically after `HDEL` the last field.
				//_, err := conn.Do("DEL", ihStr)

				_ = conn.Send("MULTI")
				_ = conn.Send("HDEL", group, ihStr)
				if isSeeder {
					_ = conn.Send("DECR", ps.infohashCountKey(group))
				}
				_, err = redis.Values(conn.Do("EXEC"))
				// redis.ErrNil here means the watched key changed and the
				// transaction was aborted; that is expected and benign.
				if err != nil && !errors.Is(err, redis.ErrNil) {
					log.Error("storage: Redis EXEC failure", log.Fields{
						"group":    group,
						"infohash": ihStr,
						"error":    err,
					})
				}
			} else {
				if _, err = conn.Do("UNWATCH"); err != nil && !errors.Is(err, redis.ErrNil) {
					log.Error("storage: Redis UNWATCH failure", log.Fields{"error": err})
				}
			}
		}
	}

	duration := float64(time.Since(start).Nanoseconds()) / float64(time.Millisecond)
	log.Debug("storage: recordGCDuration", log.Fields{"timeTaken(ms)": duration})
	storage.PromGCDurationMilliseconds.Observe(duration)

	return nil
}
|
||||
|
||||
func (ps *peerStore) Stop() stop.Result {
|
||||
c := make(stop.Channel)
|
||||
go func() {
|
||||
close(ps.closed)
|
||||
ps.wg.Wait()
|
||||
log.Info("storage: exiting. chihaya does not clear data in redis when exiting. chihaya keys have prefix 'IPv{4,6}_'.")
|
||||
c.Done()
|
||||
}()
|
||||
|
||||
return c.Result()
|
||||
}
|
||||
|
||||
// LogFields renders the store's configuration as a set of Logrus fields
// by delegating to the Config.
func (ps *peerStore) LogFields() log.Fields {
	return ps.cfg.LogFields()
}
|
62
storage/redis/peer_store_test.go
Normal file
62
storage/redis/peer_store_test.go
Normal file
|
@ -0,0 +1,62 @@
|
|||
package redis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/alicebob/miniredis"
|
||||
|
||||
s "github.com/chihaya/chihaya/storage"
|
||||
)
|
||||
|
||||
// createNew starts an in-process miniredis server and returns a redis
// PeerStore connected to it; it panics on any setup failure so tests
// fail loudly. Each call creates a fresh, isolated server.
func createNew() s.PeerStore {
	rs, err := miniredis.Run()
	if err != nil {
		panic(err)
	}
	redisURL := fmt.Sprintf("redis://@%s/0", rs.Addr())
	ps, err := New(Config{
		GarbageCollectionInterval:   10 * time.Minute,
		PrometheusReportingInterval: 10 * time.Minute,
		PeerLifetime:                30 * time.Minute,
		RedisBroker:                 redisURL,
		RedisReadTimeout:            10 * time.Second,
		RedisWriteTimeout:           10 * time.Second,
		RedisConnectTimeout:         10 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	return ps
}
|
||||
|
||||
// TestPeerStore runs the shared storage conformance suite against a
// fresh miniredis-backed store.
func TestPeerStore(t *testing.T) { s.TestPeerStore(t, createNew()) }

// The benchmarks below each delegate to the shared storage benchmark
// suite, running against a fresh miniredis-backed store.
func BenchmarkNop(b *testing.B)                        { s.Nop(b, createNew()) }
func BenchmarkPut(b *testing.B)                        { s.Put(b, createNew()) }
func BenchmarkPut1k(b *testing.B)                      { s.Put1k(b, createNew()) }
func BenchmarkPut1kInfohash(b *testing.B)              { s.Put1kInfohash(b, createNew()) }
func BenchmarkPut1kInfohash1k(b *testing.B)            { s.Put1kInfohash1k(b, createNew()) }
func BenchmarkPutDelete(b *testing.B)                  { s.PutDelete(b, createNew()) }
func BenchmarkPutDelete1k(b *testing.B)                { s.PutDelete1k(b, createNew()) }
func BenchmarkPutDelete1kInfohash(b *testing.B)        { s.PutDelete1kInfohash(b, createNew()) }
func BenchmarkPutDelete1kInfohash1k(b *testing.B)      { s.PutDelete1kInfohash1k(b, createNew()) }
func BenchmarkDeleteNonexist(b *testing.B)             { s.DeleteNonexist(b, createNew()) }
func BenchmarkDeleteNonexist1k(b *testing.B)           { s.DeleteNonexist1k(b, createNew()) }
func BenchmarkDeleteNonexist1kInfohash(b *testing.B)   { s.DeleteNonexist1kInfohash(b, createNew()) }
func BenchmarkDeleteNonexist1kInfohash1k(b *testing.B) { s.DeleteNonexist1kInfohash1k(b, createNew()) }
func BenchmarkPutGradDelete(b *testing.B)              { s.PutGradDelete(b, createNew()) }
func BenchmarkPutGradDelete1k(b *testing.B)            { s.PutGradDelete1k(b, createNew()) }
func BenchmarkPutGradDelete1kInfohash(b *testing.B)    { s.PutGradDelete1kInfohash(b, createNew()) }
func BenchmarkPutGradDelete1kInfohash1k(b *testing.B)  { s.PutGradDelete1kInfohash1k(b, createNew()) }
func BenchmarkGradNonexist(b *testing.B)               { s.GradNonexist(b, createNew()) }
func BenchmarkGradNonexist1k(b *testing.B)             { s.GradNonexist1k(b, createNew()) }
func BenchmarkGradNonexist1kInfohash(b *testing.B)     { s.GradNonexist1kInfohash(b, createNew()) }
func BenchmarkGradNonexist1kInfohash1k(b *testing.B)   { s.GradNonexist1kInfohash1k(b, createNew()) }
func BenchmarkAnnounceLeecher(b *testing.B)            { s.AnnounceLeecher(b, createNew()) }
func BenchmarkAnnounceLeecher1kInfohash(b *testing.B)  { s.AnnounceLeecher1kInfohash(b, createNew()) }
func BenchmarkAnnounceSeeder(b *testing.B)             { s.AnnounceSeeder(b, createNew()) }
func BenchmarkAnnounceSeeder1kInfohash(b *testing.B)   { s.AnnounceSeeder1kInfohash(b, createNew()) }
func BenchmarkScrapeSwarm(b *testing.B)                { s.ScrapeSwarm(b, createNew()) }
func BenchmarkScrapeSwarm1kInfohash(b *testing.B)      { s.ScrapeSwarm1kInfohash(b, createNew()) }
|
136
storage/redis/redis.go
Normal file
136
storage/redis/redis.go
Normal file
|
@ -0,0 +1,136 @@
|
|||
package redis
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-redsync/redsync/v4"
|
||||
"github.com/go-redsync/redsync/v4/redis/redigo"
|
||||
redigolib "github.com/gomodule/redigo/redis"
|
||||
)
|
||||
|
||||
// redisBackend represents a redis handler: a connection pool plus a
// redsync instance built on top of it for distributed locking.
type redisBackend struct {
	pool    *redigolib.Pool
	redsync *redsync.Redsync
}
|
||||
|
||||
// newRedisBackend creates a redisBackend instance.
|
||||
func newRedisBackend(cfg *Config, u *redisURL, socketPath string) *redisBackend {
|
||||
rc := &redisConnector{
|
||||
URL: u,
|
||||
SocketPath: socketPath,
|
||||
ReadTimeout: cfg.RedisReadTimeout,
|
||||
WriteTimeout: cfg.RedisWriteTimeout,
|
||||
ConnectTimeout: cfg.RedisConnectTimeout,
|
||||
}
|
||||
pool := rc.NewPool()
|
||||
redsync := redsync.New(redigo.NewPool(pool))
|
||||
return &redisBackend{
|
||||
pool: pool,
|
||||
redsync: redsync,
|
||||
}
|
||||
}
|
||||
|
||||
// open returns a connection from the pool; the caller must Close it to
// return it to the pool.
func (rb *redisBackend) open() redigolib.Conn {
	return rb.pool.Get()
}
|
||||
|
||||
// redisConnector bundles everything needed to dial redis: a parsed URL
// for TCP, or a unix socket path, plus per-operation timeouts.
type redisConnector struct {
	URL            *redisURL
	SocketPath     string
	ReadTimeout    time.Duration
	WriteTimeout   time.Duration
	ConnectTimeout time.Duration
}
|
||||
|
||||
// NewPool returns a new pool of Redis connections
func (rc *redisConnector) NewPool() *redigolib.Pool {
	return &redigolib.Pool{
		MaxIdle:     3,
		IdleTimeout: 240 * time.Second,
		Dial: func() (redigolib.Conn, error) {
			c, err := rc.open()
			if err != nil {
				return nil, err
			}

			// Explicitly SELECT the configured database. (open already
			// passes DialDatabase; this appears to be a belt-and-braces
			// re-selection — confirm before removing.)
			if rc.URL.DB != 0 {
				_, err = c.Do("SELECT", rc.URL.DB)
				if err != nil {
					return nil, err
				}
			}

			return c, err
		},
		// PINGs connections that have been idle more than 10 seconds
		TestOnBorrow: func(c redigolib.Conn, t time.Time) error {
			if time.Since(t) < 10*time.Second {
				return nil
			}
			_, err := c.Do("PING")
			return err
		},
	}
}
|
||||
|
||||
// open dials a new Redis connection, preferring the unix socket when a
// SocketPath is configured and falling back to TCP against URL.Host.
// The configured timeouts and optional password are applied either way.
func (rc *redisConnector) open() (redigolib.Conn, error) {
	opts := []redigolib.DialOption{
		redigolib.DialDatabase(rc.URL.DB),
		redigolib.DialReadTimeout(rc.ReadTimeout),
		redigolib.DialWriteTimeout(rc.WriteTimeout),
		redigolib.DialConnectTimeout(rc.ConnectTimeout),
	}

	if rc.URL.Password != "" {
		opts = append(opts, redigolib.DialPassword(rc.URL.Password))
	}

	if rc.SocketPath != "" {
		return redigolib.Dial("unix", rc.SocketPath, opts...)
	}

	return redigolib.Dial("tcp", rc.URL.Host, opts...)
}
|
||||
|
||||
// A redisURL represents a parsed redisURL
// The general form represented is:
//
//	redis://[password@]host[/][db]
type redisURL struct {
	Host     string
	Password string
	DB       int
}

// parseRedisURL parses target into a redisURL. It accepts the forms
// "redis://host", "redis://host/", and "redis://[password@]host/db";
// an absent or empty db path segment selects the default database 0.
func parseRedisURL(target string) (*redisURL, error) {
	var u *url.URL
	u, err := url.Parse(target)
	if err != nil {
		return nil, err
	}
	if u.Scheme != "redis" {
		return nil, errors.New("no redis scheme found")
	}

	db := 0 // default redis db
	parts := strings.Split(u.Path, "/")
	// parts[0] is the empty segment before the leading slash. A bare
	// trailing "/" leaves parts[1] empty, which previously failed in
	// Atoi; treat it as the default db instead, matching the documented
	// URL form.
	if len(parts) > 1 && parts[1] != "" {
		db, err = strconv.Atoi(parts[1])
		if err != nil {
			return nil, err
		}
	}

	// u.User is nil when no userinfo component is present; guard it
	// rather than relying on a nil-receiver String call.
	password := ""
	if u.User != nil {
		password = u.User.String()
	}

	return &redisURL{
		Host:     u.Host,
		Password: password,
		DB:       db,
	}, nil
}
|
|
@ -1,49 +1,88 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
"github.com/chihaya/chihaya/stopper"
|
||||
"github.com/chihaya/chihaya/pkg/log"
|
||||
"github.com/chihaya/chihaya/pkg/stop"
|
||||
)
|
||||
|
||||
// driversM guards drivers, the registry of available PeerStore drivers
// keyed by name. (The previous comment here described
// ErrResourceDoesNotExist, which is declared further below.)
var (
	driversM sync.RWMutex
	drivers  = make(map[string]Driver)
)
|
||||
|
||||
// Driver is the interface used to initialize a new type of PeerStore.
type Driver interface {
	// NewPeerStore constructs a PeerStore from an untyped configuration
	// value (typically the YAML-decoded driver section).
	NewPeerStore(cfg interface{}) (PeerStore, error)
}
|
||||
|
||||
// ErrResourceDoesNotExist is the error returned by all delete methods and the
|
||||
// AnnouncePeers method of the PeerStore interface if the requested resource
|
||||
// does not exist.
|
||||
var ErrResourceDoesNotExist = bittorrent.ClientError("resource does not exist")
|
||||
|
||||
// ErrDriverDoesNotExist is the error returned by NewPeerStore when a peer
|
||||
// store driver with that name does not exist.
|
||||
var ErrDriverDoesNotExist = errors.New("peer store driver with that name does not exist")
|
||||
|
||||
// PeerStore is an interface that abstracts the interactions of storing and
|
||||
// manipulating Peers such that it can be implemented for various data stores.
|
||||
//
|
||||
// Implementations of the PeerStore interface must do the following in addition
|
||||
// to implementing the methods of the interface in the way documented:
|
||||
//
|
||||
// - Implement a garbage-collection strategy that ensures stale data is removed.
|
||||
// For example, a timestamp on each InfoHash/Peer combination can be used
|
||||
// to track the last activity for that Peer. The entire database can then
|
||||
// be scanned periodically and too old Peers removed. The intervals and
|
||||
// durations involved should be configurable.
|
||||
// - IPv4 and IPv6 swarms must be isolated from each other.
|
||||
// A PeerStore must be able to transparently handle IPv4 and IPv6 Peers, but
|
||||
// must separate them. AnnouncePeers and ScrapeSwarm must return information
|
||||
// about the Swarm matching the given AddressFamily only.
|
||||
//
|
||||
// Implementations can be tested against this interface using the tests in
|
||||
// storage_tests.go and the benchmarks in storage_bench.go.
|
||||
type PeerStore interface {
|
||||
// PutSeeder adds a Seeder to the Swarm identified by the provided
|
||||
// infoHash.
|
||||
// InfoHash.
|
||||
PutSeeder(infoHash bittorrent.InfoHash, p bittorrent.Peer) error
|
||||
|
||||
// DeleteSeeder removes a Seeder from the Swarm identified by the
|
||||
// provided infoHash.
|
||||
// provided InfoHash.
|
||||
//
|
||||
// If the Swarm or Peer does not exist, this function should return
|
||||
// If the Swarm or Peer does not exist, this function returns
|
||||
// ErrResourceDoesNotExist.
|
||||
DeleteSeeder(infoHash bittorrent.InfoHash, p bittorrent.Peer) error
|
||||
|
||||
// PutLeecher adds a Leecher to the Swarm identified by the provided
|
||||
// infoHash.
|
||||
// InfoHash.
|
||||
// If the Swarm does not exist already, it is created.
|
||||
PutLeecher(infoHash bittorrent.InfoHash, p bittorrent.Peer) error
|
||||
|
||||
// DeleteLeecher removes a Leecher from the Swarm identified by the
|
||||
// provided infoHash.
|
||||
// provided InfoHash.
|
||||
//
|
||||
// If the Swarm or Peer does not exist, this function should return
|
||||
// If the Swarm or Peer does not exist, this function returns
|
||||
// ErrResourceDoesNotExist.
|
||||
DeleteLeecher(infoHash bittorrent.InfoHash, p bittorrent.Peer) error
|
||||
|
||||
// GraduateLeecher promotes a Leecher to a Seeder in the Swarm
|
||||
// identified by the provided infoHash.
|
||||
// identified by the provided InfoHash.
|
||||
//
|
||||
// If the given Peer is not present as a Leecher, add the Peer as a
|
||||
// Seeder and return no error.
|
||||
// If the given Peer is not present as a Leecher or the swarm does not exist
|
||||
// already, the Peer is added as a Seeder and no error is returned.
|
||||
GraduateLeecher(infoHash bittorrent.InfoHash, p bittorrent.Peer) error
|
||||
|
||||
// AnnouncePeers is a best effort attempt to return Peers from the Swarm
|
||||
// identified by the provided infoHash. The returned Peers are required
|
||||
// to be either all IPv4 or all IPv6.
|
||||
// identified by the provided InfoHash.
|
||||
// The numWant parameter indicates the number of peers requested by the
|
||||
// announcing Peer p. The seeder flag determines whether the Peer announced
|
||||
// as a Seeder.
|
||||
// The returned Peers are required to be either all IPv4 or all IPv6.
|
||||
//
|
||||
// The returned Peers should strive be:
|
||||
// - as close to length equal to numWant as possible without going over
|
||||
|
@ -52,21 +91,64 @@ type PeerStore interface {
|
|||
// - if seeder is false, should ideally return more seeders than
|
||||
// leechers
|
||||
//
|
||||
// Returns ErrResourceDoesNotExist if the provided infoHash is not tracked.
|
||||
// Returns ErrResourceDoesNotExist if the provided InfoHash is not tracked.
|
||||
AnnouncePeers(infoHash bittorrent.InfoHash, seeder bool, numWant int, p bittorrent.Peer) (peers []bittorrent.Peer, err error)
|
||||
|
||||
// ScrapeSwarm returns information required to answer a scrape request
|
||||
// about a swarm identified by the given infohash.
|
||||
// The v6 flag indicates whether or not the IPv6 swarm should be
|
||||
// ScrapeSwarm returns information required to answer a Scrape request
|
||||
// about a Swarm identified by the given InfoHash.
|
||||
// The AddressFamily indicates whether or not the IPv6 swarm should be
|
||||
// scraped.
|
||||
// The Complete and Incomplete fields of the Scrape must be filled,
|
||||
// filling the Snatches field is optional.
|
||||
// If the infohash is unknown to the PeerStore, an empty Scrape is
|
||||
// returned.
|
||||
ScrapeSwarm(infoHash bittorrent.InfoHash, v6 bool) bittorrent.Scrape
|
||||
//
|
||||
// If the Swarm does not exist, an empty Scrape and no error is returned.
|
||||
ScrapeSwarm(infoHash bittorrent.InfoHash, addressFamily bittorrent.AddressFamily) bittorrent.Scrape
|
||||
|
||||
// Stopper is an interface that expects a Stop method to stop the
|
||||
// stop.Stopper is an interface that expects a Stop method to stop the
|
||||
// PeerStore.
|
||||
// For more details see the documentation in the stopper package.
|
||||
stopper.Stopper
|
||||
// For more details see the documentation in the stop package.
|
||||
stop.Stopper
|
||||
|
||||
// log.Fielder returns a loggable version of the data used to configure and
|
||||
// operate a particular PeerStore.
|
||||
log.Fielder
|
||||
}
|
||||
|
||||
// RegisterDriver makes a Driver available by the provided name.
|
||||
//
|
||||
// If called twice with the same name, the name is blank, or if the provided
|
||||
// Driver is nil, this function panics.
|
||||
func RegisterDriver(name string, d Driver) {
|
||||
if name == "" {
|
||||
panic("storage: could not register a Driver with an empty name")
|
||||
}
|
||||
if d == nil {
|
||||
panic("storage: could not register a nil Driver")
|
||||
}
|
||||
|
||||
driversM.Lock()
|
||||
defer driversM.Unlock()
|
||||
|
||||
if _, dup := drivers[name]; dup {
|
||||
panic("storage: RegisterDriver called twice for " + name)
|
||||
}
|
||||
|
||||
drivers[name] = d
|
||||
}
|
||||
|
||||
// NewPeerStore attempts to initialize a new PeerStore instance from
|
||||
// the list of registered Drivers.
|
||||
//
|
||||
// If a driver does not exist, returns ErrDriverDoesNotExist.
|
||||
func NewPeerStore(name string, cfg interface{}) (ps PeerStore, err error) {
|
||||
driversM.RLock()
|
||||
defer driversM.RUnlock()
|
||||
|
||||
var d Driver
|
||||
d, ok := drivers[name]
|
||||
if !ok {
|
||||
return nil, ErrDriverDoesNotExist
|
||||
}
|
||||
|
||||
return d.NewPeerStore(cfg)
|
||||
}
|
||||
|
|
|
@ -45,7 +45,7 @@ func generatePeers() (a [1000]bittorrent.Peer) {
|
|||
port := uint16(r.Uint32())
|
||||
a[i] = bittorrent.Peer{
|
||||
ID: bittorrent.PeerID(id),
|
||||
IP: net.IP(ip),
|
||||
IP: bittorrent.IP{IP: net.IP(ip), AddressFamily: bittorrent.IPv4},
|
||||
Port: port,
|
||||
}
|
||||
}
|
||||
|
@ -53,8 +53,10 @@ func generatePeers() (a [1000]bittorrent.Peer) {
|
|||
return
|
||||
}
|
||||
|
||||
type executionFunc func(int, PeerStore, *benchData) error
|
||||
type setupFunc func(PeerStore, *benchData) error
|
||||
type (
|
||||
executionFunc func(int, PeerStore, *benchData) error
|
||||
setupFunc func(PeerStore, *benchData) error
|
||||
)
|
||||
|
||||
func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef executionFunc) {
|
||||
bd := &benchData{generateInfohashes(), generatePeers()}
|
||||
|
@ -95,6 +97,19 @@ func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef ex
|
|||
}
|
||||
}
|
||||
|
||||
// Nop executes a no-op for each iteration.
|
||||
// It should produce the same results for each PeerStore.
|
||||
// This can be used to get an estimate of the impact of the benchmark harness
|
||||
// on benchmark results and an estimate of the general performance of the system
|
||||
// benchmarked on.
|
||||
//
|
||||
// Nop can run in parallel.
|
||||
func Nop(b *testing.B, ps PeerStore) {
|
||||
runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error {
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Put benchmarks the PutSeeder method of a PeerStore by repeatedly Putting the
|
||||
// same Peer for the same InfoHash.
|
||||
//
|
||||
|
@ -172,6 +187,7 @@ func PutDelete1kInfohash(b *testing.B, ps PeerStore) {
|
|||
runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
|
||||
err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
|
||||
})
|
||||
|
@ -198,7 +214,7 @@ func PutDelete1kInfohash1k(b *testing.B, ps PeerStore) {
|
|||
// DeleteNonexist can run in parallel.
|
||||
func DeleteNonexist(b *testing.B, ps PeerStore) {
|
||||
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
|
||||
ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
|
||||
_ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
@ -209,7 +225,7 @@ func DeleteNonexist(b *testing.B, ps PeerStore) {
|
|||
// DeleteNonexist can run in parallel.
|
||||
func DeleteNonexist1k(b *testing.B, ps PeerStore) {
|
||||
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
|
||||
ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
|
||||
_ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
@ -220,7 +236,7 @@ func DeleteNonexist1k(b *testing.B, ps PeerStore) {
|
|||
// DeleteNonexist1kInfohash can run in parallel.
|
||||
func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
|
||||
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
|
||||
ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
|
||||
_ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
@ -231,7 +247,7 @@ func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
|
|||
// DeleteNonexist1kInfohash1k can run in parallel.
|
||||
func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
|
||||
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
|
||||
ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
|
||||
_ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
@ -242,7 +258,7 @@ func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
|
|||
// GradNonexist can run in parallel.
|
||||
func GradNonexist(b *testing.B, ps PeerStore) {
|
||||
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
|
||||
ps.GraduateLeecher(bd.infohashes[0], bd.peers[0])
|
||||
_ = ps.GraduateLeecher(bd.infohashes[0], bd.peers[0])
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
@ -253,7 +269,7 @@ func GradNonexist(b *testing.B, ps PeerStore) {
|
|||
// GradNonexist1k can run in parallel.
|
||||
func GradNonexist1k(b *testing.B, ps PeerStore) {
|
||||
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
|
||||
ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000])
|
||||
_ = ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000])
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
@ -264,7 +280,7 @@ func GradNonexist1k(b *testing.B, ps PeerStore) {
|
|||
// GradNonexist1kInfohash can run in parallel.
|
||||
func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
|
||||
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
|
||||
ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0])
|
||||
_ = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0])
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
@ -276,7 +292,7 @@ func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
|
|||
// GradNonexist1kInfohash1k can run in parallel.
|
||||
func GradNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
|
||||
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
|
||||
ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
|
||||
_ = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
@ -415,3 +431,24 @@ func AnnounceSeeder1kInfohash(b *testing.B, ps PeerStore) {
|
|||
return err
|
||||
})
|
||||
}
|
||||
|
||||
// ScrapeSwarm benchmarks the ScrapeSwarm method of a PeerStore.
|
||||
// The swarm scraped has 500 seeders and 500 leechers.
|
||||
//
|
||||
// ScrapeSwarm can run in parallel.
|
||||
func ScrapeSwarm(b *testing.B, ps PeerStore) {
|
||||
runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error {
|
||||
ps.ScrapeSwarm(bd.infohashes[0], bittorrent.IPv4)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// ScrapeSwarm1kInfohash behaves like ScrapeSwarm with one of 1000 infohashes.
|
||||
//
|
||||
// ScrapeSwarm1kInfohash can run in parallel.
|
||||
func ScrapeSwarm1kInfohash(b *testing.B, ps PeerStore) {
|
||||
runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error {
|
||||
ps.ScrapeSwarm(bd.infohashes[i%1000], bittorrent.IPv4)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
|
163
storage/storage_tests.go
Normal file
163
storage/storage_tests.go
Normal file
|
@ -0,0 +1,163 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/chihaya/chihaya/bittorrent"
|
||||
)
|
||||
|
||||
// PeerEqualityFunc is the boolean function to use to check two Peers for
|
||||
// equality.
|
||||
// Depending on the implementation of the PeerStore, this can be changed to
|
||||
// use (Peer).EqualEndpoint instead.
|
||||
var PeerEqualityFunc = func(p1, p2 bittorrent.Peer) bool { return p1.Equal(p2) }
|
||||
|
||||
// TestPeerStore tests a PeerStore implementation against the interface.
|
||||
func TestPeerStore(t *testing.T, p PeerStore) {
|
||||
testData := []struct {
|
||||
ih bittorrent.InfoHash
|
||||
peer bittorrent.Peer
|
||||
}{
|
||||
{
|
||||
bittorrent.InfoHashFromString("00000000000000000001"),
|
||||
bittorrent.Peer{ID: bittorrent.PeerIDFromString("00000000000000000001"), Port: 1, IP: bittorrent.IP{IP: net.ParseIP("1.1.1.1").To4(), AddressFamily: bittorrent.IPv4}},
|
||||
},
|
||||
{
|
||||
bittorrent.InfoHashFromString("00000000000000000002"),
|
||||
bittorrent.Peer{ID: bittorrent.PeerIDFromString("00000000000000000002"), Port: 2, IP: bittorrent.IP{IP: net.ParseIP("abab::0001"), AddressFamily: bittorrent.IPv6}},
|
||||
},
|
||||
}
|
||||
|
||||
v4Peer := bittorrent.Peer{ID: bittorrent.PeerIDFromString("99999999999999999994"), IP: bittorrent.IP{IP: net.ParseIP("99.99.99.99").To4(), AddressFamily: bittorrent.IPv4}, Port: 9994}
|
||||
v6Peer := bittorrent.Peer{ID: bittorrent.PeerIDFromString("99999999999999999996"), IP: bittorrent.IP{IP: net.ParseIP("fc00::0001"), AddressFamily: bittorrent.IPv6}, Port: 9996}
|
||||
|
||||
for _, c := range testData {
|
||||
peer := v4Peer
|
||||
if c.peer.IP.AddressFamily == bittorrent.IPv6 {
|
||||
peer = v6Peer
|
||||
}
|
||||
|
||||
// Test ErrDNE for non-existent swarms.
|
||||
err := p.DeleteLeecher(c.ih, c.peer)
|
||||
require.Equal(t, ErrResourceDoesNotExist, err)
|
||||
|
||||
err = p.DeleteSeeder(c.ih, c.peer)
|
||||
require.Equal(t, ErrResourceDoesNotExist, err)
|
||||
|
||||
_, err = p.AnnouncePeers(c.ih, false, 50, peer)
|
||||
require.Equal(t, ErrResourceDoesNotExist, err)
|
||||
|
||||
// Test empty scrape response for non-existent swarms.
|
||||
scrape := p.ScrapeSwarm(c.ih, c.peer.IP.AddressFamily)
|
||||
require.Equal(t, uint32(0), scrape.Complete)
|
||||
require.Equal(t, uint32(0), scrape.Incomplete)
|
||||
require.Equal(t, uint32(0), scrape.Snatches)
|
||||
|
||||
// Insert dummy Peer to keep swarm active
|
||||
// Has the same address family as c.peer
|
||||
err = p.PutLeecher(c.ih, peer)
|
||||
require.Nil(t, err)
|
||||
|
||||
// Test ErrDNE for non-existent seeder.
|
||||
err = p.DeleteSeeder(c.ih, peer)
|
||||
require.Equal(t, ErrResourceDoesNotExist, err)
|
||||
|
||||
// Test PutLeecher -> Announce -> DeleteLeecher -> Announce
|
||||
|
||||
err = p.PutLeecher(c.ih, c.peer)
|
||||
require.Nil(t, err)
|
||||
|
||||
peers, err := p.AnnouncePeers(c.ih, true, 50, peer)
|
||||
require.Nil(t, err)
|
||||
require.True(t, containsPeer(peers, c.peer))
|
||||
|
||||
// non-seeder announce should still return the leecher
|
||||
peers, err = p.AnnouncePeers(c.ih, false, 50, peer)
|
||||
require.Nil(t, err)
|
||||
require.True(t, containsPeer(peers, c.peer))
|
||||
|
||||
scrape = p.ScrapeSwarm(c.ih, c.peer.IP.AddressFamily)
|
||||
require.Equal(t, uint32(2), scrape.Incomplete)
|
||||
require.Equal(t, uint32(0), scrape.Complete)
|
||||
|
||||
err = p.DeleteLeecher(c.ih, c.peer)
|
||||
require.Nil(t, err)
|
||||
|
||||
peers, err = p.AnnouncePeers(c.ih, true, 50, peer)
|
||||
require.Nil(t, err)
|
||||
require.False(t, containsPeer(peers, c.peer))
|
||||
|
||||
// Test PutSeeder -> Announce -> DeleteSeeder -> Announce
|
||||
|
||||
err = p.PutSeeder(c.ih, c.peer)
|
||||
require.Nil(t, err)
|
||||
|
||||
// Should be leecher to see the seeder
|
||||
peers, err = p.AnnouncePeers(c.ih, false, 50, peer)
|
||||
require.Nil(t, err)
|
||||
require.True(t, containsPeer(peers, c.peer))
|
||||
|
||||
scrape = p.ScrapeSwarm(c.ih, c.peer.IP.AddressFamily)
|
||||
require.Equal(t, uint32(1), scrape.Incomplete)
|
||||
require.Equal(t, uint32(1), scrape.Complete)
|
||||
|
||||
err = p.DeleteSeeder(c.ih, c.peer)
|
||||
require.Nil(t, err)
|
||||
|
||||
peers, err = p.AnnouncePeers(c.ih, false, 50, peer)
|
||||
require.Nil(t, err)
|
||||
require.False(t, containsPeer(peers, c.peer))
|
||||
|
||||
// Test PutLeecher -> Graduate -> Announce -> DeleteLeecher -> Announce
|
||||
|
||||
err = p.PutLeecher(c.ih, c.peer)
|
||||
require.Nil(t, err)
|
||||
|
||||
err = p.GraduateLeecher(c.ih, c.peer)
|
||||
require.Nil(t, err)
|
||||
|
||||
// Has to be leecher to see the graduated seeder
|
||||
peers, err = p.AnnouncePeers(c.ih, false, 50, peer)
|
||||
require.Nil(t, err)
|
||||
require.True(t, containsPeer(peers, c.peer))
|
||||
|
||||
// Deleting the Peer as a Leecher should have no effect
|
||||
err = p.DeleteLeecher(c.ih, c.peer)
|
||||
require.Equal(t, ErrResourceDoesNotExist, err)
|
||||
|
||||
// Verify it's still there
|
||||
peers, err = p.AnnouncePeers(c.ih, false, 50, peer)
|
||||
require.Nil(t, err)
|
||||
require.True(t, containsPeer(peers, c.peer))
|
||||
|
||||
// Clean up
|
||||
|
||||
err = p.DeleteLeecher(c.ih, peer)
|
||||
require.Nil(t, err)
|
||||
|
||||
// Test ErrDNE for missing leecher
|
||||
err = p.DeleteLeecher(c.ih, peer)
|
||||
require.Equal(t, ErrResourceDoesNotExist, err)
|
||||
|
||||
err = p.DeleteSeeder(c.ih, c.peer)
|
||||
require.Nil(t, err)
|
||||
|
||||
err = p.DeleteSeeder(c.ih, c.peer)
|
||||
require.Equal(t, ErrResourceDoesNotExist, err)
|
||||
}
|
||||
|
||||
e := p.Stop()
|
||||
require.Nil(t, <-e)
|
||||
}
|
||||
|
||||
func containsPeer(peers []bittorrent.Peer, p bittorrent.Peer) bool {
|
||||
for _, peer := range peers {
|
||||
if PeerEqualityFunc(peer, p) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
Loading…
Add table
Reference in a new issue