Compare commits

...

438 commits

Author SHA1 Message Date
Victor Shyba
226d6eace4
fix ports and conf header 2022-05-25 18:47:51 -03:00
Alex Grin
48b28d0b07
Merge pull request #2 from lbryio/readme_changes
Readme changes
2022-05-25 11:07:59 -04:00
Victor Shyba
494d67fae4 add docker and systemd sections 2022-05-25 11:26:38 -03:00
Victor Shyba
a7b4e6a559 add simple just-udp example 2022-05-25 11:21:35 -03:00
Victor Shyba
1b6b7f1b5b use the contribute link from lbry.tech 2022-05-18 09:10:15 -03:00
Alex Grin
6baeb76ce2
Update LICENSE 2022-05-17 12:15:45 -04:00
Alex Grin
c1fcd3d624
Update README.md 2022-05-17 12:14:55 -04:00
Alex Grin
9330b3fd59
Update README.md 2022-05-17 12:11:59 -04:00
Victor Shyba
c8f0c1eed8 preserve Incomplete/Complete 2022-04-13 21:01:25 -03:00
Victor Shyba
737053bd0e accept port 0 but do not store (anonymous mode) 2022-04-13 21:01:05 -03:00
Victor Shyba
441b06169f add fixedpeer middleware 2022-04-13 17:37:28 -03:00
Jimmy Zelinskie
7455c2ad4a
Merge pull request #555 from jzelinskie/deps
gomod: bump all deps (mostly redis)
2022-01-19 11:49:06 -05:00
Jimmy Zelinskie
bb5460fccc gomod: bump all deps (mostly redis) 2022-01-18 22:00:57 -05:00
Jimmy Zelinskie
828edb8fd8
Merge pull request #549 from jzelinskie/fix-vet
.github: split workflows into build/lint, add new linters
2022-01-18 13:49:14 -05:00
Jimmy Zelinskie
07e4db8baf golangci-lint: include golint docstring exceptions 2022-01-18 13:33:46 -05:00
Jimmy Zelinskie
4b5e39f83c bittorrent: rename qp.Uint64 to qp.Uint 2022-01-17 15:54:56 -05:00
Jimmy Zelinskie
301dd22f15 lint: actually lint all go files 2022-01-15 23:28:52 -05:00
Jimmy Zelinskie
7166c1da17 storage/memory: avoid overflow in shard counts 2022-01-15 14:33:45 -05:00
Jimmy Zelinskie
f3468edf19 frontend/http: avoid overflows parsing queryparams 2022-01-15 14:25:39 -05:00
Jimmy Zelinskie
d1b90c0139 lint: gofumpt files 2022-01-15 14:01:23 -05:00
Jimmy Zelinskie
b81a310eea bytepool: store *[]byte
This avoids allocations for the slice metadata.

For more details, see:
https://staticcheck.io/docs/checks#SA6002
2022-01-15 13:58:06 -05:00
Jimmy Zelinskie
65ce7c7c6b lint: lint files with golangci-lint 2022-01-15 13:31:14 -05:00
Jimmy Zelinskie
e2991d59e4 lint: lint yaml files 2022-01-15 13:13:21 -05:00
Jimmy Zelinskie
c28d7ad788 lint: lint markdown files 2022-01-15 13:03:41 -05:00
Jimmy Zelinskie
6e2c095ce4 .github: separate CI into two workflows build/lint
This adds lots of new linting steps and fixes older ones.
2022-01-15 12:56:15 -05:00
Jimmy Zelinskie
129aac230a cmd: use os.SignalContext for signal management
This fixes a `go vet` failure for using an unbuffered channel with
os.Signal. This also simplifies platform-specific code.

GoFmt added new forward-compatible build tags.
2022-01-02 15:26:05 -05:00
Jimmy Zelinskie
dc34044973
Merge pull request #544 from chihaya/dependabot/go_modules/github.com/spf13/cobra-1.2.1
build(deps): bump github.com/spf13/cobra from 1.1.3 to 1.2.1
2022-01-02 14:54:27 -05:00
Jimmy Zelinskie
d57c348b6c
Merge pull request #545 from jzelinskie/dependabot-labels
.github: specify labels for dependabot PRs
2021-08-23 18:00:12 -04:00
Jimmy Zelinskie
9c44135610 .github: specify labels for dependabot PRs 2021-08-20 01:04:26 -04:00
dependabot[bot]
8bf717fa4e
build(deps): bump github.com/spf13/cobra from 1.1.3 to 1.2.1
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.1.3 to 1.2.1.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Changelog](https://github.com/spf13/cobra/blob/master/CHANGELOG.md)
- [Commits](https://github.com/spf13/cobra/compare/v1.1.3...v1.2.1)

---
updated-dependencies:
- dependency-name: github.com/spf13/cobra
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2021-08-01 08:16:10 +00:00
Jimmy Zelinskie
c1f523e855
Merge pull request #539 from chihaya/dependabot/go_modules/github.com/prometheus/client_golang-1.11.0
build(deps): bump github.com/prometheus/client_golang from 1.10.0 to 1.11.0
2021-07-02 13:16:32 -04:00
dependabot[bot]
7498ef3f4a
build(deps): bump github.com/prometheus/client_golang
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.10.0 to 1.11.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/master/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.10.0...v1.11.0)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2021-07-01 08:16:57 +00:00
Jimmy Zelinskie
035e66f155
Merge pull request #537 from jzelinskie/libera-badge
README: refresh badges & move release disclaimer
2021-06-23 14:54:28 -04:00
Jimmy Zelinskie
5c58456d9f README: refresh badges & move release disclaimer
Fixes #536.
2021-06-22 20:43:12 -04:00
Jimmy Zelinskie
057f7afefc
Merge pull request #535 from chihaya/dependabot/go_modules/github.com/anacrolix/torrent-1.28.0
build(deps): bump github.com/anacrolix/torrent from 1.26.1 to 1.28.0
2021-06-01 21:12:27 -04:00
dependabot[bot]
2d747cfac4
build(deps): bump github.com/anacrolix/torrent from 1.26.1 to 1.28.0
Bumps [github.com/anacrolix/torrent](https://github.com/anacrolix/torrent) from 1.26.1 to 1.28.0.
- [Release notes](https://github.com/anacrolix/torrent/releases)
- [Commits](https://github.com/anacrolix/torrent/compare/v1.26.1...v1.28.0)

Signed-off-by: dependabot[bot] <support@github.com>
2021-06-01 06:09:28 +00:00
Jimmy Zelinskie
205694d901
Merge pull request #533 from chihaya/dependabot/go_modules/github.com/anacrolix/torrent-1.26.1
build(deps): bump github.com/anacrolix/torrent from 1.26.0 to 1.26.1
2021-05-12 16:25:02 -04:00
Jimmy Zelinskie
7c888a171e
Merge pull request #532 from chihaya/dependabot/github_actions/Jerome1337/gofmt-action-v1.0.4
build(deps): bump Jerome1337/gofmt-action from v1.0.2 to v1.0.4
2021-05-12 16:24:44 -04:00
Jimmy Zelinskie
b9572c1770
Merge pull request #531 from chihaya/dependabot/github_actions/Jerome1337/goimports-action-v1.0.3
build(deps): bump Jerome1337/goimports-action from v1.0.2 to v1.0.3
2021-05-12 16:24:26 -04:00
dependabot[bot]
5f8229ad12
build(deps): bump github.com/anacrolix/torrent from 1.26.0 to 1.26.1
Bumps [github.com/anacrolix/torrent](https://github.com/anacrolix/torrent) from 1.26.0 to 1.26.1.
- [Release notes](https://github.com/anacrolix/torrent/releases)
- [Commits](https://github.com/anacrolix/torrent/compare/v1.26.0...v1.26.1)

Signed-off-by: dependabot[bot] <support@github.com>
2021-05-11 00:35:54 +00:00
dependabot[bot]
b70ddbef91
build(deps): bump Jerome1337/gofmt-action from v1.0.2 to v1.0.4
Bumps [Jerome1337/gofmt-action](https://github.com/Jerome1337/gofmt-action) from v1.0.2 to v1.0.4.
- [Release notes](https://github.com/Jerome1337/gofmt-action/releases)
- [Commits](https://github.com/Jerome1337/gofmt-action/compare/v1.0.2...4899d680cd7d4a959becfe74f97170c5847f859c)

Signed-off-by: dependabot[bot] <support@github.com>
2021-05-11 00:29:09 +00:00
dependabot[bot]
9fbf669fcd
build(deps): bump Jerome1337/goimports-action from v1.0.2 to v1.0.3
Bumps [Jerome1337/goimports-action](https://github.com/Jerome1337/goimports-action) from v1.0.2 to v1.0.3.
- [Release notes](https://github.com/Jerome1337/goimports-action/releases)
- [Commits](https://github.com/Jerome1337/goimports-action/compare/v1.0.2...cdf3ff946ea5f2c631af978819e8ce567c12dcaa)

Signed-off-by: dependabot[bot] <support@github.com>
2021-05-11 00:29:06 +00:00
Jimmy Zelinskie
d998ce556e
Merge pull request #530 from jzelinskie/fix-indent
.github: fix yaml indentation
2021-05-10 20:28:12 -04:00
Jimmy Zelinskie
6375e7c735 .github: fix yaml indentation 2021-05-10 14:26:49 -04:00
Jimmy Zelinskie
313db5027b
Merge pull request #529 from jzelinskie/explicit-dependabot
.github: add explicit dependabot
2021-05-10 14:25:41 -04:00
Jimmy Zelinskie
4224e1ac6b .github: add explicit dependabot
This also updates the existing dependencies.
2021-05-06 15:26:11 -04:00
dependabot-preview[bot]
e7f43ee924
Merge pull request #518 from chihaya/dependabot/go_modules/github.com/anacrolix/torrent-1.25.0 2021-03-04 17:14:00 +00:00
dependabot-preview[bot]
3b330213ad
build(deps): bump github.com/anacrolix/torrent from 1.22.0 to 1.25.0
Bumps [github.com/anacrolix/torrent](https://github.com/anacrolix/torrent) from 1.22.0 to 1.25.0.
- [Release notes](https://github.com/anacrolix/torrent/releases)
- [Commits](https://github.com/anacrolix/torrent/compare/v1.22.0...v1.25.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2021-03-04 07:21:51 +00:00
dependabot-preview[bot]
696a5e51bb
Merge pull request #516 from chihaya/dependabot/go_modules/github.com/sirupsen/logrus-1.8.0 2021-03-04 07:11:53 +00:00
dependabot-preview[bot]
592d487a67
build(deps): bump github.com/sirupsen/logrus from 1.7.0 to 1.8.0
Bumps [github.com/sirupsen/logrus](https://github.com/sirupsen/logrus) from 1.7.0 to 1.8.0.
- [Release notes](https://github.com/sirupsen/logrus/releases)
- [Changelog](https://github.com/sirupsen/logrus/blob/master/CHANGELOG.md)
- [Commits](https://github.com/sirupsen/logrus/compare/v1.7.0...v1.8.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2021-03-04 04:46:45 +00:00
dependabot-preview[bot]
dfcda607fc
Merge pull request #517 from chihaya/dependabot/go_modules/github.com/spf13/cobra-1.1.3 2021-03-04 04:37:15 +00:00
dependabot-preview[bot]
8c7a4fd117
build(deps): bump github.com/spf13/cobra from 1.1.1 to 1.1.3
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.1.1 to 1.1.3.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Changelog](https://github.com/spf13/cobra/blob/master/CHANGELOG.md)
- [Commits](https://github.com/spf13/cobra/compare/v1.1.1...v1.1.3)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2021-03-01 11:30:03 +00:00
Jimmy Zelinskie
89c83d2e3c
Merge pull request #515 from jzelinskie/http-profiles
Move profiling into the metrics server
2021-02-28 21:46:10 -05:00
Jimmy Zelinskie
425662fa93 .github: add a job for testing helm 2021-02-28 14:38:39 -05:00
Jimmy Zelinskie
0f2cfb2fdd gomod: bump to go1.16 2021-02-28 14:26:24 -05:00
Jimmy Zelinskie
456f9de190 pkg/metrics: move profiles into the metrics server
This change:
- renames pkg/prometheus into pkg/metrics
- renames the prometheus_addr config to metrics_addr
- adds pprof endpoints to the metrics server
- removes profile/trace cli flags
- adds endpoints for profiling to the metrics server
2021-02-27 12:49:24 -05:00
dependabot-preview[bot]
cf30ad8b6a
Merge pull request #513 from chihaya/dependabot/go_modules/github.com/stretchr/testify-1.7.0 2021-02-03 18:42:46 +00:00
dependabot-preview[bot]
6f65feef4b
Merge pull request #512 from chihaya/dependabot/go_modules/github.com/anacrolix/torrent-1.22.0 2021-02-03 18:42:14 +00:00
dependabot-preview[bot]
7be396d299
build(deps): bump github.com/stretchr/testify from 1.6.1 to 1.7.0
Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.6.1 to 1.7.0.
- [Release notes](https://github.com/stretchr/testify/releases)
- [Commits](https://github.com/stretchr/testify/compare/v1.6.1...v1.7.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2021-02-01 08:58:03 +00:00
dependabot-preview[bot]
606361d9c1
build(deps): bump github.com/anacrolix/torrent from 1.19.2 to 1.22.0
Bumps [github.com/anacrolix/torrent](https://github.com/anacrolix/torrent) from 1.19.2 to 1.22.0.
- [Release notes](https://github.com/anacrolix/torrent/releases)
- [Commits](https://github.com/anacrolix/torrent/compare/v1.19.2...v1.22.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2021-02-01 08:57:16 +00:00
Leo Balduf
c50c6b15b2
Merge pull request #511 from shish/patch-1
readme: fix typo
2021-01-20 14:13:57 +01:00
Shish
535c0fdd07
e
(A tiny tiny typo - but it's on the front page of the project, so I thought it worth fixing)
2021-01-20 12:06:28 +00:00
dependabot-preview[bot]
83f79e5202
Merge pull request #509 from chihaya/dependabot/go_modules/github.com/anacrolix/torrent-1.19.2 2021-01-06 03:00:03 +00:00
dependabot-preview[bot]
25d39698f2
build(deps): bump github.com/anacrolix/torrent from 1.18.1 to 1.19.2
Bumps [github.com/anacrolix/torrent](https://github.com/anacrolix/torrent) from 1.18.1 to 1.19.2.
- [Release notes](https://github.com/anacrolix/torrent/releases)
- [Commits](https://github.com/anacrolix/torrent/compare/v1.18.1...v1.19.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2021-01-01 07:32:08 +00:00
dependabot-preview[bot]
56fd2818b2
Merge pull request #506 from chihaya/dependabot/go_modules/gopkg.in/yaml.v2-2.4.0 2020-12-30 23:47:22 +00:00
dependabot-preview[bot]
7ac177257c
build(deps): bump gopkg.in/yaml.v2 from 2.3.0 to 2.4.0
Bumps [gopkg.in/yaml.v2](https://github.com/go-yaml/yaml) from 2.3.0 to 2.4.0.
- [Release notes](https://github.com/go-yaml/yaml/releases)
- [Commits](https://github.com/go-yaml/yaml/compare/v2.3.0...v2.4.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-12-30 23:33:02 +00:00
dependabot-preview[bot]
49d69140aa
Merge pull request #508 from chihaya/dependabot/go_modules/github.com/prometheus/client_golang-1.9.0 2020-12-30 23:23:59 +00:00
dependabot-preview[bot]
e6339590f0
build(deps): bump github.com/prometheus/client_golang
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.7.1 to 1.9.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/master/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.7.1...v1.9.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-12-30 23:18:19 +00:00
dependabot-preview[bot]
99aeb7cebe
Merge pull request #504 from chihaya/dependabot/go_modules/github.com/spf13/cobra-1.1.1 2020-12-30 23:09:18 +00:00
dependabot-preview[bot]
bc7b3bc738
build(deps): bump github.com/spf13/cobra from 1.0.0 to 1.1.1
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.0.0 to 1.1.1.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Changelog](https://github.com/spf13/cobra/blob/master/CHANGELOG.md)
- [Commits](https://github.com/spf13/cobra/compare/v1.0.0...v1.1.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-12-30 23:05:19 +00:00
Jimmy Zelinskie
3a76f09ea9
Merge pull request #507 from jzelinskie/fix-actions
github: migrate to setup-go v2 action
2020-12-30 17:56:43 -05:00
Jimmy Zelinskie
fad3541bd9 github: migrate to setup-go v2 action 2020-12-30 17:53:51 -05:00
Jimmy Zelinskie
ff0fe9e28d
Merge pull request #503 from chihaya/dependabot/go_modules/github.com/anacrolix/torrent-1.18.1
build(deps): bump github.com/anacrolix/torrent from 1.16.0 to 1.18.1
2020-12-30 15:46:10 -05:00
dependabot-preview[bot]
06eaf570ca
build(deps): bump github.com/anacrolix/torrent from 1.16.0 to 1.18.1
Bumps [github.com/anacrolix/torrent](https://github.com/anacrolix/torrent) from 1.16.0 to 1.18.1.
- [Release notes](https://github.com/anacrolix/torrent/releases)
- [Commits](https://github.com/anacrolix/torrent/compare/v1.16.0...v1.18.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-11-01 06:14:39 +00:00
Jimmy Zelinskie
24c72cdacc
Merge pull request #502 from chihaya/dependabot/go_modules/github.com/anacrolix/torrent-1.16.0
build(deps): bump github.com/anacrolix/torrent from 1.15.2 to 1.16.0
2020-10-05 13:04:13 -04:00
dependabot-preview[bot]
bd24c5b3fc
build(deps): bump github.com/anacrolix/torrent from 1.15.2 to 1.16.0
Bumps [github.com/anacrolix/torrent](https://github.com/anacrolix/torrent) from 1.15.2 to 1.16.0.
- [Release notes](https://github.com/anacrolix/torrent/releases)
- [Commits](https://github.com/anacrolix/torrent/compare/v1.15.2...v1.16.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-10-01 16:33:02 +00:00
Jimmy Zelinskie
4dbba4862c
Merge pull request #501 from chihaya/dependabot/go_modules/github.com/sirupsen/logrus-1.7.0
build(deps): bump github.com/sirupsen/logrus from 1.6.0 to 1.7.0
2020-10-01 12:25:28 -04:00
dependabot-preview[bot]
b6aa407213
build(deps): bump github.com/sirupsen/logrus from 1.6.0 to 1.7.0
Bumps [github.com/sirupsen/logrus](https://github.com/sirupsen/logrus) from 1.6.0 to 1.7.0.
- [Release notes](https://github.com/sirupsen/logrus/releases)
- [Changelog](https://github.com/sirupsen/logrus/blob/master/CHANGELOG.md)
- [Commits](https://github.com/sirupsen/logrus/compare/v1.6.0...v1.7.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-10-01 07:38:32 +00:00
Jimmy Zelinskie
326832e479
Merge pull request #497 from chihaya/dependabot/go_modules/github.com/prometheus/client_golang-1.7.1
build(deps): bump github.com/prometheus/client_golang from 1.6.0 to 1.7.1
2020-07-01 12:58:00 -04:00
Jimmy Zelinskie
f1713d6524
Merge pull request #496 from chihaya/dependabot/go_modules/github.com/stretchr/testify-1.6.1
build(deps): bump github.com/stretchr/testify from 1.6.0 to 1.6.1
2020-07-01 12:57:35 -04:00
dependabot-preview[bot]
a9f094749d
build(deps): bump github.com/prometheus/client_golang
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.6.0 to 1.7.1.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/master/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.6.0...v1.7.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-07-01 09:36:00 +00:00
dependabot-preview[bot]
f7e8116f33
build(deps): bump github.com/stretchr/testify from 1.6.0 to 1.6.1
Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.6.0 to 1.6.1.
- [Release notes](https://github.com/stretchr/testify/releases)
- [Commits](https://github.com/stretchr/testify/compare/v1.6.0...v1.6.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-07-01 09:35:19 +00:00
Jimmy Zelinskie
aa5b97dc5a
Merge pull request #494 from chihaya/dependabot/go_modules/gopkg.in/yaml.v2-2.3.0
build(deps): bump gopkg.in/yaml.v2 from 2.2.8 to 2.3.0
2020-06-01 14:42:48 -04:00
dependabot-preview[bot]
689ee75178
build(deps): bump gopkg.in/yaml.v2 from 2.2.8 to 2.3.0
Bumps [gopkg.in/yaml.v2](https://github.com/go-yaml/yaml) from 2.2.8 to 2.3.0.
- [Release notes](https://github.com/go-yaml/yaml/releases)
- [Commits](https://github.com/go-yaml/yaml/compare/v2.2.8...v2.3.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-06-01 18:39:04 +00:00
Jimmy Zelinskie
3d803be039
Merge pull request #495 from chihaya/dependabot/go_modules/github.com/stretchr/testify-1.6.0
build(deps): bump github.com/stretchr/testify from 1.5.1 to 1.6.0
2020-06-01 14:35:20 -04:00
dependabot-preview[bot]
e9dac2a874
build(deps): bump github.com/stretchr/testify from 1.5.1 to 1.6.0
Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.5.1 to 1.6.0.
- [Release notes](https://github.com/stretchr/testify/releases)
- [Commits](https://github.com/stretchr/testify/compare/v1.5.1...v1.6.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-06-01 10:07:17 +00:00
Jimmy Zelinskie
3db6859db6
Merge pull request #492 from jzelinskie/log-err-type
pkg/log: add type field for errors
2020-05-08 14:04:34 -04:00
Jimmy Zelinskie
e266d218db pkg/log: add type field for errors 2020-05-07 13:15:43 -04:00
Jimmy Zelinskie
3c23a854c8
Merge pull request #490 from chihaya/dependabot/go_modules/github.com/sirupsen/logrus-1.6.0
build(deps): bump github.com/sirupsen/logrus from 1.5.0 to 1.6.0
2020-05-04 11:14:19 -04:00
dependabot-preview[bot]
932d0e50c1
build(deps): bump github.com/sirupsen/logrus from 1.5.0 to 1.6.0
Bumps [github.com/sirupsen/logrus](https://github.com/sirupsen/logrus) from 1.5.0 to 1.6.0.
- [Release notes](https://github.com/sirupsen/logrus/releases)
- [Changelog](https://github.com/sirupsen/logrus/blob/master/CHANGELOG.md)
- [Commits](https://github.com/sirupsen/logrus/compare/v1.5.0...v1.6.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-05-04 08:44:57 +00:00
Jimmy Zelinskie
0704b62b31
Merge pull request #489 from chihaya/dep-refresh
go: bump dependencies, require Go 1.14
2020-05-01 12:12:46 -04:00
Jimmy Zelinskie
5b771c47a1 go: bump dependencies, require Go 1.14 2020-04-30 19:39:17 -04:00
Jimmy Zelinskie
bdc4f7b4d6
Merge pull request #487 from chihaya/dependabot/go_modules/github.com/spf13/cobra-0.0.7
build(deps): bump github.com/spf13/cobra from 0.0.3 to 0.0.7
2020-04-30 19:22:10 -04:00
Jimmy Zelinskie
54f761efe7
Merge pull request #488 from chihaya/dependabot/go_modules/github.com/alicebob/miniredis-2.5.0incompatible
build(deps): bump github.com/alicebob/miniredis from 2.4.6+incompatible to 2.5.0+incompatible
2020-04-30 19:21:45 -04:00
Jimmy Zelinskie
03ac7353e0
Merge pull request #483 from jzelinskie/dependabot-ci
.github/workflows: only run dependabot CI on PR
2020-04-30 19:20:57 -04:00
dependabot-preview[bot]
a13acda170
build(deps): bump github.com/alicebob/miniredis
Bumps [github.com/alicebob/miniredis](https://github.com/alicebob/miniredis) from 2.4.6+incompatible to 2.5.0+incompatible.
- [Release notes](https://github.com/alicebob/miniredis/releases)
- [Changelog](https://github.com/alicebob/miniredis/blob/master/CHANGELOG.md)
- [Commits](https://github.com/alicebob/miniredis/compare/v2.4.6...v2.5.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-04-30 08:24:52 +00:00
dependabot-preview[bot]
7e16002dc0
build(deps): bump github.com/spf13/cobra from 0.0.3 to 0.0.7
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 0.0.3 to 0.0.7.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Changelog](https://github.com/spf13/cobra/blob/master/CHANGELOG.md)
- [Commits](https://github.com/spf13/cobra/compare/v0.0.3...0.0.7)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-04-30 08:24:35 +00:00
Jimmy Zelinskie
b072bb4166 .github/workflows: only run dependabot CI on PR 2020-04-29 13:13:40 -04:00
Jimmy Zelinskie
61e9d47a77
Merge pull request #477 from moistari/fix-dockerfile
Fixes Dockerfile
2020-04-07 11:34:08 -04:00
mohammad istari
64d471d13a Fixes Dockerfile
With Go Modules, the current Dockerfile ignores the code that was COPY'd
from the Docker context (and thus always building/installing what was on
GitHub).
2020-04-06 11:17:41 +07:00
Jimmy Zelinskie
b1852c7c8e
Merge pull request #469 from jzelinskie/fix-badge
readme: fix broken build status badge
2020-03-11 10:17:33 -04:00
Jimmy Zelinskie
b61fe233df
Merge pull request #470 from chihaya/sponsor
github: init funding
2020-03-11 10:17:19 -04:00
Jimmy Zelinskie
0a725f7d44 github: init funding
This adds a sponsor GitHub link
2020-03-10 21:26:44 -04:00
Jimmy Zelinskie
5e0ee0bd00 workflows: execute on pull request 2020-03-10 21:25:46 -04:00
Jimmy Zelinskie
130e186006 readme: fix broken build status badge
The link and image source were reversed by accident.
2020-02-20 13:13:38 -05:00
Jimmy Zelinskie
2a3bb5bea0
Merge pull request #468 from jzelinskie/workflows
Replace TravisCI with GitHub Workflows
2020-02-20 13:09:42 -05:00
Jimmy Zelinskie
7ba4b68138 dist: remove all traces of travisci 2020-02-18 11:21:43 -05:00
Jimmy Zelinskie
0e17b1352b .github: init workflow 2020-02-18 11:21:42 -05:00
mrd0ll4r
e6e72698b9
Merge pull request #466 from elotreum/routeparam-enhancement
add helper method to retrieve RouteParam by name
2020-01-22 22:28:52 +01:00
elotreum
d70d300422 add godoc explaining catch-all parameter matching for ByName 2020-01-20 11:31:45 -07:00
elotreum
e0b50f3ffa add helper method to retrieve RouteParam by name 2020-01-19 20:35:21 -07:00
mrd0ll4r
85d646d1ad
Merge pull request #465 from elotreum/custom-routes
http: allow for customized routes
2020-01-18 11:40:37 +01:00
elotreum
452eb1acef update routes in travis config files 2020-01-16 20:12:21 -07:00
elotreum
9e7323fa44 correct godoc comments to include period 2020-01-16 19:43:50 -07:00
elotreum
77a52f9f30 http: allow for customized routes
Update to allow arrays of routes to be passed to the http frontend.
This also supports named parameters as permitted by the
router.

To avoid external dependencies in the middleware, a RouteParam and
RouteParams type was added to the bittorrent package.

Note: this eliminates the need for "enable_legacy_php_urls", as
the additional route could be added to the route array. However,
this may be considered a breaking change.
2020-01-14 16:35:28 -07:00
Jimmy Zelinskie
89cdaa8c6d
Merge pull request #464 from elotreum/header-fixes
http: explicitly set Content-Type header
2020-01-14 15:52:46 -05:00
elotreum
5082146ae9 http: explicitly set Content-Type header
Since Content-Type is not explicitly set, golang sniffs the responses
and attempts a guess. With announce responses, this usually means it
guesses application/octet-stream.

According to the godoc at
https://golang.org/pkg/net/http/#ResponseWriter

// If WriteHeader has not yet been called, Write calls
// WriteHeader(http.StatusOK) before writing the data. If the Header
// does not contain a Content-Type line, Write adds a Content-Type set
// to the result of passing the initial 512 bytes of written data to
// DetectContentType. Additionally, if the total size of all written
// data is under a few KB and there are no Flush calls, the
// Content-Length header is added automatically.
2020-01-14 13:31:25 -07:00
mrd0ll4r
053ce531d9
Merge pull request #461 from mrd0ll4r/travis-e2e
travis: add e2e tests with redis storage
2019-10-24 10:14:48 +09:00
Leo Balduf
c8c0de539c travis: get stuff to work 2019-10-18 10:42:57 +09:00
Leo Balduf
9acf809ffb docker: update Dockerfile for Go 1.13+ 2019-10-17 15:00:16 +09:00
Leo Balduf
a9a2d37f11 docs: update redis storage docs 2019-10-17 14:59:59 +09:00
Leo Balduf
728ec0c623 readme: update readme, move example config 2019-10-17 14:59:44 +09:00
Leo Balduf
ae431e1361 travis: refactor into scripts&configs, move to dist/travis 2019-10-17 14:58:24 +09:00
Leo Balduf
ddeb44b527 travis: add e2e tests with redis storage 2019-10-17 13:02:24 +09:00
mrd0ll4r
0a420fe053
Merge pull request #458 from mrd0ll4r/update-travis
travis: update to go 1.13
2019-10-17 13:01:49 +09:00
Leo Balduf
797d0cb6e2 travis: try to go get the latest dependencies 2019-10-17 12:51:53 +09:00
Leo Balduf
0936bd3f9a cmd/chihaya: fix imports for updated goimports 2019-10-17 12:51:53 +09:00
Leo Balduf
f4d34b54e5 travis: update to go 1.13 2019-10-17 12:51:53 +09:00
mrd0ll4r
4d58b4bce6
Merge pull request #459 from mrd0ll4r/frontend-defaults
Frontend defaults
2019-10-17 12:45:42 +09:00
Leo Balduf
87c72bc516 config: update example config 2019-10-17 12:38:08 +09:00
Leo Balduf
cb88a11d6a frontend: add defaults for parser options 2019-10-12 12:52:16 +09:00
mrd0ll4r
eed141dbe4
Merge pull request #454 from onestraw/rmdoc
docs: remove memorybysubnet
Fixes #452
2019-02-19 10:28:43 +03:00
onestraw
acdce7fea9 docs: remove memorybysubnet #452
Change-Id: I6907910e5752e0e0bd47c80f331e76210bbc744e
2019-02-19 14:57:15 +08:00
Jimmy Zelinskie
3889888f8a
Merge pull request #451 from jzelinskie/remove-endorsements
readme: remove endorsements
2019-02-14 17:44:37 -05:00
Jimmy Zelinskie
7b64e92ee9 readme: remove endorsements
We never got format approval for anything here, so I think we'd all feel
more comfortable not including it.
2019-02-14 15:04:33 -05:00
Jimmy Zelinskie
a48ab487e2
Merge pull request #450 from cooperlees/master
Remove Facebook from README.md
2019-02-14 15:03:34 -05:00
Cooper Ry Lees
d7cfcacbff Remove Facebook from README.md
- We no longer use the code that is on HEAD
- We should not incorrectly state that we do and mislead the general public
2019-02-14 09:25:13 -08:00
Jimmy Zelinskie
68b8edfdd5
Merge pull request #449 from jzelinskie/rm-memorybysubnet
storage: remove memorybysubnet
2019-02-14 11:10:11 -05:00
Jimmy Zelinskie
3e334b9536 storage: remove memorybysubnet
This code, while interesting, was only relevant to Facebook and they use
their own fork that they maintain. There was not enough outside
interest to warrant maintaining. I'd rather us use the effort to support
a redis storage backend instead.
2019-02-13 19:44:04 -05:00
Jimmy Zelinskie
94696c062e
Merge pull request #406 from chihaya/readme-refresh
readme refresh & add doc for architecture
2019-02-13 17:09:28 -05:00
Jimmy Zelinskie
a6df644597
Merge pull request #448 from jzelinskie/go-modules
vendor: move to Go modules for dependencies
2019-02-13 17:08:55 -05:00
Jimmy Zelinskie
6e362c184c docs: refactor architecture into its own doc 2019-02-13 10:53:26 -05:00
Jimmy Zelinskie
dc753b937c vendor: move to Go modules for dependencies
Fixes #444.
2019-02-12 20:36:25 -05:00
Jimmy Zelinskie
7df0145118 readme: refresh info, add graphviz architecture 2019-02-12 19:29:55 -05:00
Jimmy Zelinskie
ad1eee4eb7
Merge pull request #433 from onestraw/redis
Redis Storage
2019-02-12 17:20:19 -05:00
onestraw
36e0204a8f Clarification of Redis storage HA
Change-Id: I6674421d3afdbfab6acd97851cb062341b88a90e
2019-02-01 11:14:51 +08:00
onestraw
e83f68b952 storage/redis: add exiting info
Change-Id: I7876bf420a35b48314d14925f5f2ae591fa2d243
2019-01-28 15:18:21 +08:00
onestraw
7943288678 Add storage redis configuration example
Change-Id: I908c69d50fab0963f508f3688ec5934a25aa0550
2019-01-22 19:36:27 +08:00
onestraw
9d22b67f74 storage/redis: use redis reply
Change-Id: If6e6c2545b12c249413d3d13ea41e127b8d1d9b0
2019-01-21 19:44:33 +08:00
onestraw
fa19ffd050 add @mrd0ll4r 's comments
Change-Id: I53616703394f889fa2d0a4e952ac857d99c85218
2019-01-20 17:02:05 +08:00
onestraw
f2ab706f10 fix hlen/hdel race condition
Change-Id: Ib82e6e9f0c66d2df80d68fd00e0c6ec6b46a037b
2019-01-16 19:09:39 +08:00
mrd0ll4r
5f99a7e778
Merge pull request #442 from cenkalti/split-http
split listen&serve for http
2019-01-13 20:07:11 +03:00
onestraw
9a5fac67ed dep ensure update
Change-Id: Icb2627d7e6e8fb916b481ed9d9a47daa40330698
2019-01-04 13:58:20 +08:00
onestraw
d65ab677e7 storage/redis: refactor redis storage
- Based on @duyanghao's PR
  - Make staticcheck pass
  - Address review comments
2019-01-04 13:58:20 +08:00
duyanghao
e78892d5ac Add Support for Redis Storage Backend(To Achieve Chihaya High Availability)
Change-Id: I5cf703095d1060ac17e403b86056d3eccad97f2c
Signed-off-by: duyanghao <1294057873@qq.com>
2019-01-03 17:21:13 +08:00
Cenk Alti
d0fc3a4634
split listen&serve for http 2018-12-30 13:36:32 +03:00
Jimmy Zelinskie
b4b257c151
Merge pull request #441 from cenkalti/race
enable -race flag in tests
2018-12-29 22:29:36 -05:00
Cenk Alti
df4eeb840b
enable -race flag in tests 2018-12-30 00:50:54 +03:00
Jimmy Zelinskie
91715229f1
Merge pull request #438 from cenkalti/fix-437
Fix 437
2018-12-29 15:03:05 -05:00
Cenk Alti
0de1d25448
fix listenAndServe comment 2018-12-27 15:17:43 +03:00
Cenk Alti
b345eb3899
split listenAndServe into 2 functions 2018-12-26 18:15:05 +03:00
Cenk Alti
2a26215f2a
Revert "protect socket variable with mutex; fix #437"
This reverts commit 1b7ce4c378.
2018-12-26 18:10:48 +03:00
Cenk Alti
1b7ce4c378
protect socket variable with mutex; fix #437 2018-12-25 11:23:47 +03:00
Cenk Alti
fcbc168ae6
add test case for demonstrating panic 2018-12-25 11:23:08 +03:00
mrd0ll4r
8f0fc7ba10
Merge pull request #435 from onestraw/goimports
fix goimports error
2018-12-17 10:31:24 +01:00
onestraw
e09d11e4b9 fix goimports error
Change-Id: I0f6e383ec1081cc47c690fdaab3fd35590a2634b
2018-12-17 16:59:44 +08:00
Jimmy Zelinskie
dcd8e8ea86
Merge pull request #429 from yashpal1995/canonicalize_real-ip-header
frontend/http: canonicalize http real_ip_header
2018-10-24 14:28:21 -04:00
Justin Li
bacc7646d0
Merge pull request #431 from chihaya/fix-lint-import
Fix path to golint for travis
2018-10-24 13:06:11 -04:00
Justin Li
3c80ed8a8e Fix goimports violations 2018-10-24 09:25:03 -04:00
Justin Li
f19f08aa2e Fix path to golint for travis 2018-10-23 13:48:34 -04:00
Yashpal Choudhary
e749c9c6c9 frontend/http: canonicalize http real_ip_header
Fixes #428
2018-10-20 03:07:22 +05:30
Jimmy Zelinskie
3f9ac79570
Merge pull request #425 from jzelinskie/docker-ca
dockerfile: add CA certificates
2018-09-24 18:58:44 -04:00
Jimmy Zelinskie
bb56c2932b dockerfile: add CA certificates 2018-09-24 14:04:12 -04:00
mrd0ll4r
7c5f8bf9c5
Merge pull request #424 from daftaupe/torrentapproval_tests
middleware/torrentapproval : make use of ErrTorrentUnapproved
2018-09-21 09:43:37 +02:00
Pierre-Alain TORET
757ebf1241 middleware/torrentapproval : make use of ErrTorrentUnapproved 2018-09-21 08:49:03 +02:00
mrd0ll4r
c9d51e8e68
Merge pull request #423 from daftaupe/clientapproval_tests
middleware/clientapproval : add tests
2018-09-20 15:09:54 +02:00
Pierre-Alain TORET
6450a2fa00 middleware/clientapproval : add tests 2018-09-20 14:10:36 +02:00
Pierre-Alain TORET
12c9f95eb1 middleware/clientapproval : add error when using blacklist and whitelist at the same time 2018-09-20 11:47:14 +02:00
mrd0ll4r
d5bddeac96
Merge pull request #415 from daftaupe/#375
middleware : add torrent approval package
2018-09-19 15:01:16 +02:00
Pierre-Alain TORET
82c9f08f4f middleware : add torrent approval package
this makes it possible to specify a torrent whitelist/blacklist in the
configuration file based on their hexadecimal hash.

Fixes #375

Signed-off-by: Pierre-Alain TORET <pierre-alain.toret@protonmail.com>
2018-09-19 14:11:13 +02:00
mrd0ll4r
564a54a178
Merge pull request #420 from mrd0ll4r/connid-pool
frontend/udp: pool connection ID generation state
2018-09-18 18:07:32 +02:00
Leo Balduf
495f2c2734 frontend/udp: pool connection ID generation state 2018-09-18 11:25:54 +02:00
mrd0ll4r
b505cecde1
Merge pull request #419 from mrd0ll4r/connid-debug
frontend/udp: debug log connection ID generation/validation
2018-09-17 01:11:56 +02:00
Leo Balduf
96d0c3d829 frontend/udp: debug log connection ID generation/validation 2018-09-16 22:36:47 +02:00
Justin Li
862b452cef
Merge pull request #414 from chihaya/http-and-https
Add https_addr config, required to run HTTPS
2018-09-12 00:49:08 -04:00
Justin Li
1cb16ddb0c Add https_addr config, required to run HTTPS 2018-09-11 17:17:09 -04:00
Justin Li
1a4e4c833b
Merge pull request #413 from chihaya/recursive-stop-groups
Return []error from Stop() channel, allow recursive stop groups
2018-09-11 17:14:41 -04:00
Justin Li
2df7eac90f
Merge pull request #412 from chihaya/config-keepalive
Add config to enable keepalive/persistent connections
2018-09-09 11:38:18 -04:00
Justin Li
d95120c817 Return []error from Stop() channel, allow recursive stop groups 2018-09-09 11:30:15 -04:00
Justin Li
20edf7a136 Add config to enable keepalive/persistent connections 2018-09-09 09:01:53 -04:00
Jimmy Zelinskie
21f500c93e
Merge pull request #405 from jzelinskie/one-binary
cmd: add e2e command
2018-09-04 11:56:18 -04:00
Jimmy Zelinskie
3aa7d1a91d cmd: add e2e command
This change unifies chihaya and chihaya-e2e binaries.
It also vendors the code missing from the project that was used in
chihaya-e2e.

Fixes #402.
2018-09-04 11:38:22 -04:00
mrd0ll4r
3bcb79129c
Merge pull request #408 from mrd0ll4r/fix-tls
http: fix TLS
2018-09-04 17:07:32 +02:00
Leo Balduf
8095657735 http: fix TLS 2018-09-04 12:45:06 +02:00
Jimmy Zelinskie
085234044a
Merge pull request #403 from jzelinskie/trace
cmd/chihaya: add tracing flag
2018-07-04 12:09:41 -04:00
Jimmy Zelinskie
17f22e77a3 cmd/chihaya: add tracing flag 2018-07-03 23:14:33 -04:00
Jimmy Zelinskie
ff15955dcc
Merge pull request #401 from jzelinskie/style-nitpicks
Style nitpicks
2018-06-15 13:48:06 -04:00
Jimmy Zelinskie
0738d93644
Merge pull request #399 from jzelinskie/prom
dist/prometheus: remove rules file
2018-06-15 13:34:35 -04:00
Jimmy Zelinskie
aab8fa24c1
Merge pull request #400 from jzelinskie/helm-config-update
dist/helm: resync config with values.yaml
2018-06-15 13:34:23 -04:00
Jimmy Zelinskie
84ee1d6658 storage/memorybysubnet: fix misspellings 2018-06-15 13:33:26 -04:00
Jimmy Zelinskie
734c11c6ed bittorrent: anonymous fields for IPs in test table 2018-06-15 13:33:26 -04:00
Jimmy Zelinskie
3c052ec98d storage/memory: multi-line call to New() 2018-06-15 13:33:26 -04:00
Jimmy Zelinskie
f0780ad9cc frontend: isolate prometheus logic to one file 2018-06-15 13:33:26 -04:00
Jimmy Zelinskie
be57cd15b7 dist/helm: resync config with values.yaml 2018-06-14 18:40:49 -04:00
Jimmy Zelinskie
b737c8d0aa dist/prometheus: remove rules file
Not all users may want this file and is a burden to maintain.
2018-06-14 18:39:03 -04:00
Jimmy Zelinskie
be555c3b51
Delete index.html
We no longer need this for redirecting the domain.
Google domains supports redirects!
2018-05-11 13:13:52 -07:00
Jimmy Zelinskie
0d492c4349 Delete CNAME 2018-05-11 13:10:25 -07:00
Jimmy Zelinskie
2f603e43fc Create CNAME 2018-05-11 13:10:05 -07:00
Jimmy Zelinskie
ca4147a808 Delete CNAME 2018-05-11 13:09:56 -07:00
mrd0ll4r
9e251b23b6
Merge pull request #394 from mrd0ll4r/bittorrent-strings
bittorrent: implement nicer String methods for various types
2018-04-11 09:09:54 +02:00
mrd0ll4r
1f7ea58197
Merge pull request #395 from mrd0ll4r/opentracker-v6
Opentracker v6
2018-03-23 09:33:43 +01:00
Leo Balduf
0954c17692 chihaya-e2e: force IPv4 announces for now 2018-02-17 13:42:36 +01:00
Leo Balduf
ee7b4f944a doc: update frontend doc to indicate BEP15 IPv6 support 2018-02-17 13:41:36 +01:00
Leo Balduf
fa19d1125c udp: support both BEP15 and old opentracker v6 announces 2018-02-17 13:41:36 +01:00
Leo Balduf
6c5e8ad20c bittorrent: implement nicer String methods for various types 2018-02-17 13:11:27 +01:00
mrd0ll4r
0edd6382d5
Merge pull request #393 from mrd0ll4r/validate-port
Validate port
2018-02-14 11:27:19 +01:00
mrd0ll4r
b1c05d362a
Merge pull request #392 from mrd0ll4r/default-port
*: move default tracker port to 6969
2018-02-14 11:26:48 +01:00
Leo Balduf
b19f7115df bittorrent: validate port != 0 for announces 2018-02-13 10:13:57 +01:00
Leo Balduf
0c077f0a8c *: move default tracker port to 6969 2018-02-13 10:07:01 +01:00
mrd0ll4r
a8bc51ba1d
Merge pull request #382 from mrd0ll4r/ekop
travis: add chihaya e2e testing
2018-02-04 23:35:34 +01:00
Leo Balduf
7022b541bc dep: add dependencies for e2e testing 2018-02-04 12:38:12 +01:00
Jimmy Zelinskie
40f6456138
Merge pull request #381 from jzelinskie/subtests
*: add subtests for all table driven tests
2018-02-03 13:19:28 -05:00
Jimmy Zelinskie
811fe001ac bittorrent: pretty print nil for subtest naming 2018-02-02 17:13:13 -05:00
Leo Balduf
65704f47e1 travis: add chihaya-e2e 2018-01-21 18:43:28 +01:00
Leo Balduf
1a39a495d7 cmd/chihaya-e2e: add chihaya-e2e 2018-01-21 18:35:02 +01:00
Jimmy Zelinskie
a5b15d69ad
Merge pull request #376 from jzelinskie/register-mw
Add registrable middleware
2018-01-01 18:34:20 -05:00
Jimmy Zelinskie
6bef53658b
Merge pull request #380 from jzelinskie/rmglide
*: remove glide from README & Dockerfile
2017-12-31 15:47:42 -05:00
Jimmy Zelinskie
2004489016 *: add subtests for all table driven tests
Because we use testify, this is less useful than normal, but this is
still best practice for table-driven tests.
2017-12-29 17:44:45 -05:00
Jimmy Zelinskie
120c4615c1 travis: remove 'grep -v vendor' from go list
In recent versions of Go, you no longer need to manually remove the
vendor directory from commands like `go list`.
2017-12-29 17:14:55 -05:00
Jimmy Zelinskie
e9d1e71276 *: remove glide from README & Dockerfile 2017-12-29 17:12:17 -05:00
Jimmy Zelinskie
7dbbc86380 middleware: add a registration model 2017-12-29 16:55:48 -05:00
Jimmy Zelinskie
2bead6b7b4 cmd/chihaya: pull out pre and post funcs 2017-12-29 16:55:48 -05:00
Jimmy Zelinskie
5840cd3de1
Merge pull request #377 from jzelinskie/dep
vendor: migrate from glide to dep
2017-12-29 16:42:41 -05:00
Jimmy Zelinskie
d38a7017d1 travis: build with HEAD and vendored deps
Builds failing on HEAD do not fail CI/CD. Instead, this should be used
as a canary for breaking changes upstream.
2017-12-29 16:40:29 -05:00
Jimmy Zelinskie
f69159362a vendor: migrate from glide to dep 2017-12-29 16:40:29 -05:00
Davor Kapsa
24be4ece73 travis: update go version (#378)
* travis: update go version

* travis: remove go key

* travis: add 1.9.x go version
2017-12-26 14:23:24 -05:00
mrd0ll4r
8f472ad52c
Merge pull request #372 from mrd0ll4r/build-win
cmd/chihaya: make things work on windows
2017-12-24 13:28:29 +01:00
Jimmy Zelinskie
ff269b0f44
Merge pull request #374 from ilyaglow/docker-multistage
Improve Dockerfile
2017-12-20 10:05:31 -05:00
Ilya Glotov
b100583d7d
Set runtime image base on alpine 2017-12-20 14:51:11 +03:00
mrd0ll4r
22c42f9ec3
Merge pull request #305 from mrd0ll4r/doc-frontend
docs: add frontend documentation
2017-12-20 11:56:23 +01:00
Ilya Glotov
39e3b5ae5c
Remove GOOS environment variable 2017-12-12 15:34:58 +03:00
Ilya Glotov
ae7a13db21
Add docker improvements
* Add multistaged build effectively reducing image size
* Change deprecated MAINTAINER to LABEL
* Change ADD to COPY
* Start container as a non-root user
2017-12-12 15:03:53 +03:00
mrd0ll4r
d28c6717b1
Merge pull request #371 from mrd0ll4r/min-interval
middleware, config: re-add support for min interval
2017-12-07 19:44:19 +01:00
mrd0ll4r
15bd5c41f3
Merge pull request #368 from mrd0ll4r/udp-connect-af
udp: set address family for connect metrics
2017-12-07 19:10:30 +01:00
Leo Balduf
fa6e360da4 udp: set address family for connect metrics 2017-12-06 23:00:36 +01:00
mrd0ll4r
df34304ab4
Merge pull request #370 from mrd0ll4r/hook-stopper
middleware: document stop.Stopper behaviour for Hooks
2017-12-06 21:13:40 +01:00
mrd0ll4r
dde5cd1586
Merge pull request #369 from mrd0ll4r/storage-doc
storage: document PeerStore in more detail
2017-12-06 21:12:59 +01:00
mrd0ll4r
34a6425fd5
Merge pull request #373 from mrd0ll4r/php-announces
http: add option for legacy PHP URLs
2017-12-06 15:16:33 +01:00
Leo Balduf
6e3470aa7e docs: add frontend documentation 2017-12-05 11:19:33 +01:00
Leo Balduf
2f58e98832 http: add option for legacy PHP URLs 2017-12-05 10:41:43 +01:00
Leo Balduf
35d146f675 cmd/chihaya: make things work on windows 2017-12-05 10:14:12 +01:00
Leo Balduf
ef166a6159 middleware, config: re-add support for min interval 2017-12-02 22:56:35 +01:00
Leo Balduf
756a0f6316 middleware: document stop.Stopper behaviour for Hooks 2017-12-02 22:46:09 +01:00
Leo Balduf
6198491194 storage: document PeerStore in more detail 2017-12-02 22:41:56 +01:00
Jimmy Zelinskie
395e59aef3
Merge pull request #366 from chihaya/tokei-badge
readme: add LoC badge
2017-11-28 19:32:42 -05:00
Jimmy Zelinskie
e505250b06
readme: add LoC badge 2017-11-28 16:59:57 -05:00
Jimmy Zelinskie
80558648d7 Merge pull request #363 from jzelinskie/req-san
Request Sanitizer via library
2017-10-18 12:45:25 -04:00
Jimmy Zelinskie
df0de94337 frontend/http: bandwidth are in bytes not pieces 2017-10-18 11:51:19 -04:00
Jimmy Zelinskie
1a0b5c56a6 frontend/http: disambiguate NumWantProvided 2017-10-17 22:06:03 -04:00
Jimmy Zelinskie
ca823e0e5f frontend: update to use non-object sanitization 2017-10-17 22:02:45 -04:00
Jimmy Zelinskie
66e12c6684 bittorrent: add String() and LogFields() 2017-10-17 22:02:06 -04:00
mrd0ll4r
e7b8264e50 Merge pull request #361 from mrd0ll4r/timecache
Timecache: cache time
2017-10-15 21:47:05 +02:00
Leo Balduf
6dfdb7e192 udp: clean up connection ID generation 2017-10-15 20:05:39 +02:00
Leo Balduf
89bc479a3b *: make use of timecache 2017-10-15 20:05:39 +02:00
Leo Balduf
55b57549a6 pkg/timecache: implement a time cache 2017-10-15 20:05:35 +02:00
Jimmy Zelinskie
ce43a09956 *: add sanitization example config 2017-10-08 18:02:34 -04:00
Jimmy Zelinskie
134744a484 middleware: remove sanitization mw 2017-10-08 18:02:34 -04:00
Jimmy Zelinskie
47b5e67345 frontend/udp: add request sanitization 2017-10-08 18:02:34 -04:00
Jimmy Zelinskie
6dee48ce17 frontend/http: add request sanitization 2017-10-08 18:02:33 -04:00
Jimmy Zelinskie
b7e6719129 bittorrent: add initial request sanitizer 2017-10-08 18:02:33 -04:00
mrd0ll4r
b314b5003a Merge pull request #362 from mrd0ll4r/default-timeout
http: add default ReadTimeout, WriteTimeout
2017-10-03 13:01:53 +02:00
Leo Balduf
7d9166e003 http: add default ReadTimeout, WriteTimeout 2017-09-30 14:26:02 +02:00
mrd0ll4r
8300621799 Merge pull request #358 from mrd0ll4r/clean-logging
cmd/chihaya: clean up logging
2017-09-20 17:17:48 +02:00
Leo Balduf
79750ef983 cmd/chihaya: clean up logging 2017-09-19 21:27:52 +02:00
mrd0ll4r
b9773473e4 Merge pull request #356 from mrd0ll4r/stop-tests
storage: stop peer store after tests
2017-09-13 15:29:55 +02:00
Leo Balduf
b5dda16706 storage: stop peer store after tests 2017-09-13 03:47:19 +02:00
mrd0ll4r
2a4c82f613 Merge pull request #355 from mrd0ll4r/issue349
cmd/chihaya: log which config is in use
2017-09-03 20:55:37 +02:00
mrd0ll4r
464d37b2a7 Merge pull request #354 from mrd0ll4r/fix-debug
pkg/log: fix debug logging
2017-09-03 20:55:26 +02:00
Leo Balduf
c7b052dbb2 cmd/chihaya: log which storage is in use 2017-09-03 19:51:29 +02:00
Leo Balduf
13857d5bce pkg/log: fix debug logging 2017-09-03 11:37:17 +02:00
mrd0ll4r
4f4495f0f3 Merge pull request #350 from mrd0ll4r/benchmark-scrapes
storage: add Benchmarks for ScrapeSwarm, optimize implementations
2017-08-30 19:34:55 +02:00
Leo Balduf
5400a99b75 storage: add Benchmarks for ScrapeSwarm, optimize implementations 2017-08-30 08:54:11 +02:00
mrd0ll4r
44dbf4abb4 Merge pull request #351 from mrd0ll4r/log-output
log: add SetOutput function
2017-08-30 08:31:20 +02:00
mrd0ll4r
7c666e336a Merge pull request #352 from mrd0ll4r/subnet-benchconf
storage: update benchmark config
2017-08-30 08:30:55 +02:00
Leo Balduf
8ee8793867 storage: update benchmark config 2017-08-25 19:41:41 +02:00
Leo Balduf
3168f50601 log: add SetOutput function 2017-08-24 12:48:13 +02:00
mrd0ll4r
d026424038 Merge pull request #348 from mrd0ll4r/memorybench
storage: add PeerLifetime to test config
2017-08-18 12:52:58 +02:00
Leo Balduf
df7b59e2f3 storage: add PeerLifetime to test config 2017-08-17 23:35:20 +02:00
Jimmy Zelinskie
3799b856c2 Merge pull request #346 from jzelinskie/up-deps
glide: update dependencies
2017-07-05 15:48:52 -04:00
Jimmy Zelinskie
034aa0b5dc glide: update dependencies
This change also moves the logrus library to the lowercase import in
order to avoid breaking downstream projects vendoring chihaya.
2017-07-03 18:57:13 -04:00
mrd0ll4r
2c67ad4dac Merge pull request #340 from mrd0ll4r/logging
pkg/log: create wrapper around logrus
2017-07-01 23:31:51 +02:00
Jimmy Zelinskie
3f3f75519d Merge pull request #345 from jzelinskie/README-fix
README: update build instructions
2017-07-01 17:11:15 -04:00
Jimmy Zelinskie
c5f8e5a9b0 README: update build instructions
Fixes #342.
2017-07-01 15:16:24 -04:00
Jimmy Zelinskie
13c71b4ee1 Merge pull request #344 from chihaya/fix-shard-key
*: fix shard key name
2017-06-30 03:02:33 -04:00
Jimmy Zelinskie
80e9fce087 *: fix shard key name 2017-06-29 17:10:25 -04:00
Leo Balduf
8ed171b0ea pkg/log: create wrapper around logrus 2017-06-26 20:46:40 +02:00
mrd0ll4r
153ad325b7 Merge pull request #343 from mrd0ll4r/storage-defaults
storage: update config defaults
2017-06-26 20:41:48 +02:00
Leo Balduf
1aa6c86d3f storage: update config defaults 2017-06-25 19:04:24 +02:00
mrd0ll4r
d43cb719b9 Merge pull request #339 from mrd0ll4r/xorshift
pkg/xorshift: rebuild to use stack only
2017-06-19 19:48:00 +02:00
Leo Balduf
02336d10e7 pkg/xorshift: rebuild to use stack only 2017-06-19 09:45:26 +02:00
mrd0ll4r
7ea4b3dc7a Merge pull request #338 from mrd0ll4r/fix-parsing
Fix and optimize query parsing, make parsing errors static
2017-06-19 09:03:48 +02:00
Leo Balduf
6e1cfa18d8 bittorrent: make invalid query escape errors static 2017-06-18 22:43:24 +02:00
Leo Balduf
2764717657 bittorrent: fix out-of-range panics for URL parsing 2017-06-18 22:43:21 +02:00
mrd0ll4r
2dcb4344cb Merge pull request #336 from mrd0ll4r/xorshift
pkg/prand: replace with pkg/xorshift
2017-06-12 22:09:38 +02:00
Leo Balduf
03b98e0090 pkg/prand: replace with pkg/xorshift 2017-06-12 22:07:05 +02:00
Jimmy Zelinskie
fa6dcddcb6 Merge pull request #335 from jzelinskie/fix-jwt
middleware/jwt: encode infohashes as hex
2017-06-09 00:11:02 -04:00
mrd0ll4r
3a323d9338 Merge pull request #331 from cedricgc/tracker-logic-context
TrackerLogic returns modified Contexts
2017-06-08 17:39:04 +02:00
Jimmy Zelinskie
ad496fceb8 middleware/jwt: encode infohashes as hex
This avoids some corner-cases that can be experienced when trying to
url-escape raw bytes.
2017-06-07 23:07:07 -04:00
Cedric Charly
f7becf952b frontend: TrackerLogic interface returns modified context
HandleAnnounce and HandleScrape must return the modified context changed
by the hooks. These contexts are passed to AfterAnnounce and AfterScrape
for further use.

Closes #304
2017-06-07 19:25:12 -05:00
Jimmy Zelinskie
3168f13b48 Merge pull request #332 from jzelinskie/jwt-escape-infohash
middleware/jwt: escape infohash when debugging
2017-06-06 14:23:26 -04:00
mrd0ll4r
6663c09391 Merge pull request #333 from mrd0ll4r/prom-rules
dist/prometheus: add rules for the storage
2017-06-06 19:58:33 +02:00
Jimmy Zelinskie
389dbd20dc Merge pull request #330 from jzelinskie/helm-flags
dist/helm: default debug and json logs
2017-06-06 13:55:45 -04:00
Leo Balduf
53297853a6 dist/prometheus: add rules for the storage 2017-06-06 19:54:02 +02:00
Jimmy Zelinskie
035c5b4960 middleware/jwt: escape infohash when debugging
Without this, the log lines end up looking like:
request=��1�H�7L a���-��7
2017-06-06 13:22:14 -04:00
Jimmy Zelinskie
6c3ddaefb3 Merge pull request #329 from jzelinskie/debug-jwt
Fix panic on binary start
2017-06-06 12:18:20 -04:00
Jimmy Zelinskie
8dddae0658 dist/helm: sync configs 2017-06-06 12:16:47 -04:00
Jimmy Zelinskie
233b5b52ec dist/helm: default debug and json logs 2017-06-05 22:37:35 -04:00
Jimmy Zelinskie
0d9a2309fc middleware/jwt: add debug logs for JWT failures 2017-06-05 22:09:34 -04:00
Jimmy Zelinskie
dab03f52dc storage: share prometheus models
Running of the binary actually caused a panic due to multiple calling of
MustRegister(). This fixes that by sharing models in the storage
package.
2017-06-05 22:07:13 -04:00
Jimmy Zelinskie
edef032381 Merge pull request #301 from jzelinskie/memorybysubnet
Storage Drivers (starting w/ subnet memory)
2017-06-04 13:47:55 -07:00
Jimmy Zelinskie
ce3281f3e8 example_config.yaml: fix indentation from merge 2017-06-04 16:42:32 -04:00
Jimmy Zelinskie
effb05103a cmd/chihaya: remove extra registration of storages 2017-06-04 16:42:32 -04:00
Jimmy Zelinskie
c41519e73f storage: sync memory & memorybysubnet
This change is a manual merge of all of the optimizations that have been
added to the memory peer store into the memorybysubnet peer store.

This also fixes some inconsistencies between the two.
2017-06-04 16:42:32 -04:00
Jimmy Zelinskie
7786e1a915 storage: enforce all peer stores are loggable 2017-06-04 16:06:01 -04:00
Jimmy Zelinskie
ed69a0893e docs/storage: add memorybysubnet 2017-06-04 16:06:01 -04:00
Jimmy Zelinskie
85d7d9c677 storage/memorybysubnet: bench with masks 2017-06-03 15:47:58 -04:00
Jimmy Zelinskie
c37311e8c7 cmd/chihaya: register storage drivers 2017-06-03 15:47:58 -04:00
Jimmy Zelinskie
acf7e3c749 storage: register unique Prometheus metrics names 2017-06-03 15:47:58 -04:00
Jimmy Zelinskie
039f25f571 storage/memorybysubnet: add peerSubnet type 2017-06-03 15:47:58 -04:00
Jimmy Zelinskie
b013106f89 storage/memorybysubnet: delete empty subnet buckets 2017-06-03 15:47:58 -04:00
Jimmy Zelinskie
cb6a3be2ac storage/memorybysubnet: s/mask/preferredSubnet 2017-06-03 15:47:58 -04:00
Jimmy Zelinskie
d07b61d67d storage/memorybysubnet: replace range with len() 2017-06-03 15:47:58 -04:00
Jimmy Zelinskie
370004a9f5 storage/memorybysubnet: clarify subnet bits _set_ 2017-06-03 15:47:58 -04:00
Jimmy Zelinskie
9ce6c31021 storage/memorysubnet: allocate CIDRMask only once 2017-06-03 15:47:58 -04:00
Jimmy Zelinskie
496cc1a31d storage: dynamically register drivers 2017-06-03 15:47:58 -04:00
Jimmy Zelinskie
6fc3f618aa storage/memorybysubnet: add package docs 2017-06-03 15:45:41 -04:00
Jimmy Zelinskie
bd02be6ab5 storage/memorybysubnet: fix TODOs 2017-06-03 15:45:41 -04:00
Jimmy Zelinskie
7f7c79bdf1 storage/memorybysubnet: init 2017-06-03 15:45:41 -04:00
mrd0ll4r
abccf5bd7e Merge pull request #325 from cedricgc/peer-store-config
storage/memory: set default prometheus_reporting_interval
2017-06-03 21:29:11 +02:00
Cedric Charly
ce6706b0d5 storage/memory: set default prometheus_reporting_interval
When left blank in the config, the default prometheus_reporting_interval value
defaulted to 0, causing a panic during peer store initialization. This
change sets the default value to 1 if not provided.

Fixes #319
2017-06-03 14:21:39 -05:00
mrd0ll4r
d3de59bab0 Merge pull request #322 from mrd0ll4r/prom-config
dist/prometheus: add prometheus config and rules
2017-06-03 20:08:28 +02:00
Leo Balduf
7c4f8cf395 dist/helm: move from contrib/helm 2017-06-03 14:14:37 +02:00
Jimmy Zelinskie
ceacd6bdbd Merge pull request #323 from cedricgc/hook-logging
cmd/chihaya: log hook names on startup
2017-06-02 21:12:58 -07:00
Cedric Charly
86197a258c cmd/chihaya: log hook names on startup
Replace logging memory addresses with actual hook names.

Closes #317
2017-06-02 20:56:29 -05:00
Leo Balduf
7fabf4ba61 dist/prometheus: add prometheus config and rules 2017-06-02 19:33:05 +02:00
mrd0ll4r
634bc6b706 Merge pull request #306 from mrd0ll4r/storage-stats
storage: add prometheus metrics for seeders/leechers
2017-05-29 19:11:07 +02:00
Leo Balduf
a70d6dc036 memory: add metrics for peers 2017-05-29 19:04:40 +02:00
mrd0ll4r
1cc0738cbe Merge pull request #316 from mrd0ll4r/remove-time-now
Remove most calls to time.Now
2017-05-17 21:17:49 +02:00
Leo Balduf
143ba54695 storage/memory: remove calls to time.Now() 2017-05-12 20:27:31 +02:00
Leo Balduf
f61e7a9281 frontend: make request timing optional 2017-05-12 20:27:31 +02:00
mrd0ll4r
48abc6048e Merge pull request #315 from mrd0ll4r/fix-config
storage/config: fix example config, add warning for missing shardCount
2017-05-12 20:23:22 +02:00
Leo Balduf
71eb9fb767 storage/config: fix example config, add warning for missing shardCount 2017-05-12 13:16:39 +02:00
Jimmy Zelinskie
a4aeba7ecc Merge pull request #314 from jzelinskie/post-cpu
cmd/chihaya: move cpuprofile into postrun
2017-05-08 14:08:29 -04:00
Jimmy Zelinskie
d026a3359c cmd/chihaya: move cpuprofile into postrun
This also updates PreRuns to return their errors.
2017-05-07 18:52:17 -04:00
Jimmy Zelinskie
869777c6d0 Merge pull request #313 from jzelinskie/debug-logging
middleware: add structured logging to logic
2017-05-07 14:18:27 -04:00
Jimmy Zelinskie
1daee323a3 cmd/chihaya: add --json flag
This flag enables logging as JSON.
2017-05-07 04:38:31 -04:00
Jimmy Zelinskie
cb55019ab8 *: add structured logging 2017-05-07 04:30:22 -04:00
Jimmy Zelinskie
353ba01e51 Merge pull request #310 from jzelinskie/fix-restart
Refactor root run command
2017-05-03 13:42:27 -04:00
Jimmy Zelinskie
7b1e7e8c99 cmd/chihaya: s/chihayaCfg/cfg 2017-05-02 11:03:49 -04:00
Jimmy Zelinskie
23e9719073 cmd/chihaya: refactor out combination of errors 2017-05-01 15:57:01 -04:00
Jimmy Zelinskie
68cbe0fc21 cmd/chihaya: persist PeerStore across reloads 2017-05-01 15:56:38 -04:00
Jimmy Zelinskie
ea0dba3a3d cmd/chihaya: refactor root run command
This change refactors a bunch of the state of execution into its own
object. It also attempts to simplify stopping and adjusts some other
packages to integrate with the stopper interface.

Fixes #309.
2017-05-01 15:56:38 -04:00
Jimmy Zelinskie
20d1cbf537 pkg/prometheus: init 2017-05-01 15:56:16 -04:00
Jimmy Zelinskie
842bec32e7 cmd/chihaya: move enabling debug
PersistentPreRun is the more idiomatic place for this code to run when
using spf13/cobra.
2017-04-29 22:51:54 -04:00
Jimmy Zelinskie
00ab4acc98 Merge pull request #308 from TheOriginalWinCat/upstream
Make numwant in http request optional
2017-04-21 15:15:10 -04:00
tom
cdb0ff3612 http: make numWant optional
The default numwant value should be used if either numwant is zero or omitted from the announce request.
2017-04-21 13:17:58 +01:00
mrd0ll4r
9f45950dd0 Merge pull request #307 from mrd0ll4r/fixHttpShutdown
http: fix HTTP shutdown panicking
2017-04-05 09:08:09 +02:00
Leo Balduf
2e625af44d http: fix HTTP shutdown panicking 2017-04-04 22:34:53 +02:00
mrd0ll4r
ea1324602e Merge pull request #299 from mrd0ll4r/bump1.8
http: use go1.8 graceful shutdown
2017-02-18 21:04:33 +01:00
mrd0ll4r
1ef65d3704 Merge pull request #300 from mrd0ll4r/udpReadDeadline
udp: remove read deadline
2017-02-18 21:04:18 +01:00
Leo Balduf
233c355526 udp: remove read deadline 2017-02-18 13:17:45 +01:00
Leo Balduf
300b9c12ab http: use go1.8 graceful shutdown 2017-02-18 13:08:12 +01:00
Jimmy Zelinskie
bb93e478b8 Merge pull request #295 from jzelinskie/prom-errs
frontend/*: only record ClientErrors to Prometheus
2017-02-17 11:54:41 -05:00
Jimmy Zelinskie
f0eca4c3f7 Merge pull request #297 from jzelinskie/rmdoc
docs: remove deniability middleware
2017-02-16 15:13:17 -05:00
Jimmy Zelinskie
13882ae05d docs: remove deniability middleware 2017-02-16 01:04:17 -05:00
Jimmy Zelinskie
215f33d862 frontend/*: only record ClientErrors to Prometheus
All ClientErrors are constant and should not cause Prometheus streams to
be generated for all possible failure scenarios in the program.

Fixes #294.
2017-02-16 01:01:38 -05:00
Jimmy Zelinskie
b028a36424 Merge pull request #296 from jzelinskie/require-all
Replace last usage of assert with require
2017-02-15 14:05:50 -05:00
Jimmy Zelinskie
03080b39b4 glide: bump + track minor versions 2017-02-15 00:59:03 -05:00
Jimmy Zelinskie
517fb4044e tests: replace last usage of assert with require
Fixes #239.
2017-02-15 00:58:52 -05:00
mrd0ll4r
82d79e5113 Merge pull request #292 from mrd0ll4r/frontend-fix
frontend: fix recording of address family
2017-02-05 23:14:01 +01:00
mrd0ll4r
9db2d3dabb Merge pull request #290 from mrd0ll4r/parse-clienterror
bittorrent: make ParseURLData return ClientErrors
2017-02-05 23:13:49 +01:00
Leo Balduf
fdf4ed8169 frontend: fix recording of address family 2017-02-05 17:24:53 +01:00
Leo Balduf
98299c5912 bittorrent: make ParseURLData return ClientErrors 2017-02-05 13:22:14 +01:00
Jimmy Zelinskie
74dde3f362 Merge pull request #284 from jzelinskie/promaf
prometheus: record IP AddressFamily
2017-02-03 11:45:56 -05:00
Jimmy Zelinskie
51926ad562 Merge pull request #288 from jzelinskie/stopmv
mv pkg/stopper pkg/stop
2017-02-03 11:45:40 -05:00
Jimmy Zelinskie
a4b08c021b mv pkg/stopper pkg/stop
This makes most of the callsites stutter slightly less.
2017-02-02 21:09:25 -05:00
Jimmy Zelinskie
0e07b33827 prometheus: record IP AddressFamily 2017-02-02 19:19:33 -05:00
Jimmy Zelinskie
4d54980930 Merge pull request #287 from jzelinskie/scrapelimit
middleware: sanitize max scrape infohashes
2017-02-02 03:42:27 -05:00
Jimmy Zelinskie
3f29aa358b middleware: sanitize max scrape infohashes
Fixes #268.
2017-02-02 02:32:35 -05:00
Jimmy Zelinskie
fb30e9fb03 Merge pull request #286 from jzelinskie/https
frontend/http: add TLS support
2017-02-02 02:20:45 -05:00
Jimmy Zelinskie
6884a8f527 frontend/http: add TLS support
Fixes #283.
2017-02-02 02:01:51 -05:00
Jimmy Zelinskie
738e496929 Merge pull request #285 from chihaya/helm-head
contrib/helm: default to using HEAD container
2017-02-01 10:37:13 -05:00
Jimmy Zelinskie
8bc2b09724 contrib/helm: default to using HEAD container
This fixes a bug where the config being used is for HEAD, but previously the container image being used was for a tagged release with a different config schema.
2017-01-31 21:40:36 -05:00
Jimmy Zelinskie
4aad0e992b Merge pull request #282 from mrd0ll4r/udp-scrapes
udp: fix ordering of scrapes
2017-01-31 20:17:01 -05:00
Jimmy Zelinskie
9cc31763d0 Merge pull request #259 from jzelinskie/helm
initial helm package
2017-01-31 20:14:37 -05:00
Jimmy Zelinskie
7aebb44852 contrib/helm: update config schema 2017-01-31 19:59:48 -05:00
Jimmy Zelinskie
f40cd33b12 *: bump default shards to 1024 2017-01-31 19:59:24 -05:00
Jimmy Zelinskie
6deebdd6d4 frontend/udp: generate private key when empty 2017-01-31 19:53:36 -05:00
Jimmy Zelinskie
a48b9a50c3 initial helm package 2017-01-31 19:53:36 -05:00
Jimmy Zelinskie
baef1c17c3 Merge pull request #281 from jzelinskie/installfix
README: update development section
2017-01-31 19:50:29 -05:00
Jimmy Zelinskie
831b908402 *: comment middleware for OOB working config 2017-01-31 19:37:26 -05:00
Jimmy Zelinskie
a5dab8ac0a README: update development section
Fixes #269.
2017-01-31 19:34:45 -05:00
Leo Balduf
102b032c43 udp: fix ordering of scrapes 2017-01-30 13:07:14 +01:00
Jimmy Zelinskie
8cf49aad2e Merge pull request #276 from jzelinskie/configdocs
example_config: add initial documentation
2017-01-27 19:35:30 -05:00
mrd0ll4r
0c37672d47 Merge pull request #279 from mrd0ll4r/connidoptimize
udp: make connection ID handling faster
2017-01-27 23:57:40 +01:00
Leo Balduf
3e1cd77405 udp: make connection ID handling faster 2017-01-26 10:11:02 +01:00
mrd0ll4r
bfe970b12f Merge pull request #278 from mrd0ll4r/memorystats
storage: add prometheus to memory implementation
2017-01-25 20:06:43 +01:00
Leo Balduf
18d7e5d51b storage: add prometheus to memory implementation 2017-01-25 18:59:10 +01:00
Jimmy Zelinskie
8b1ab73894 Merge pull request #275 from jzelinskie/rmaction
frontend/udp: rm copypasta metric recording
2017-01-23 14:30:42 -05:00
Jimmy Zelinskie
acc051bcc4 frontend: rm copypasta metric recording
Fixes #271.
2017-01-23 14:06:32 -05:00
Jimmy Zelinskie
c2e34f7c36 example_config: quote all strings and fix docs 2017-01-23 14:03:32 -05:00
mrd0ll4r
a50be904e4 Merge pull request #277 from mrd0ll4r/http-prometheus-fix
http: fix prometheus timings
2017-01-23 18:24:55 +01:00
Jimmy Zelinskie
a204081a04 Merge pull request #274 from jzelinskie/fixsp
cmd: fix spelling mistake
2017-01-23 11:53:59 -05:00
Leo Balduf
0702755d0b http: fix prometheus timings
Fixes #272.
2017-01-23 16:24:29 +01:00
Jimmy Zelinskie
6200724ac6 Delete CNAME 2017-01-23 02:25:55 -05:00
Jimmy Zelinskie
ffcca91221 Create CNAME 2017-01-23 02:24:22 -05:00
Jimmy Zelinskie
91a0b4012a example_config: add initial documentation 2017-01-22 22:41:44 -05:00
Jimmy Zelinskie
fceee10aba cmd: fix spelling mistake 2017-01-22 19:06:13 -05:00
Jimmy Zelinskie
87d64dba50 Merge pull request #273 from jzelinskie/docsinit
copy over docs from docs repo
2017-01-22 18:56:14 -05:00
Jimmy Zelinskie
1e9af8bfe0 github: add CNAME file 2017-01-22 16:59:14 -05:00
Jimmy Zelinskie
062a480737 docs: init middleware docs 2017-01-22 16:58:50 -05:00
Jimmy Zelinskie
8a9f70825f Merge pull request #270 from jzelinskie/docs
docs: add github redirect
2017-01-22 16:45:02 -05:00
Jimmy Zelinskie
23ac850693 docs: add github redirect 2017-01-21 20:39:25 -05:00
mrd0ll4r
03f0c977d3 Merge pull request #258 from mrd0ll4r/sanitation-hook
middleware: add sanitization hook
2017-01-21 21:06:19 +01:00
Leo Balduf
3ae3843944 bittorrent: add AddressField to ScrapeRequest 2017-01-20 20:34:39 +01:00
Leo Balduf
3c098c0703 middleware: add sanitization hook 2017-01-20 20:29:59 +01:00
mrd0ll4r
91ce2aaf77 Merge pull request #247 from mrd0ll4r/varinterval
middleware: add varinterval
2016-12-30 23:07:47 +01:00
Leo Balduf
fe8c74bd9c middleware: add varinterval 2016-12-16 14:19:02 +01:00
Jimmy Zelinskie
f4dcf1c3fe Merge pull request #260 from jzelinskie/stopperpkg
stopper: move to pkg package
2016-12-13 15:42:50 -05:00
Jimmy Zelinskie
e3e545e22e stopper: move to pkg package
This also adds a package docs for the stopper package.
2016-12-11 21:36:01 -05:00
mrd0ll4r
f180d54f6d Merge pull request #253 from mrd0ll4r/prand
prand: add Container
2016-11-30 20:14:22 +01:00
Leo Balduf
79213c6bbd prand: add Container 2016-11-30 19:54:57 +01:00
mrd0ll4r
2a4b263955 Merge pull request #257 from mrd0ll4r/config-reload
cmd/chihaya: add config reloading via SIGUSR1
2016-11-29 09:25:34 +01:00
Leo Balduf
6b1d4c7ed5 cmd/chihaya: add config reloading via SIGUSR1
Fixes #215
2016-11-28 23:48:00 +01:00
mrd0ll4r
f3690011a7 Merge pull request #255 from mrd0ll4r/peerstore-test
storage: add TestPeerStore test
2016-11-27 20:45:22 +00:00
Leo Balduf
70ceb96313 storage: add TestPeerStore test 2016-11-27 21:43:22 +01:00
mrd0ll4r
989cc4deba Merge pull request #256 from mrd0ll4r/http-godoc-fix
http: fix godoc
2016-11-27 20:40:24 +00:00
Leo Balduf
e4e6ef4bfd http: fix godoc 2016-11-27 10:56:51 +01:00
93 changed files with 8191 additions and 1417 deletions

3
.github/FUNDING.yml vendored Normal file
View file

@ -0,0 +1,3 @@
---
github:
- "jzelinskie"

23
.github/dependabot.yml vendored Normal file
View file

@ -0,0 +1,23 @@
---
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "monthly"
labels:
- "component/dependencies"
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "monthly"
labels:
- "component/dependencies"
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "monthly"
labels:
- "component/dependencies"

112
.github/workflows/build.yaml vendored Normal file
View file

@ -0,0 +1,112 @@
---
name: "Build & Test"
on:
push:
branches:
- "!dependabot/*"
- "main"
pull_request:
branches: ["*"]
jobs:
build:
name: "Go Build"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Build"
run: "go build ./cmd/..."
unit:
name: "Run Unit Tests"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Run `go test`"
run: "go test -race ./..."
e2e-mem:
name: "E2E Memory Tests"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Install and configure chihaya"
run: |
go install ./cmd/chihaya
cat ./dist/example_config.yaml
- name: "Run end-to-end tests"
run: |
chihaya --config=./dist/example_config.yaml --debug &
pid=$!
sleep 2
chihaya e2e --debug
kill $pid
e2e-redis:
name: "E2E Redis Tests"
runs-on: "ubuntu-latest"
services:
redis:
image: "redis"
ports: ["6379:6379"]
options: "--entrypoint redis-server"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Install and configure chihaya"
run: |
go install ./cmd/chihaya
curl -LO https://github.com/jzelinskie/faq/releases/download/0.0.6/faq-linux-amd64
chmod +x faq-linux-amd64
./faq-linux-amd64 '.chihaya.storage = {"config":{"gc_interval":"3m","peer_lifetime":"31m","prometheus_reporting_interval":"1s","redis_broker":"redis://127.0.0.1:6379/0","redis_connect_timeout":"15s","redis_read_timeout":"15s","redis_write_timeout":"15s"},"name":"redis"}' ./dist/example_config.yaml > ./dist/example_redis_config.yaml
cat ./dist/example_redis_config.yaml
- name: "Run end-to-end tests"
run: |
chihaya --config=./dist/example_redis_config.yaml --debug &
pid=$!
sleep 2
chihaya e2e --debug
kill $pid
image-build:
name: "Docker Build"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "docker/setup-qemu-action@v1"
- uses: "docker/setup-buildx-action@v1"
with:
driver-opts: "image=moby/buildkit:master"
- uses: "docker/build-push-action@v1"
with:
push: false
tags: "latest"
helm:
name: "Helm Template"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- name: "Install Helm"
uses: "engineerd/configurator@v0.0.5"
with:
name: "helm"
pathInArchive: "linux-amd64/helm"
fromGitHubReleases: true
repo: "helm/helm"
version: "^v3"
urlTemplate: "https://get.helm.sh/helm-{{version}}-linux-amd64.tar.gz"
token: "${{ secrets.GITHUB_TOKEN }}"
- name: "Run `helm template`"
working-directory: "./dist/helm/chihaya"
run: "helm template . --debug"

86
.github/workflows/lint.yaml vendored Normal file
View file

@ -0,0 +1,86 @@
---
name: "Lint"
on:
push:
branches:
- "!dependabot/*"
- "main"
pull_request:
branches: ["*"]
jobs:
go-mod-tidy:
name: "Lint Go Modules"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Run `go mod tidy`"
run: "go mod tidy && bash -c '[ $(git status --porcelain | tee /dev/fd/2 | wc -c) -eq 0 ]'"
go-fmt:
name: "Format Go"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Install gofumpt"
run: "go install mvdan.cc/gofumpt@latest"
- name: "Run `gofumpt`"
run: |
GOFUMPT_OUTPUT="$(find . -iname '*.go' -type f | xargs gofumpt -d)"
if [ -n "$GOFUMPT_OUTPUT" ]; then
echo "The following files are not correctly formatted:"
echo "${GOFUMPT_OUTPUT}"
exit 1
fi
go-lint:
name: "Lint Go"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- uses: "golangci/golangci-lint-action@v2"
with:
version: "v1.43"
skip-go-installation: true
skip-pkg-cache: true
skip-build-cache: false
extra-lint:
name: "Lint YAML & Markdown"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "bewuethr/yamllint-action@v1.1.1"
with:
config-file: ".yamllint"
- uses: "nosborn/github-action-markdown-cli@v2.0.0"
with:
files: "."
config_file: ".markdownlint.yaml"
codeql:
name: "Analyze with CodeQL"
runs-on: "ubuntu-latest"
permissions:
actions: "read"
contents: "read"
security-events: "write"
strategy:
fail-fast: false
matrix:
language: ["go"]
steps:
- uses: "actions/checkout@v2"
- uses: "github/codeql-action/init@v1"
with:
languages: "${{ matrix.language }}"
- uses: "github/codeql-action/autobuild@v1"
- uses: "github/codeql-action/analyze@v1"

50
.golangci.yaml Normal file
View file

@ -0,0 +1,50 @@
---
run:
timeout: "5m"
output:
sort-results: true
linters-settings:
goimports:
local-prefixes: "github.com/chihaya/chihaya"
gosec:
excludes:
- "G404" # Allow the usage of math/rand
linters:
enable:
- "bidichk"
- "bodyclose"
- "deadcode"
- "errcheck"
- "errname"
- "errorlint"
- "gofumpt"
- "goimports"
- "goprintffuncname"
- "gosec"
- "gosimple"
- "govet"
- "ifshort"
- "importas"
- "ineffassign"
- "makezero"
- "prealloc"
- "predeclared"
- "revive"
- "rowserrcheck"
- "staticcheck"
- "structcheck"
- "stylecheck"
- "tenv"
- "typecheck"
- "unconvert"
- "unused"
- "varcheck"
- "wastedassign"
- "whitespace"
issues:
include:
- "EXC0012" # Exported should have comment
- "EXC0013" # Package comment should be of form
- "EXC0014" # Comment on exported should be of form
- "EXC0015" # Should have a package comment

3
.markdownlint.yaml Normal file
View file

@ -0,0 +1,3 @@
---
line-length: false
no-hard-tabs: false

View file

@ -1,22 +0,0 @@
language: go
go:
- 1.7
sudo: false
install:
- go get -t ./...
- go get -u github.com/golang/lint/golint
- go get -u golang.org/x/tools/cmd/goimports
script:
- go test -v $(go list ./... | grep -v /vendor/)
- go vet $(go list ./... | grep -v /vendor/)
- diff <(goimports -d $(find . -type f -name '*.go' -not -path "./vendor/*")) <(printf "")
- (for d in $(go list ./... | grep -v /vendor/); do diff <(golint $d) <(printf "") || exit 1; done)
notifications:
irc:
channels:
- irc.freenode.net#chihaya
use_notice: true
skip_join: true
on_success: always
on_failure: always
email: false

11
.yamllint Normal file
View file

@ -0,0 +1,11 @@
# vim: ft=yaml
---
yaml-files:
- "*.yaml"
- "*.yml"
- ".yamllint"
ignore: "dist/helm/"
extends: "default"
rules:
quoted-strings: "enable"
line-length: "disable"

View file

@ -1,78 +1,3 @@
## Discussion
## Contributing to LBRY
Long-term discussion and bug reports are maintained via [GitHub Issues].
Code review is done via [GitHub Pull Requests].
Real-time discussion is done via [freenode IRC].
[GitHub Issues]: https://github.com/chihaya/chihaya/issues
[GitHub Pull Requests]: https://github.com/chihaya/chihaya/pulls
[freenode IRC]: http://webchat.freenode.net/?channels=chihaya
## Pull Request Procedure
If you're looking to contribute, search the GitHub for issues labeled "low-hanging fruit".
You can also hop into IRC and ask a developer who's online for their opinion.
Small, self-describing fixes are perfectly fine to submit without discussion.
However, please do not submit a massive Pull Request without prior communication.
Large, unannounced changes usually lead to confusion and time wasted for everyone.
If you were planning to write a large change, post an issue on GitHub first and discuss it.
Pull Requests will be treated as "review requests", and we will give feedback we expect to see corrected on style and substance before merging.
Changes contributed via Pull Request should focus on a single issue at a time.
We will not accept pull-requests that try to "sneak" unrelated changes in.
The average contribution flow is as follows:
- Determine what to work on via creating an issue or finding an issue you want to solve.
- Create a topic branch from where you want to base your work. This is usually `master`.
- Make commits of logical units.
- Make sure your commit messages are in the proper format
- Push your changes to a topic branch in your fork of the repository.
- Submit a pull request.
- Your PR will be reviewed and merged by one of the maintainers.
- You may be asked to make changes and [rebase] your commits.
[rebase]: https://git-scm.com/book/en/v2/Git-Branching-Rebasing
## Style
Any new files should include the license header found at the top of every source file.
### Go
The project follows idiomatic [Go conventions] for style.
If you're just starting out writing Go, you can check out this [meta-package] that documents idiomatic style decisions you will find in open source Go code.
All files should have `gofmt` executed on them and code should strive to have full coverage of static analysis tools like [govet] and [golint].
[Go conventions]: https://github.com/golang/go/wiki/CodeReviewComments
[meta-package]: https://github.com/jzelinskie/conventions
[govet]: https://golang.org/cmd/vet
[golint]: https://github.com/golang/lint
### Commit Messages
We follow a rough convention for commit messages that is designed to answer two questions: what changed and why.
The subject line should feature the what and the body of the commit should describe the why.
```
scripts: add the test-cluster command
this uses tmux to setup a test cluster that you can easily kill and
start for debugging.
Fixes #38
```
The format can be described more formally as follows:
```
<subsystem>: <what changed>
<BLANK LINE>
<why this change was made>
<BLANK LINE>
<footer>
```
The first line is the subject and should be no longer than 70 characters, the second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various git tools.
https://lbry.tech/contribute

View file

@ -1,23 +1,26 @@
FROM golang:alpine
MAINTAINER Jimmy Zelinskie <jimmyzelinskie@gmail.com>
FROM golang:alpine AS build-env
LABEL maintainer "Jimmy Zelinskie <jimmyzelinskie+git@gmail.com>"
# Install OS-level dependencies.
RUN apk update && \
apk add curl git && \
curl https://glide.sh/get | sh
RUN apk add --no-cache curl git
# Copy our source code into the container.
WORKDIR /go/src/github.com/chihaya/chihaya
ADD . /go/src/github.com/chihaya/chihaya
COPY . /go/src/github.com/chihaya/chihaya
# Install our golang dependencies and compile our binary.
RUN glide install
RUN go install github.com/chihaya/chihaya/cmd/chihaya
RUN CGO_ENABLED=0 go install ./cmd/chihaya
# Delete the compiler from the container.
# This makes the container much smaller when using Quay's squashing feature.
RUN rm -r /usr/local/go
FROM alpine:latest
RUN apk add --no-cache ca-certificates
COPY --from=build-env /go/bin/chihaya /chihaya
RUN adduser -D chihaya
# Expose a docker interface to our binary.
EXPOSE 6880 6881
ENTRYPOINT ["chihaya"]
EXPOSE 6880 6969
# Drop root privileges
USER chihaya
ENTRYPOINT ["/chihaya"]

18
LICENSE
View file

@ -1,3 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015-2022 LBRY Inc
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the
following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Chihaya is released under a BSD 2-Clause license, reproduced below.
Copyright (c) 2015, The Chihaya Authors

233
README.md
View file

@ -1,123 +1,146 @@
# Chihaya
# LBRY Tracker
[![Build Status](https://api.travis-ci.org/chihaya/chihaya.svg?branch=master)](https://travis-ci.org/chihaya/chihaya)
[![Docker Repository on Quay.io](https://quay.io/repository/jzelinskie/chihaya/status "Docker Repository on Quay.io")](https://quay.io/repository/jzelinskie/chihaya)
[![Go Report Card](https://goreportcard.com/badge/github.com/chihaya/chihaya)](https://goreportcard.com/report/github.com/chihaya/chihaya)
[![GoDoc](https://godoc.org/github.com/chihaya/chihaya?status.svg)](https://godoc.org/github.com/chihaya/chihaya)
[![License](https://img.shields.io/badge/license-BSD-blue.svg)](https://en.wikipedia.org/wiki/BSD_licenses#2-clause_license_.28.22Simplified_BSD_License.22_or_.22FreeBSD_License.22.29)
[![IRC Channel](https://img.shields.io/badge/freenode-%23chihaya-blue.svg "IRC Channel")](http://webchat.freenode.net/?channels=chihaya)
The LBRY tracker is a server that helps peers find each other. It was forked from [Chihaya](https://github.com/chihaya/chihaya), an open-source [BitTorrent tracker](https://en.wikipedia.org/wiki/BitTorrent_tracker).
**Note:** The master branch may be in an unstable or even broken state during development.
Please use [releases] instead of the master branch in order to get stable binaries.
Chihaya is an open source [BitTorrent tracker] written in [Go].
## Installation and Usage
Differentiating features include:
### Building from HEAD
- Protocol-agnostic middleware
- HTTP and UDP frontends
- IPv4 and IPv6 support
- [YAML] configuration
- Metrics via [Prometheus]
[releases]: https://github.com/chihaya/chihaya/releases
[BitTorrent tracker]: http://en.wikipedia.org/wiki/BitTorrent_tracker
[Go]: https://golang.org
[YAML]: http://yaml.org
[Prometheus]: http://prometheus.io
## Why Chihaya?
Chihaya is built for developers looking to integrate BitTorrent into a preexisting production environment.
Chihaya's pluggable architecture and middleware framework offers a simple and flexible integration point that abstracts the BitTorrent tracker protocols.
The most common use case for Chihaya is integration with the deployment of cloud software.
[OpenBittorrent]: https://openbittorrent.com
### Production Use
#### Facebook
[Facebook] uses BitTorrent to deploy new versions of their software.
In order to optimize the flow of traffic within their datacenters, Chihaya is configured to prefer peers within the same subnet.
Because Facebook organizes their network such that server racks are allocated IP addresses in the same subnet, the vast majority of deployment traffic never impacts the congested areas of their network.
[Facebook]: https://facebook.com
#### CoreOS
[Quay] is a container registry that offers the ability to download containers via BitTorrent in order to speed up large or geographically distant deployments.
Announce URLs from Quay's torrent files contain a [JWT] in order to allow Chihaya to verify that an infohash was approved by the registry.
By verifying the infohash, Quay can be sure that only their content is being shared by their tracker.
[Quay]: https://quay.io
[JWT]: https://jwt.io
## Development
### Getting Started
In order to compile the project, the [latest stable version of Go] and a [working Go environment] are required.
In order to compile the project, the [latest stable version of Go] and knowledge of a [working Go environment] are required.
```sh
$ go get -t -u github.com/chihaya/chihaya
$ go install github.com/chihaya/chihaya/cmd/chihaya
git clone git@github.com:lbryio/tracker.git
cd tracker
go build ./cmd/chihaya
./chihaya --help
```
[latest stable version of Go]: https://golang.org/dl
[working Go environment]: https://golang.org/doc/code.html
### Contributing
### Testing
Long-term discussion and bug reports are maintained via [GitHub Issues].
Code review is done via [GitHub Pull Requests].
Real-time discussion is done via [freenode IRC].
The following will run all tests and benchmarks.
Removing `-bench` will just run unit tests.
For more information read [CONTRIBUTING.md].
[GitHub Issues]: https://github.com/chihaya/chihaya/issues
[GitHub Pull Requests]: https://github.com/chihaya/chihaya/pulls
[freenode IRC]: http://webchat.freenode.net/?channels=chihaya
[CONTRIBUTING.md]: https://github.com/chihaya/chihaya/blob/master/CONTRIBUTING.md
### Architecture
```
+----------------------+
| BitTorrent Client |<--------------+
+----------------------+ |
| |
| |
| |
+------------v--------------------------+-------------------+-------------------------+
|+----------------------+ +----------------------+frontend| chihaya|
|| Parser | | Writer | | |
|+----------------------+ +----------------------+ | |
| | ^ | |
+------------+--------------------------+-------------------+ |
+------------v--------------------------+-------------------+ |
|+----------------------+ +----------------------+ logic| |
|| PreHook Middleware |-->| Response Generator |<-------|-------------+ |
|+----------------------+ +----------------------+ | | |
| | | |
|+----------------------+ | +----------------------+|
|| PostHook Middleware |-----------------------------------|>| Storage ||
|+----------------------+ | +----------------------+|
| | |
+-----------------------------------------------------------+-------------------------+
```sh
go test -bench $(go list ./...)
```
BitTorrent clients send Announce and Scrape requests to a _Frontend_.
Frontends parse requests and write responses for the particular protocol they implement.
The _TrackerLogic_ interface is used to generate responses for their requests and optionally perform a task after responding to a client.
A configurable chain of _PreHook_ and _PostHook_ middleware is used to construct an instance of TrackerLogic.
PreHooks are middleware that are executed before the response has been written.
After all PreHooks have executed, any missing response fields that are required are filled by reading out of the configured implementation of the _Storage_ interface.
PostHooks are asynchronous tasks that occur after a response has been delivered to the client.
Request data is written to the storage asynchronously in one of these PostHooks.
The tracker executable contains a command to end-to-end test a BitTorrent tracker.
See
## Related projects
```sh
tracker --help
```
- [BitTorrent.org](https://github.com/bittorrent/bittorrent.org): a static website containing the BitTorrent spec and all BEPs
- [OpenTracker](http://erdgeist.org/arts/software/opentracker): a popular BitTorrent tracker written in C
- [Ocelot](https://github.com/WhatCD/Ocelot): a private BitTorrent tracker written in C++
### Configuration
Configuration of the tracker is done via one YAML configuration file.
The `dist/` directory contains an example configuration file.
Files and directories under `docs/` contain detailed information about configuring middleware, storage implementations, architecture etc.
This is an example for a UDP server running on 9252 with metrics enabled. Remember to **change the private key** to some random string.
```
---
chihaya:
announce_interval: "30m"
min_announce_interval: "15m"
metrics_addr: "0.0.0.0:6880"
udp:
addr: "0.0.0.0:9252"
max_clock_skew: "10s"
private_key: ">>>>CHANGE THIS TO SOME RANDOM THING<<<<"
enable_request_timing: false
allow_ip_spoofing: false
max_numwant: 100
default_numwant: 50
max_scrape_infohashes: 50
storage:
name: "memory"
config:
gc_interval: "3m"
peer_lifetime: "31m"
shard_count: 1024
prometheus_reporting_interval: "1s"
```
# Running from Docker
This section assumes `docker` and `docker-compose` to be installed on a Linux distro. Please check official docs on how to install [Docker Engine](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/).
## Docker Compose from lbry/tracker
In order to define a tracker service and let Docker Compose manage it, create a file named `docker-compose.yml` with:
```
version: "3"
services:
tracker:
image: lbry/tracker
command: --config /config/conf.yml
volumes:
- .:/config
network_mode: host
restart: always
```
Unfortunately the tracker does not work without `network_mode: host` due to a bug with UDP on Docker. In this mode, firewall configuration needs to be done manually. If using `ufw`, try `ufw allow 9252`.
Now, move the configuration to the same directory as `docker-compose.yml`, naming it `conf.yml`. If it is not ready, check the configuration section above.
Start the tracker by running the following in the same directory as the compose file:
`docker-compose up -d`
Logs can be read with:
`docker-compose logs`
To stop:
`docker-compose down`
## Building the container
A Dockerfile is provided within the repo. To build the container locally, run this command on the same directory the repo was cloned:
`sudo docker build -f Dockerfile . -t some_name/tracker:latest`
It will produce an image called `some_name/tracker`, which can be used in the Docker Compose section.
# Running from source as a service
For ease of maintenance, it is recommended to run the tracker as a service.
This is an example of running it as the current user using `systemd`:
```
[Unit]
Description=Chihaya BT tracker
After=network.target
[Service]
Type=simple
#User=chihaya
#Group=chihaya
WorkingDirectory=/home/user/github/tracker
ExecStart=/home/user/github/tracker/chihaya --config dist/example_config.yaml
Restart=on-failure
[Install]
WantedBy=multi-user.target
```
To try it, change `/home/user/github/tracker` to where the code was cloned and run:
```bash
mkdir -p ~/.config/systemd/user
# PASTE FILE IN ~/.config/systemd/user/tracker.service
systemctl --user enable tracker
systemctl --user start tracker
systemctl --user status tracker
```
## Contributing
Contributions to this project are welcome, encouraged, and compensated. For more details, please check [this](https://lbry.tech/contribute) link.
## License
LBRY's code changes are MIT licensed, and the upstream Chihaya code is licensed under a BSD 2-Clause license. For the full license, see [LICENSE](LICENSE).
## Security
We take security seriously. Please contact security@lbry.com regarding any security issues. [Our PGP key is here](https://lbry.com/faq/pgp-key) if you need it.
## Contact
The primary contact for this project is [@shyba](mailto:vshyba@lbry.com).

View file

@ -4,8 +4,11 @@
package bittorrent
import (
"fmt"
"net"
"time"
"github.com/chihaya/chihaya/pkg/log"
)
// PeerID represents a peer ID.
@ -24,6 +27,16 @@ func PeerIDFromBytes(b []byte) PeerID {
return PeerID(buf)
}
// String implements fmt.Stringer, returning the base16 encoded PeerID.
func (p PeerID) String() string {
return fmt.Sprintf("%x", p[:])
}
// RawString returns a 20-byte string of the raw bytes of the ID.
func (p PeerID) RawString() string {
return string(p[:])
}
// PeerIDFromString creates a PeerID from a string.
//
// It panics if s is not 20 bytes long.
@ -66,11 +79,24 @@ func InfoHashFromString(s string) InfoHash {
return InfoHash(buf)
}
// String implements fmt.Stringer, returning the base16 encoded InfoHash.
func (i InfoHash) String() string {
return fmt.Sprintf("%x", i[:])
}
// RawString returns a 20-byte string of the raw bytes of the InfoHash.
func (i InfoHash) RawString() string {
return string(i[:])
}
// AnnounceRequest represents the parsed parameters from an announce request.
type AnnounceRequest struct {
Event Event
InfoHash InfoHash
Compact bool
EventProvided bool
NumWantProvided bool
IPProvided bool
NumWant uint32
Left uint64
Downloaded uint64
@ -80,6 +106,24 @@ type AnnounceRequest struct {
Params
}
// LogFields renders the current response as a set of log fields.
func (r AnnounceRequest) LogFields() log.Fields {
return log.Fields{
"event": r.Event,
"infoHash": r.InfoHash,
"compact": r.Compact,
"eventProvided": r.EventProvided,
"numWantProvided": r.NumWantProvided,
"ipProvided": r.IPProvided,
"numWant": r.NumWant,
"left": r.Left,
"downloaded": r.Downloaded,
"uploaded": r.Uploaded,
"peer": r.Peer,
"params": r.Params,
}
}
// AnnounceResponse represents the parameters used to create an announce
// response.
type AnnounceResponse struct {
@ -92,37 +136,116 @@ type AnnounceResponse struct {
IPv6Peers []Peer
}
// LogFields renders the current response as a set of log fields.
func (r AnnounceResponse) LogFields() log.Fields {
return log.Fields{
"compact": r.Compact,
"complete": r.Complete,
"interval": r.Interval,
"minInterval": r.MinInterval,
"ipv4Peers": r.IPv4Peers,
"ipv6Peers": r.IPv6Peers,
}
}
// ScrapeRequest represents the parsed parameters from a scrape request.
type ScrapeRequest struct {
AddressFamily AddressFamily
InfoHashes []InfoHash
Params Params
}
// LogFields renders the current response as a set of log fields.
func (r ScrapeRequest) LogFields() log.Fields {
return log.Fields{
"addressFamily": r.AddressFamily,
"infoHashes": r.InfoHashes,
"params": r.Params,
}
}
// ScrapeResponse represents the parameters used to create a scrape response.
//
// The Scrapes must be in the same order as the InfoHashes in the corresponding
// ScrapeRequest.
type ScrapeResponse struct {
Files map[InfoHash]Scrape
Files []Scrape
}
// LogFields renders the current response as a set of Logrus fields.
func (sr ScrapeResponse) LogFields() log.Fields {
return log.Fields{
"files": sr.Files,
}
}
// Scrape represents the state of a swarm that is returned in a scrape response.
type Scrape struct {
InfoHash InfoHash
Snatches uint32
Complete uint32
Incomplete uint32
}
// AddressFamily is the address family of an IP address.
type AddressFamily uint8
func (af AddressFamily) String() string {
switch af {
case IPv4:
return "IPv4"
case IPv6:
return "IPv6"
default:
panic("tried to print unknown AddressFamily")
}
}
// AddressFamily constants.
const (
IPv4 AddressFamily = iota
IPv6
)
// IP is a net.IP with an AddressFamily.
type IP struct {
net.IP
AddressFamily
}
func (ip IP) String() string {
return ip.IP.String()
}
// Peer represents the connection details of a peer that is returned in an
// announce response.
type Peer struct {
ID PeerID
IP net.IP
IP IP
Port uint16
}
// String implements fmt.Stringer to return a human-readable representation.
// The string will have the format <PeerID>@[<IP>]:<port>, for example
// "0102030405060708090a0b0c0d0e0f1011121314@[10.11.12.13]:1234"
func (p Peer) String() string {
return fmt.Sprintf("%s@[%s]:%d", p.ID.String(), p.IP.String(), p.Port)
}
// LogFields renders the current peer as a set of Logrus fields.
func (p Peer) LogFields() log.Fields {
return log.Fields{
"ID": p.ID,
"IP": p.IP,
"port": p.Port,
}
}
// Equal reports whether p and x are the same.
func (p Peer) Equal(x Peer) bool { return p.EqualEndpoint(x) && p.ID == x.ID }
// EqualEndpoint reports whether p and x have the same endpoint.
func (p Peer) EqualEndpoint(x Peer) bool { return p.Port == x.Port && p.IP.Equal(x.IP) }
func (p Peer) EqualEndpoint(x Peer) bool { return p.Port == x.Port && p.IP.Equal(x.IP.IP) }
// ClientError represents an error that should be exposed to the client over
// the BitTorrent protocol implementation.

View file

@ -0,0 +1,53 @@
package bittorrent
import (
"fmt"
"net"
"testing"
"github.com/stretchr/testify/require"
)
var (
b = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
expected = "0102030405060708090a0b0c0d0e0f1011121314"
)
var peerStringTestCases = []struct {
input Peer
expected string
}{
{
input: Peer{
ID: PeerIDFromBytes(b),
IP: IP{net.IPv4(10, 11, 12, 1), IPv4},
Port: 1234,
},
expected: fmt.Sprintf("%s@[10.11.12.1]:1234", expected),
},
{
input: Peer{
ID: PeerIDFromBytes(b),
IP: IP{net.ParseIP("2001:db8::ff00:42:8329"), IPv6},
Port: 1234,
},
expected: fmt.Sprintf("%s@[2001:db8::ff00:42:8329]:1234", expected),
},
}
func TestPeerID_String(t *testing.T) {
s := PeerIDFromBytes(b).String()
require.Equal(t, expected, s)
}
func TestInfoHash_String(t *testing.T) {
s := InfoHashFromBytes(b).String()
require.Equal(t, expected, s)
}
func TestPeer_String(t *testing.T) {
for _, c := range peerStringTestCases {
got := c.input.String()
require.Equal(t, c.expected, got)
}
}

View file

@ -5,7 +5,7 @@ import (
)
func TestClientID(t *testing.T) {
var clientTable = []struct{ peerID, clientID string }{
clientTable := []struct{ peerID, clientID string }{
{"-AZ3034-6wfG2wk6wWLc", "AZ3034"},
{"-AZ3042-6ozMq5q6Q3NX", "AZ3042"},
{"-BS5820-oy4La2MWGEFj", "BS5820"},
@ -43,11 +43,13 @@ func TestClientID(t *testing.T) {
}
for _, tt := range clientTable {
t.Run(tt.peerID, func(t *testing.T) {
var clientID ClientID
copy(clientID[:], []byte(tt.clientID))
parsedID := NewClientID(PeerIDFromString(tt.peerID))
if parsedID != clientID {
t.Error("Incorrectly parsed peer ID", tt.peerID, "as", parsedID)
}
})
}
}

View file

@ -1,13 +1,14 @@
package bittorrent
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNew(t *testing.T) {
var table = []struct {
table := []struct {
data string
expected Event
expectedErr error
@ -22,8 +23,17 @@ func TestNew(t *testing.T) {
}
for _, tt := range table {
t.Run(fmt.Sprintf("%#v expecting %s", tt.data, nilPrinter(tt.expectedErr)), func(t *testing.T) {
got, err := NewEvent(tt.data)
assert.Equal(t, err, tt.expectedErr, "errors should equal the expected value")
assert.Equal(t, got, tt.expected, "events should equal the expected value")
require.Equal(t, err, tt.expectedErr, "errors should equal the expected value")
require.Equal(t, got, tt.expected, "events should equal the expected value")
})
}
}
func nilPrinter(err error) string {
if err == nil {
return "nil"
}
return err.Error()
}

View file

@ -5,6 +5,8 @@ import (
"net/url"
"strconv"
"strings"
"github.com/chihaya/chihaya/pkg/log"
)
// Params is used to fetch (optional) request parameters from an Announce.
@ -39,6 +41,10 @@ var ErrKeyNotFound = errors.New("query: value for the provided key does not exis
// with invalid length.
var ErrInvalidInfohash = ClientError("provided invalid infohash")
// ErrInvalidQueryEscape is returned when a query string contains invalid
// escapes.
var ErrInvalidQueryEscape = ClientError("invalid query escape")
// QueryParams parses a URL Query and implements the Params interface with some
// additional helpers.
type QueryParams struct {
@ -48,6 +54,37 @@ type QueryParams struct {
infoHashes []InfoHash
}
type routeParamsKey struct{}
// RouteParamsKey is a key for the context of a request that
// contains the named parameters from the http router.
var RouteParamsKey = routeParamsKey{}
// RouteParam is a type that contains the values from the named parameters
// on the route.
type RouteParam struct {
Key string
Value string
}
// RouteParams is a collection of RouteParam instances.
type RouteParams []RouteParam
// ByName returns the value of the first RouteParam that matches the given
// name. If no matching RouteParam is found, an empty string is returned.
// In the event that a "catch-all" parameter is provided on the route and
// no value is matched, an empty string is returned. For example: a route of
// "/announce/*param" matches on "/announce/". However, ByName("param") will
// return an empty string.
func (rp RouteParams) ByName(name string) string {
for _, p := range rp {
if p.Key == name {
return p.Value
}
}
return ""
}
// ParseURLData parses a request URL or UDP URLData as defined in BEP41.
// It expects a concatenated string of the request's path and query parts as
// defined in RFC 3986. As both the udp: and http: scheme used by BitTorrent
@ -63,6 +100,10 @@ type QueryParams struct {
// parse each value as an InfoHash and return an error if parsing fails. All
// InfoHashes are collected and can later be retrieved by calling the InfoHashes
// method.
//
// Also note that any error that is encountered during parsing is returned as a
// ClientError, as this method is expected to be used to parse client-provided
// data.
func ParseURLData(urlData string) (*QueryParams, error) {
var path, query string
@ -76,7 +117,7 @@ func ParseURLData(urlData string) (*QueryParams, error) {
q, err := parseQuery(query)
if err != nil {
return nil, err
return nil, ClientError(err.Error())
}
q.path = path
return q, nil
@ -84,69 +125,55 @@ func ParseURLData(urlData string) (*QueryParams, error) {
// parseQuery parses a URL query into QueryParams.
// The query is expected to exclude the delimiting '?'.
func parseQuery(rawQuery string) (*QueryParams, error) {
var (
keyStart, keyEnd int
valStart, valEnd int
onKey = true
func parseQuery(query string) (q *QueryParams, err error) {
// This is basically url.parseQuery, but with a map[string]string
// instead of map[string][]string for the values.
q = &QueryParams{
query: rawQuery,
query: query,
infoHashes: nil,
params: make(map[string]string),
}
)
for i, length := 0, len(rawQuery); i < length; i++ {
separator := rawQuery[i] == '&' || rawQuery[i] == ';'
last := i == length-1
if separator || last {
if onKey && !last {
keyStart = i + 1
for query != "" {
key := query
if i := strings.IndexAny(key, "&;"); i >= 0 {
key, query = key[:i], key[i+1:]
} else {
query = ""
}
if key == "" {
continue
}
if last && !separator && !onKey {
valEnd = i
value := ""
if i := strings.Index(key, "="); i >= 0 {
key, value = key[:i], key[i+1:]
}
keyStr, err := url.QueryUnescape(rawQuery[keyStart : keyEnd+1])
key, err = url.QueryUnescape(key)
if err != nil {
return nil, err
// QueryUnescape returns an error like "invalid escape: '%x'".
// But frontends record these errors to prometheus, which generates
// a lot of time series.
// We log it here for debugging instead.
log.Debug("failed to unescape query param key", log.Err(err))
return nil, ErrInvalidQueryEscape
}
var valStr string
if valEnd > 0 {
valStr, err = url.QueryUnescape(rawQuery[valStart : valEnd+1])
value, err = url.QueryUnescape(value)
if err != nil {
return nil, err
}
// QueryUnescape returns an error like "invalid escape: '%x'".
// But frontends record these errors to prometheus, which generates
// a lot of time series.
// We log it here for debugging instead.
log.Debug("failed to unescape query param value", log.Err(err))
return nil, ErrInvalidQueryEscape
}
if keyStr == "info_hash" {
if len(valStr) != 20 {
if key == "info_hash" {
if len(value) != 20 {
return nil, ErrInvalidInfohash
}
q.infoHashes = append(q.infoHashes, InfoHashFromString(valStr))
q.infoHashes = append(q.infoHashes, InfoHashFromString(value))
} else {
q.params[strings.ToLower(keyStr)] = valStr
}
valEnd = 0
onKey = true
keyStart = i + 1
} else if rawQuery[i] == '=' {
onKey = false
valStart = i + 1
valEnd = 0
} else if onKey {
keyEnd = i
} else {
valEnd = i
q.params[strings.ToLower(key)] = value
}
}
@ -160,15 +187,15 @@ func (qp *QueryParams) String(key string) (string, bool) {
return value, ok
}
// Uint64 returns a uint parsed from a query. After being called, it is safe to
// Uint returns a uint parsed from a query. After being called, it is safe to
// cast the uint64 to your desired length.
func (qp *QueryParams) Uint64(key string) (uint64, error) {
func (qp *QueryParams) Uint(key string, bitSize int) (uint64, error) {
str, exists := qp.params[key]
if !exists {
return 0, ErrKeyNotFound
}
val, err := strconv.ParseUint(str, 10, 64)
val, err := strconv.ParseUint(str, 10, bitSize)
if err != nil {
return 0, err
}

View file

@ -27,6 +27,12 @@ var (
InvalidQueries = []string{
"/announce?" + "info_hash=%0%a",
}
// See https://github.com/chihaya/chihaya/issues/334.
shouldNotPanicQueries = []string{
"/annnounce?" + "info_hash=" + testPeerID + "&a",
"/annnounce?" + "info_hash=" + testPeerID + "&=b?",
}
)
func mapArrayEqual(boxed map[string][]string, unboxed map[string]string) bool {
@ -84,26 +90,40 @@ func TestParseInvalidURLData(t *testing.T) {
}
}
func BenchmarkParseQuery(b *testing.B) {
for bCount := 0; bCount < b.N; bCount++ {
for parseIndex, parseStr := range ValidAnnounceArguments {
parsedQueryObj, err := parseQuery(parseStr.Encode())
if err != nil {
b.Error(err, parseIndex)
b.Log(parsedQueryObj)
// TestParseShouldNotPanicURLData feeds ParseURLData the malformed query
// strings in shouldNotPanicQueries. The test passes as long as parsing
// does not panic; returned values and errors are intentionally ignored.
func TestParseShouldNotPanicURLData(t *testing.T) {
	for _, parseStr := range shouldNotPanicQueries {
		_, _ = ParseURLData(parseStr)
	}
}
func BenchmarkParseQuery(b *testing.B) {
announceStrings := make([]string, 0)
for i := range ValidAnnounceArguments {
announceStrings = append(announceStrings, ValidAnnounceArguments[i].Encode())
}
b.ResetTimer()
for bCount := 0; bCount < b.N; bCount++ {
i := bCount % len(announceStrings)
parsedQueryObj, err := parseQuery(announceStrings[i])
if err != nil {
b.Error(err, i)
b.Log(parsedQueryObj)
}
}
}
func BenchmarkURLParseQuery(b *testing.B) {
announceStrings := make([]string, 0)
for i := range ValidAnnounceArguments {
announceStrings = append(announceStrings, ValidAnnounceArguments[i].Encode())
}
b.ResetTimer()
for bCount := 0; bCount < b.N; bCount++ {
for parseIndex, parseStr := range ValidAnnounceArguments {
parsedQueryObj, err := url.ParseQuery(parseStr.Encode())
i := bCount % len(announceStrings)
parsedQueryObj, err := url.ParseQuery(announceStrings[i])
if err != nil {
b.Error(err, parseIndex)
b.Error(err, i)
b.Log(parsedQueryObj)
}
}
}
}

49
bittorrent/sanitize.go Normal file
View file

@ -0,0 +1,49 @@
package bittorrent
import (
"net"
"github.com/chihaya/chihaya/pkg/log"
)
// ErrInvalidIP indicates an invalid IP for an Announce.
var ErrInvalidIP = ClientError("invalid IP")

// SanitizeAnnounce enforces a max and default NumWant and coerces the peer's
// IP address into the proper format.
func SanitizeAnnounce(r *AnnounceRequest, maxNumWant, defaultNumWant uint32) error {
	// Apply the default when the client did not ask for a peer count, and
	// clamp requests that exceed the configured maximum.
	switch {
	case !r.NumWantProvided:
		r.NumWant = defaultNumWant
	case r.NumWant > maxNumWant:
		r.NumWant = maxNumWant
	}

	if v4 := r.Peer.IP.To4(); v4 != nil {
		// Canonicalize IPv4 (including v4-mapped-v6) to the 4-byte form.
		r.Peer.IP.IP = v4
		r.Peer.IP.AddressFamily = IPv4
	} else if len(r.Peer.IP.IP) == net.IPv6len { // To4 already ruled out IPv4.
		r.Peer.IP.AddressFamily = IPv6
	} else {
		return ErrInvalidIP
	}

	log.Debug("sanitized announce", r, log.Fields{
		"maxNumWant":     maxNumWant,
		"defaultNumWant": defaultNumWant,
	})
	return nil
}
// SanitizeScrape enforces a max number of infohashes for a single scrape
// request.
func SanitizeScrape(r *ScrapeRequest, maxScrapeInfoHashes uint32) error {
	// Truncate (rather than reject) requests carrying too many infohashes.
	if limit := int(maxScrapeInfoHashes); len(r.InfoHashes) > limit {
		r.InfoHashes = r.InfoHashes[:limit]
	}

	log.Debug("sanitized scrape", r, log.Fields{
		"maxScrapeInfoHashes": maxScrapeInfoHashes,
	})
	return nil
}

View file

@ -5,32 +5,61 @@ import (
"io/ioutil"
"os"
"gopkg.in/yaml.v2"
yaml "gopkg.in/yaml.v2"
httpfrontend "github.com/chihaya/chihaya/frontend/http"
udpfrontend "github.com/chihaya/chihaya/frontend/udp"
"github.com/chihaya/chihaya/frontend/http"
"github.com/chihaya/chihaya/frontend/udp"
"github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/middleware/clientapproval"
"github.com/chihaya/chihaya/middleware/jwt"
"github.com/chihaya/chihaya/storage/memory"
// Imports to register middleware drivers.
_ "github.com/chihaya/chihaya/middleware/clientapproval"
_ "github.com/chihaya/chihaya/middleware/fixedpeer"
_ "github.com/chihaya/chihaya/middleware/jwt"
_ "github.com/chihaya/chihaya/middleware/torrentapproval"
_ "github.com/chihaya/chihaya/middleware/varinterval"
// Imports to register storage drivers.
_ "github.com/chihaya/chihaya/storage/memory"
_ "github.com/chihaya/chihaya/storage/redis"
)
type hookConfig struct {
type storageConfig struct {
Name string `yaml:"name"`
Config interface{} `yaml:"config"`
}
// Config represents the configuration used for executing Chihaya.
type Config struct {
	// ResponseConfig is inlined so its fields appear at the top level of
	// the "chihaya" YAML block.
	middleware.ResponseConfig `yaml:",inline"`

	// MetricsAddr is the address the metrics HTTP server binds to.
	MetricsAddr string `yaml:"metrics_addr"`

	// HTTPConfig and UDPConfig configure the two tracker frontends.
	HTTPConfig http.Config `yaml:"http"`
	UDPConfig  udp.Config  `yaml:"udp"`

	// Storage names a registered storage driver and carries its
	// driver-specific configuration.
	Storage storageConfig `yaml:"storage"`

	// PreHooks and PostHooks configure middleware run before and after a
	// response is generated.
	PreHooks  []middleware.HookConfig `yaml:"prehooks"`
	PostHooks []middleware.HookConfig `yaml:"posthooks"`
}
// PreHookNames returns only the names of the configured middleware.
func (cfg Config) PreHookNames() []string {
	var hookNames []string
	for _, hookCfg := range cfg.PreHooks {
		hookNames = append(hookNames, hookCfg.Name)
	}
	return hookNames
}
// PostHookNames returns only the names of the configured middleware.
func (cfg Config) PostHookNames() []string {
	var hookNames []string
	for _, hookCfg := range cfg.PostHooks {
		hookNames = append(hookNames, hookCfg.Name)
	}
	return hookNames
}
// ConfigFile represents a namespaced YAML configuration file.
type ConfigFile struct {
MainConfigBlock struct {
middleware.Config `yaml:",inline"`
PrometheusAddr string `yaml:"prometheus_addr"`
HTTPConfig httpfrontend.Config `yaml:"http"`
UDPConfig udpfrontend.Config `yaml:"udp"`
Storage memory.Config `yaml:"storage"`
PreHooks []hookConfig `yaml:"prehooks"`
PostHooks []hookConfig `yaml:"posthooks"`
} `yaml:"chihaya"`
Chihaya Config `yaml:"chihaya"`
}
// ParseConfigFile returns a new ConfigFile given the path to a YAML
@ -61,46 +90,3 @@ func ParseConfigFile(path string) (*ConfigFile, error) {
return &cfgFile, nil
}
// CreateHooks creates instances of Hooks for all of the PreHooks and PostHooks
// configured in a ConfigFile.
//
// Hook names that are not recognized are silently skipped. Only pre-hooks
// have implementations here; the post-hook loop is an empty placeholder.
func (cfg ConfigFile) CreateHooks() (preHooks, postHooks []middleware.Hook, err error) {
	for _, hookCfg := range cfg.MainConfigBlock.PreHooks {
		// Re-marshal the generic config value so it can be unmarshaled
		// into the hook-specific config struct below.
		cfgBytes, err := yaml.Marshal(hookCfg.Config)
		if err != nil {
			// The config was just parsed from YAML, so re-marshaling it can
			// only fail on a programmer error.
			panic("failed to remarshal valid YAML")
		}

		switch hookCfg.Name {
		case "jwt":
			var jwtCfg jwt.Config
			err := yaml.Unmarshal(cfgBytes, &jwtCfg)
			if err != nil {
				return nil, nil, errors.New("invalid JWT middleware config: " + err.Error())
			}
			hook, err := jwt.NewHook(jwtCfg)
			if err != nil {
				return nil, nil, errors.New("invalid JWT middleware config: " + err.Error())
			}
			preHooks = append(preHooks, hook)
		case "client approval":
			var caCfg clientapproval.Config
			err := yaml.Unmarshal(cfgBytes, &caCfg)
			if err != nil {
				return nil, nil, errors.New("invalid client approval middleware config: " + err.Error())
			}
			hook, err := clientapproval.NewHook(caCfg)
			if err != nil {
				return nil, nil, errors.New("invalid client approval middleware config: " + err.Error())
			}
			preHooks = append(preHooks, hook)
		}
	}

	for _, hookCfg := range cfg.MainConfigBlock.PostHooks {
		// No post-hook middleware is implemented; presumably this loop is a
		// placeholder for future hook names — confirm before removing.
		switch hookCfg.Name {
		}
	}

	return
}

134
cmd/chihaya/e2e.go Normal file
View file

@ -0,0 +1,134 @@
package main
import (
"crypto/rand"
"fmt"
"time"
"github.com/anacrolix/torrent/tracker"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/pkg/log"
)
// EndToEndRunCmdFunc implements a Cobra command that runs the end-to-end test
// suite for a Chihaya build.
func EndToEndRunCmdFunc(cmd *cobra.Command, args []string) error {
	delay, err := cmd.Flags().GetDuration("delay")
	if err != nil {
		return err
	}

	// runAgainst reads the address flag named flag and, when it is set,
	// runs the announce test suite against that tracker.
	runAgainst := func(flag, label string) error {
		addr, err := cmd.Flags().GetString(flag)
		if err != nil {
			return err
		}
		if len(addr) == 0 {
			return nil
		}
		log.Info("testing " + label + "...")
		if err := test(addr, delay); err != nil {
			return err
		}
		log.Info("success")
		return nil
	}

	// Test the HTTP tracker, then the UDP tracker.
	if err := runAgainst("httpaddr", "HTTP"); err != nil {
		return err
	}
	return runAgainst("udpaddr", "UDP")
}
// generateInfohash returns a cryptographically random 20-byte infohash.
// It panics on any failure, since the e2e suite cannot proceed without one.
func generateInfohash() [20]byte {
	var raw [20]byte
	n, err := rand.Read(raw[:])
	if err != nil {
		panic(err)
	}
	if n != len(raw) {
		panic(fmt.Errorf("not enough randomness? Got %d bytes", n))
	}
	return [20]byte(bittorrent.InfoHashFromBytes(raw[:]))
}
func test(addr string, delay time.Duration) error {
ih := generateInfohash()
return testWithInfohash(ih, addr, delay)
}
// testWithInfohash announces two distinct peers for the same infohash
// against the tracker at url, sleeping delay between the announces, and
// verifies that the second announce is returned exactly the first peer.
func testWithInfohash(infoHash [20]byte, url string, delay time.Duration) error {
	// First peer: announce it so the tracker records it.
	req := tracker.AnnounceRequest{
		InfoHash:   infoHash,
		PeerId:     [20]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
		Downloaded: 50,
		Left:       100,
		Uploaded:   50,
		Event:      tracker.Started,
		IPAddress:  uint32(50<<24 | 10<<16 | 12<<8 | 1),
		NumWant:    50,
		Port:       10001,
	}

	resp, err := tracker.Announce{
		TrackerUrl: url,
		Request:    req,
		UserAgent:  "chihaya-e2e",
	}.Do()
	if err != nil {
		return errors.Wrap(err, "announce failed")
	}

	// The tracker is expected to respond with a single peer here.
	// (Error message normalized with the checks below; the original mixed
	// "expected one peer" and "expected 1 peers".)
	if len(resp.Peers) != 1 {
		return fmt.Errorf("expected 1 peer, got %d", len(resp.Peers))
	}

	time.Sleep(delay)

	// Second peer: different peer ID, IP, and port for the same infohash.
	req = tracker.AnnounceRequest{
		InfoHash:   infoHash,
		PeerId:     [20]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21},
		Downloaded: 50,
		Left:       100,
		Uploaded:   50,
		Event:      tracker.Started,
		IPAddress:  uint32(50<<24 | 10<<16 | 12<<8 | 2),
		NumWant:    50,
		Port:       10002,
	}

	resp, err = tracker.Announce{
		TrackerUrl: url,
		Request:    req,
		UserAgent:  "chihaya-e2e",
	}.Do()
	if err != nil {
		return errors.Wrap(err, "announce failed")
	}

	// The second announce should be handed back only the first peer.
	if len(resp.Peers) != 1 {
		return fmt.Errorf("expected 1 peer, got %d", len(resp.Peers))
	}
	if resp.Peers[0].Port != 10001 {
		// Trailing space removed from the original "got %d " format.
		return fmt.Errorf("expected port 10001, got %d", resp.Peers[0].Port)
	}

	return nil
}

View file

@ -1,166 +1,246 @@
package main
import (
"context"
"errors"
"net/http"
"os"
"os/signal"
"runtime/pprof"
"runtime"
"strings"
"syscall"
"time"
log "github.com/Sirupsen/logrus"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
httpfrontend "github.com/chihaya/chihaya/frontend/http"
udpfrontend "github.com/chihaya/chihaya/frontend/udp"
"github.com/chihaya/chihaya/frontend/http"
"github.com/chihaya/chihaya/frontend/udp"
"github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/storage/memory"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/metrics"
"github.com/chihaya/chihaya/pkg/stop"
"github.com/chihaya/chihaya/storage"
)
func rootCmdRun(cmd *cobra.Command, args []string) error {
debugLog, _ := cmd.Flags().GetBool("debug")
if debugLog {
log.SetLevel(log.DebugLevel)
log.Debugln("debug logging enabled")
}
cpuProfilePath, _ := cmd.Flags().GetString("cpuprofile")
if cpuProfilePath != "" {
log.Infoln("enabled CPU profiling to", cpuProfilePath)
f, err := os.Create(cpuProfilePath)
if err != nil {
return err
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
// Run represents the state of a running instance of Chihaya.
type Run struct {
	// configFilePath is the YAML config file, re-read on every Start.
	configFilePath string
	// peerStore holds peer data; it may be kept alive across a restart
	// (see Stop's keepPeerStore parameter).
	peerStore storage.PeerStore
	// logic is the middleware tracker logic shared by all frontends.
	logic *middleware.Logic
	// sg groups the frontends and metrics server for joint shutdown.
	sg *stop.Group
}
configFilePath, _ := cmd.Flags().GetString("config")
configFile, err := ParseConfigFile(configFilePath)
// NewRun runs an instance of Chihaya.
// It reads the config at configFilePath and immediately starts the
// instance with a fresh peer store.
func NewRun(configFilePath string) (*Run, error) {
	run := &Run{configFilePath: configFilePath}
	return run, run.Start(nil)
}
// Start begins an instance of Chihaya.
// It is optional to provide an instance of the peer store to avoid the
// creation of a new one.
func (r *Run) Start(ps storage.PeerStore) error {
configFile, err := ParseConfigFile(r.configFilePath)
if err != nil {
return errors.New("failed to read config: " + err.Error())
}
cfg := configFile.MainConfigBlock
cfg := configFile.Chihaya
go func() {
promServer := http.Server{
Addr: cfg.PrometheusAddr,
Handler: prometheus.Handler(),
}
log.Infoln("started serving prometheus stats on", cfg.PrometheusAddr)
if err := promServer.ListenAndServe(); err != nil {
log.Fatalln("failed to start prometheus server:", err.Error())
}
}()
r.sg = stop.NewGroup()
// Force the compiler to enforce memory against the storage interface.
peerStore, err := memory.New(cfg.Storage)
log.Info("starting metrics server", log.Fields{"addr": cfg.MetricsAddr})
r.sg.Add(metrics.NewServer(cfg.MetricsAddr))
if ps == nil {
log.Info("starting storage", log.Fields{"name": cfg.Storage.Name})
ps, err = storage.NewPeerStore(cfg.Storage.Name, cfg.Storage.Config)
if err != nil {
return errors.New("failed to create memory storage: " + err.Error())
return errors.New("failed to create storage: " + err.Error())
}
log.Info("started storage", ps)
}
r.peerStore = ps
preHooks, postHooks, err := configFile.CreateHooks()
preHooks, err := middleware.HooksFromHookConfigs(cfg.PreHooks)
if err != nil {
return errors.New("failed to create hooks: " + err.Error())
return errors.New("failed to validate hook config: " + err.Error())
}
logic := middleware.NewLogic(cfg.Config, peerStore, preHooks, postHooks)
postHooks, err := middleware.HooksFromHookConfigs(cfg.PostHooks)
if err != nil {
return errors.New("failed to create TrackerLogic: " + err.Error())
return errors.New("failed to validate hook config: " + err.Error())
}
shutdown := make(chan struct{})
errChan := make(chan error)
var httpFrontend *httpfrontend.Frontend
var udpFrontend *udpfrontend.Frontend
log.Info("starting tracker logic", log.Fields{
"prehooks": cfg.PreHookNames(),
"posthooks": cfg.PostHookNames(),
})
r.logic = middleware.NewLogic(cfg.ResponseConfig, r.peerStore, preHooks, postHooks)
if cfg.HTTPConfig.Addr != "" {
httpFrontend = httpfrontend.NewFrontend(logic, cfg.HTTPConfig)
go func() {
log.Infoln("started serving HTTP on", cfg.HTTPConfig.Addr)
if err := httpFrontend.ListenAndServe(); err != nil {
errChan <- errors.New("failed to cleanly shutdown HTTP frontend: " + err.Error())
log.Info("starting HTTP frontend", cfg.HTTPConfig)
httpfe, err := http.NewFrontend(r.logic, cfg.HTTPConfig)
if err != nil {
return err
}
}()
r.sg.Add(httpfe)
}
if cfg.UDPConfig.Addr != "" {
udpFrontend = udpfrontend.NewFrontend(logic, cfg.UDPConfig)
go func() {
log.Infoln("started serving UDP on", cfg.UDPConfig.Addr)
if err := udpFrontend.ListenAndServe(); err != nil {
errChan <- errors.New("failed to cleanly shutdown UDP frontend: " + err.Error())
}
}()
}
sigChan := make(chan os.Signal)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
select {
case <-sigChan:
case <-shutdown:
}
if udpFrontend != nil {
udpFrontend.Stop()
}
if httpFrontend != nil {
httpFrontend.Stop()
}
for err := range peerStore.Stop() {
log.Info("starting UDP frontend", cfg.UDPConfig)
udpfe, err := udp.NewFrontend(r.logic, cfg.UDPConfig)
if err != nil {
errChan <- err
return err
}
r.sg.Add(udpfe)
}
// Stop hooks.
errs := logic.Stop()
return nil
}
func combineErrors(prefix string, errs []error) error {
errStrs := make([]string, 0, len(errs))
for _, err := range errs {
errChan <- err
errStrs = append(errStrs, err.Error())
}
close(errChan)
}()
return errors.New(prefix + ": " + strings.Join(errStrs, "; "))
}
closed := false
var bufErr error
for err = range errChan {
// Stop shuts down an instance of Chihaya.
//
// Shutdown proceeds in dependency order: first the frontends and metrics
// server, then the middleware logic, and finally — unless keepPeerStore is
// true — the peer store. When keepPeerStore is true, the still-running peer
// store is returned so it can be passed to a subsequent Start (used for
// config reloads); otherwise the returned PeerStore is nil.
func (r *Run) Stop(keepPeerStore bool) (storage.PeerStore, error) {
	log.Debug("stopping frontends and metrics server")
	if errs := r.sg.Stop().Wait(); len(errs) != 0 {
		return nil, combineErrors("failed while shutting down frontends", errs)
	}

	log.Debug("stopping logic")
	if errs := r.logic.Stop().Wait(); len(errs) != 0 {
		return nil, combineErrors("failed while shutting down middleware", errs)
	}

	if !keepPeerStore {
		log.Debug("stopping peer store")
		if errs := r.peerStore.Stop().Wait(); len(errs) != 0 {
			return nil, combineErrors("failed while shutting down peer store", errs)
		}
		// Clear the reference so the returned store is nil below.
		r.peerStore = nil
	}

	return r.peerStore, nil
}
// RootRunCmdFunc implements a Cobra command that runs an instance of Chihaya
// and handles reloading and shutdown via process signals.
func RootRunCmdFunc(cmd *cobra.Command, args []string) error {
configFilePath, err := cmd.Flags().GetString("config")
if err != nil {
if !closed {
close(shutdown)
closed = true
} else {
log.Infoln(bufErr)
return err
}
r, err := NewRun(configFilePath)
if err != nil {
return err
}
ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
reload, _ := signal.NotifyContext(context.Background(), ReloadSignals...)
for {
select {
case <-reload.Done():
log.Info("reloading; received reload signal")
peerStore, err := r.Stop(true)
if err != nil {
return err
}
if err := r.Start(peerStore); err != nil {
return err
}
case <-ctx.Done():
log.Info("shutting down; received shutdown signal")
if _, err := r.Stop(false); err != nil {
return err
}
return nil
}
bufErr = err
}
}
return bufErr
// RootPreRunCmdFunc handles command line flags for the Run command.
// It configures the logger (colors, JSON output, debug level) from the
// persistent flags before the root command runs.
func RootPreRunCmdFunc(cmd *cobra.Command, args []string) error {
	flags := cmd.Flags()

	// Optionally strip ANSI colors from log output.
	noColors, err := flags.GetBool("nocolors")
	if err != nil {
		return err
	}
	if noColors {
		log.SetFormatter(&logrus.TextFormatter{DisableColors: true})
	}

	// Optionally switch to structured JSON log output.
	jsonLog, err := flags.GetBool("json")
	if err != nil {
		return err
	}
	if jsonLog {
		log.SetFormatter(&logrus.JSONFormatter{})
		log.Info("enabled JSON logging")
	}

	// Optionally raise verbosity to debug level.
	debugLog, err := flags.GetBool("debug")
	if err != nil {
		return err
	}
	if debugLog {
		log.SetDebug(true)
		log.Info("enabled debug logging")
	}

	return nil
}
// RootPostRunCmdFunc handles clean up of any state initialized by command line
// flags.
//
// It is currently a no-op; it is kept so cleanup has a registered home once
// a flag allocates state that needs tearing down.
func RootPostRunCmdFunc(cmd *cobra.Command, args []string) error {
	return nil
}
func main() {
var rootCmd = &cobra.Command{
rootCmd := &cobra.Command{
Use: "chihaya",
Short: "BitTorrent Tracker",
Long: "A customizible, multi-protocol BitTorrent Tracker",
Run: func(cmd *cobra.Command, args []string) {
if err := rootCmdRun(cmd, args); err != nil {
log.Fatal(err)
Long: "A customizable, multi-protocol BitTorrent Tracker",
PersistentPreRunE: RootPreRunCmdFunc,
RunE: RootRunCmdFunc,
PersistentPostRunE: RootPostRunCmdFunc,
}
},
rootCmd.PersistentFlags().Bool("debug", false, "enable debug logging")
rootCmd.PersistentFlags().Bool("json", false, "enable json logging")
if runtime.GOOS == "windows" {
rootCmd.PersistentFlags().Bool("nocolors", true, "disable log coloring")
} else {
rootCmd.PersistentFlags().Bool("nocolors", false, "disable log coloring")
}
rootCmd.Flags().String("config", "/etc/chihaya.yaml", "location of configuration file")
rootCmd.Flags().String("cpuprofile", "", "location to save a CPU profile")
rootCmd.Flags().Bool("debug", false, "enable debug logging")
e2eCmd := &cobra.Command{
Use: "e2e",
Short: "exec e2e tests",
Long: "Execute the Chihaya end-to-end test suite",
RunE: EndToEndRunCmdFunc,
}
e2eCmd.Flags().String("httpaddr", "http://127.0.0.1:6969/announce", "address of the HTTP tracker")
e2eCmd.Flags().String("udpaddr", "udp://127.0.0.1:6969", "address of the UDP tracker")
e2eCmd.Flags().Duration("delay", time.Second, "delay between announces")
rootCmd.AddCommand(e2eCmd)
if err := rootCmd.Execute(); err != nil {
log.Fatal(err)
log.Fatal("failed when executing root cobra command: " + err.Error())
}
}

View file

@ -0,0 +1,15 @@
//go:build darwin || freebsd || linux || netbsd || openbsd || dragonfly || solaris
// +build darwin freebsd linux netbsd openbsd dragonfly solaris
package main
import (
"os"
"syscall"
)
// ReloadSignals are the signals that the current OS will send to the process
// when a configuration reload is requested.
//
// On POSIX platforms, SIGUSR1 is the conventional user-defined signal for
// this kind of administrative action.
var ReloadSignals = []os.Signal{
	syscall.SIGUSR1,
}

View file

@ -0,0 +1,14 @@
//go:build windows
// +build windows
package main
import (
"os"
"os/signal"
"syscall"
)
// ReloadSignals are the signals that the current OS will send to the process
// when a configuration reload is requested. On Windows, SIGHUP stands in for
// the POSIX SIGUSR1 used by the unix variant of this file.
//
// NOTE(review): the "os/signal" import above appears unused in this file,
// which would fail compilation on Windows — confirm and drop it if so.
var ReloadSignals = []os.Signal{
	syscall.SIGHUP,
}

197
dist/example_config.yaml vendored Normal file
View file

@ -0,0 +1,197 @@
---
chihaya:
# The interval communicated with BitTorrent clients informing them how
# frequently they should announce in between client events.
announce_interval: "30m"
# The interval communicated with BitTorrent clients informing them of the
# minimal duration between announces.
min_announce_interval: "15m"
# The network interface that will bind to an HTTP endpoint that can be
# scraped by programs collecting metrics.
#
# /metrics serves metrics in the Prometheus format
# /debug/pprof/{cmdline,profile,symbol,trace} serves profiles in the pprof format
metrics_addr: "0.0.0.0:6880"
# This block defines configuration for the tracker's HTTP interface.
# If you do not wish to run this, delete this section.
http:
# The network interface that will bind to an HTTP server for serving
# BitTorrent traffic. Remove this to disable the non-TLS listener.
addr: "0.0.0.0:6969"
# The network interface that will bind to an HTTPS server for serving
# BitTorrent traffic. If set, tls_cert_path and tls_key_path are required.
https_addr: ""
# The path to the required files to listen via HTTPS.
tls_cert_path: ""
tls_key_path: ""
# The timeout durations for HTTP requests.
read_timeout: "5s"
write_timeout: "5s"
# When true, persistent connections will be allowed. Generally this is not
# useful for a public tracker, but helps performance in some cases (use of
# a reverse proxy, or when there are few clients issuing many requests).
enable_keepalive: false
idle_timeout: "30s"
# Whether to time requests.
# Disabling this should increase performance/decrease load.
enable_request_timing: false
# An array of routes to listen on for announce requests. This is an option
# to support trackers that do not listen for /announce or need to listen
# on multiple routes.
#
# This supports named parameters and catch-all parameters as described at
# https://github.com/julienschmidt/httprouter#named-parameters
announce_routes:
- "/announce"
# - "/announce.php"
# An array of routes to listen on for scrape requests. This is an option
# to support trackers that do not listen for /scrape or need to listen
# on multiple routes.
#
# This supports named parameters and catch-all parameters as described at
# https://github.com/julienschmidt/httprouter#named-parameters
scrape_routes:
- "/scrape"
# - "/scrape.php"
# When enabled, the IP address used to connect to the tracker will not
# override the value clients advertise as their IP address.
allow_ip_spoofing: false
# The HTTP Header containing the IP address of the client.
# This is only necessary if using a reverse proxy.
real_ip_header: "x-real-ip"
# The maximum number of peers returned for an individual request.
max_numwant: 100
# The default number of peers returned for an individual request.
default_numwant: 50
# The maximum number of infohashes that can be scraped in one request.
max_scrape_infohashes: 50
# This block defines configuration for the tracker's UDP interface.
# If you do not wish to run this, delete this section.
udp:
# The network interface that will bind to a UDP server for serving
# BitTorrent traffic.
addr: "0.0.0.0:6969"
# The leeway for a timestamp on a connection ID.
max_clock_skew: "10s"
# The key used to encrypt connection IDs.
private_key: "paste a random string here that will be used to hmac connection IDs"
# Whether to time requests.
# Disabling this should increase performance/decrease load.
enable_request_timing: false
# When enabled, the IP address used to connect to the tracker will not
# override the value clients advertise as their IP address.
allow_ip_spoofing: false
# The maximum number of peers returned for an individual request.
max_numwant: 100
# The default number of peers returned for an individual request.
default_numwant: 50
# The maximum number of infohashes that can be scraped in one request.
max_scrape_infohashes: 50
# This block defines configuration used for the storage of peer data.
storage:
name: "memory"
config:
# The frequency which stale peers are removed.
# This balances between
# - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
# - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
gc_interval: "3m"
# The amount of time until a peer is considered stale.
# To avoid churn, keep this slightly larger than `announce_interval`
peer_lifetime: "31m"
# The number of partitions data will be divided into in order to provide a
# higher degree of parallelism.
shard_count: 1024
# The interval at which metrics about the number of infohashes and peers
# are collected and posted to Prometheus.
prometheus_reporting_interval: "1s"
# This block defines configuration used for redis storage.
# storage:
# name: redis
# config:
# # The frequency which stale peers are removed.
# # This balances between
# # - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
# # - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
# gc_interval: "3m"
# # The interval at which metrics about the number of infohashes and peers
# # are collected and posted to Prometheus.
# prometheus_reporting_interval: "1s"
# # The amount of time until a peer is considered stale.
# # To avoid churn, keep this slightly larger than `announce_interval`
# peer_lifetime: "31m"
# # The address of redis storage.
# redis_broker: "redis://pwd@127.0.0.1:6379/0"
# # The timeout for reading a command reply from redis.
# redis_read_timeout: "15s"
# # The timeout for writing a command to redis.
# redis_write_timeout: "15s"
# # The timeout for connecting to redis server.
# redis_connect_timeout: "15s"
# This block defines configuration used for middleware executed before a
# response has been returned to a BitTorrent client.
prehooks:
# - name: "jwt"
# options:
# issuer: "https://issuer.com"
# audience: "https://chihaya.issuer.com"
# jwk_set_url: "https://issuer.com/keys"
# jwk_set_update_interval: "5m"
# - name: "client approval"
# options:
# whitelist:
# - "OP1011"
# blacklist:
# - "OP1012"
# - name: "interval variation"
# options:
# modify_response_probability: 0.2
# max_increase_delta: 60
# modify_min_interval: true
# This block defines configuration used for torrent approval. It requires
# hashes for a whitelist or a blacklist. Hashes are hexadecimal-encoded.
# - name: "torrent approval"
# options:
# whitelist:
# - "a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5"
# blacklist:
# - "e1d2c3b4a5e1b2c3b4a5e1d2c3b4e5e1d2c3b4a5"

21
dist/helm/chihaya/.helmignore vendored Normal file
View file

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

10
dist/helm/chihaya/Chart.yaml vendored Normal file
View file

@ -0,0 +1,10 @@
apiVersion: v1
name: chihaya
home: https://chihaya.io
version: 0.1.0
description: A Helm chart for running the Chihaya BitTorrent tracker on Kubernetes.
sources:
- https://github.com/chihaya/chihaya
maintainers:
- name: Jimmy Zelinskie
email: jimmyzelinskie@gmail.com

6
dist/helm/chihaya/templates/NOTES.txt vendored Normal file
View file

@ -0,0 +1,6 @@
You can port forward a local port to Prometheus or the HTTP tracker by running:
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "fullname" . }}" -o jsonpath="{.items[0].metadata.name}")
# Metrics port
kubectl port-forward $POD_NAME 8080:{{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
# HTTP tracker port
kubectl port-forward $POD_NAME 8080:{{ $v := .Values.config.chihaya.http.addr | split ":" }}{{ $v._1 }}

View file

@ -0,0 +1,16 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 24 -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 24 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "fullname" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 24 -}}
{{- end -}}

View file

@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
data:
config.yaml: |
{{ toYaml .Values.config | indent 4 }}

View file

@ -0,0 +1,43 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
spec:
replicas: {{ .Values.replicaCount }}
template:
metadata:
labels:
app: {{ template "fullname" . }}
spec:
volumes:
- name: config
configMap:
name: {{ template "fullname" . }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- "--config=/etc/chihaya/config.yaml"
- "--debug"
- "--json"
ports:
- name: bittorrent-http
containerPort: {{ $v := .Values.config.chihaya.http.addr | split ":" }}{{ $v._1 }}
protocol: TCP
- name: bittorrent-udp
containerPort: {{ $v := .Values.config.chihaya.udp.addr | split ":" }}{{ $v._1 }}
protocol: UDP
- name: metrics
containerPort: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
livenessProbe:
httpGet:
path: /
port: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
volumeMounts:
- name: config
mountPath: /etc/chihaya
resources:
{{ toYaml .Values.resources | indent 10 }}

View file

@ -0,0 +1,27 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics"
prometheus.io/port: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 | quote }}
spec:
type: {{ .Values.service.type }}
ports:
- name: bittorrent-http
port: {{ $v := .Values.config.chihaya.http.addr | split ":" }}{{ $v._1 }}
targetPort: {{ $v := .Values.config.chihaya.http.addr | split ":" }}{{ $v._1 }}
protocol: TCP
- name: bittorrent-udp
port: {{ $v := .Values.config.chihaya.udp.addr | split ":" }}{{ $v._1 }}
targetPort: {{ $v := .Values.config.chihaya.udp.addr | split ":" }}{{ $v._1 }}
protocol: UDP
- name: metrics
port: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
targetPort: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
protocol: TCP
selector:
app: {{ template "fullname" . }}

162
dist/helm/chihaya/values.yaml vendored Normal file
View file

@ -0,0 +1,162 @@
replicaCount: 1
image:
repository: quay.io/jzelinskie/chihaya-git
tag: latest
pullPolicy: IfNotPresent
service:
name: chihaya
type: ClusterIP
resources:
limits:
cpu: 100m
memory: 1Gi
config:
chihaya:
# The interval communicated with BitTorrent clients informing them how
# frequently they should announce in between client events.
announce_interval: 30m
# The interval communicated with BitTorrent clients informing them of the
# minimal duration between announces.
min_announce_interval: 15m
# The network interface that will bind to an HTTP endpoint that can be
# scraped by programs collecting metrics.
#
# /metrics serves metrics in the Prometheus format
# /debug/pprof/{cmdline,profile,symbol,trace} serves profiles in the pprof format
metrics_addr: "0.0.0.0:6880"
# The maximum number of peers returned in an announce.
max_numwant: 50
# The default number of peers returned in an announce.
default_numwant: 25
# The number of infohashes a single scrape can request before being truncated.
max_scrape_infohashes: 50
# This block defines configuration for the tracker's HTTP interface.
# If you do not wish to run this, delete this section.
http:
# The network interface that will bind to an HTTP server for serving
# BitTorrent traffic.
addr: "0.0.0.0:6969"
# The path to the required files to listen via HTTPS.
tls_cert_path: ""
tls_key_path: ""
# The timeout durations for HTTP requests.
read_timeout: 5s
write_timeout: 5s
# Whether to time requests.
# Disabling this should increase performance/decrease load.
enable_request_timing: false
# When true, persistent connections will be allowed. Generally this is not
# useful for a public tracker, but helps performance in some cases (use of
# a reverse proxy, or when there are few clients issuing many requests).
enable_keepalive: false
idle_timeout: 30s
# Whether to listen on /announce.php and /scrape.php in addition to their
# non-.php counterparts.
# This is an option for compatibility with (very) old clients or otherwise
# outdated systems.
# This might be useful to retracker.local users, for more information see
# http://rutracker.wiki/Оптимизация_обмена_битторрент_траффиком_в_локальных_сетях
# and
# http://rutracker.wiki/Retracker.local
enable_legacy_php_urls: false
# When enabled, the IP address used to connect to the tracker will not
# override the value clients advertise as their IP address.
allow_ip_spoofing: false
# The HTTP Header containing the IP address of the client.
# This is only necessary if using a reverse proxy.
real_ip_header: "x-real-ip"
# The maximum number of peers returned for an individual request.
max_numwant: 100
# The default number of peers returned for an individual request.
default_numwant: 50
# The maximum number of infohashes that can be scraped in one request.
max_scrape_infohashes: 50
# This block defines configuration for the tracker's UDP interface.
# If you do not wish to run this, delete this section.
udp:
# The network interface that will bind to a UDP server for serving
# BitTorrent traffic.
addr: "0.0.0.0:6969"
# The leeway for a timestamp on a connection ID.
max_clock_skew: 10s
# The key used to encrypt connection IDs.
private_key: "paste a random string here that will be used to hmac connection IDs"
# Whether to time requests.
# Disabling this should increase performance/decrease load.
enable_request_timing: false
# When enabled, the IP address used to connect to the tracker will not
# override the value clients advertise as their IP address.
allow_ip_spoofing: false
# The maximum number of peers returned for an individual request.
max_numwant: 100
# The default number of peers returned for an individual request.
default_numwant: 50
# The maximum number of infohashes that can be scraped in one request.
max_scrape_infohashes: 50
# This block defines configuration used for the storage of peer data.
storage:
name: memory
config:
# The frequency which stale peers are removed.
gc_interval: 3m
# The amount of time until a peer is considered stale.
# To avoid churn, keep this slightly larger than `announce_interval`
peer_lifetime: 31m
# The number of partitions data will be divided into in order to provide a
# higher degree of parallelism.
shard_count: 1024
# The interval at which metrics about the number of infohashes and peers
# are collected and posted to Prometheus.
prometheus_reporting_interval: 1s
# This block defines configuration used for middleware executed before a
# response has been returned to a BitTorrent client.
prehooks:
#- name: jwt
# options:
# issuer: "https://issuer.com"
# audience: "https://chihaya.issuer.com"
# jwk_set_url: "https://issuer.com/keys"
# jwk_set_update_interval: 5m
#- name: client approval
# options:
# whitelist:
# - "OP1011"
# blacklist:
# - "OP1012"
#- name: interval variation
# options:
# modify_response_probability: 0.2
# max_increase_delta: 60
# modify_min_interval: true

12
dist/prometheus/prometheus.yaml vendored Normal file
View file

@ -0,0 +1,12 @@
---
global:
scrape_interval: "5s"
evaluation_interval: "5s"
# A scrape configuration containing exactly one endpoint to scrape:
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: "local-chihaya" # you can name this however you want
scrape_interval: "5s" # optionally override the global scrape_interval
static_configs:
- targets: ["localhost:6881"] # provide the address of chihaya's prometheus endpoint

36
docs/architecture.dot Normal file
View file

@ -0,0 +1,36 @@
digraph G {
subgraph cluster_0 {
label = "chihaya";
style = "line";
color = "blue";
"Storage";
subgraph cluster_1 {
label = "frontend";
style = "line";
color = "hotpink";
"Parser";
"Writer";
}
subgraph cluster_2 {
label = "logic";
style = "line";
color = "purple";
"PreHook Middleware";
"PostHook Middleware";
"Response Generator";
}
}
"BitTorrent Client" -> "Parser";
"Parser" -> "PreHook Middleware";
"PreHook Middleware" -> "Response Generator";
"PostHook Middleware" -> "Storage";
"Storage" -> "Response Generator";
"Response Generator" -> "Writer";
"Writer" -> "BitTorrent Client";
}

16
docs/architecture.md Normal file
View file

@ -0,0 +1,16 @@
# Architecture
## Overview
BitTorrent clients send Announce and Scrape requests to a _Frontend_.
Frontends parse requests and write responses for the particular protocol they implement.
The _TrackerLogic_ interface is used to generate responses for requests and optionally perform a task after responding to a client.
A configurable chain of _PreHook_ and _PostHook_ middleware is used to construct an instance of TrackerLogic.
PreHooks are middleware that are executed before the response has been written.
After all PreHooks have executed, any missing response fields that are required are filled by reading out of the configured implementation of the _Storage_ interface.
PostHooks are asynchronous tasks that occur after a response has been delivered to the client.
Because they are unnecessary for generating a response, updates to the Storage for a particular request are done asynchronously in a PostHook.
## Diagram
![architecture diagram](https://user-images.githubusercontent.com/343539/52676700-05c45c80-2ef9-11e9-9887-8366008b4e7e.png)

111
docs/frontend.md Normal file
View file

@ -0,0 +1,111 @@
# Frontends
A _Frontend_ is a component of Chihaya that serves a BitTorrent tracker on one protocol.
The frontend accepts, parses and sanitizes requests, passes them to the _Logic_ and writes responses to _Clients_.
This documentation first gives a high-level overview of Frontends and later goes into implementation specifics.
Users of Chihaya are expected to just read the first part - developers should read both.
## Functionality
A Frontend serves one protocol, for example HTTP ([BEP 3]) or UDP ([BEP 15]).
It listens for requests and usually answers each of them with one response. A basic overview of the control flow is:
1. Read the request.
2. Parse the request.
3. Have the Logic handle the request. This calls a series of `PreHooks`.
4. Send a response to the Client.
5. Process the request and response through `PostHooks`.
## Available Frontends
Chihaya ships with frontends for HTTP(S) and UDP.
The HTTP frontend uses Go's `http` package.
The UDP frontend implements both [old-opentracker-style] IPv6 and the IPv6 support specified in [BEP 15].
The advantage of the old opentracker style is that it contains a usable IPv6 `ip` field, to enable IP overrides in announces.
## Implementing a Frontend
This part is intended for developers.
### Implementation Specifics
A frontend should serve only one protocol.
It may serve that protocol on multiple transports or networks, if applicable.
An example of that is the `http` Frontend, operating both on HTTP and HTTPS.
The typical control flow of handling announces, in more detail, is:
1. Read the request.
2. Parse the request, if invalid go to 9.
3. Validate/sanitize the request, if invalid go to 9.
4. If the request is protocol-specific, handle, respond, and go to 8.
5. Pass the request to the `TrackerLogic`'s `HandleAnnounce` or `HandleScrape` method, if an error is returned go to 9.
6. Send the response to the Client.
7. Pass the request and response to the `TrackerLogic`'s `AfterAnnounce` or `AfterScrape` method.
8. Finish, accept next request.
9. For invalid requests or errors during processing: Send an error response to the client.
This step may be skipped for suspected denial-of-service attacks.
The error response may contain information about the cause of the error.
Only errors where the Client is at fault should be explained; internal server errors should be returned without explanation.
Then finish, and accept the next request.
#### Configuration
The frontend must be configurable using a single, exported struct.
The struct must have YAML annotations.
The struct must implement `log.Fielder` to be logged on startup.
#### Metrics
Frontends may provide runtime metrics, such as the number of requests or their duration.
Metrics must be reported using [Prometheus].
A frontend should provide at least the following metrics:
- The number of valid and invalid requests handled
- The average time it takes to handle a single request.
This request timing should be made optional using a config entry.
Requests should be separated by type, i.e. Scrapes, Announces, and other protocol-specific requests.
If the frontend serves multiple transports or networks, metrics for them should be separable.
It is recommended to publish one Prometheus `HistogramVec` with:
- A name like `chihaya_PROTOCOL_response_duration_milliseconds`
- A value holding the duration in milliseconds of the reported request
- Labels for:
- `action` (= `announce`, `scrape`, ...)
- `address_family` (= `Unknown`, `IPv4`, `IPv6`, ...), if applicable
- `error` (= A textual representation of the error encountered during processing.)
Because `error` is expected to hold the textual representation of any error that occurred during the request, great care must be taken to ensure all error messages are static.
`error` must not contain any information directly taken from the request, e.g. the value of an invalid parameter.
This would cause this dimension of prometheus to explode, which slows down prometheus clients and reporters.
#### Error Handling
Frontends should return `bittorrent.ClientError`s to the Client.
Frontends must not return errors that are not a `bittorrent.ClientError` to the Client.
A message like `internal server error` should be used instead.
#### Request Sanitization
The `TrackerLogic` expects sanitized requests in order to function properly.
The `bittorrent` package provides the `SanitizeAnnounce` and `SanitizeScrape` functions to sanitize Announces and Scrapes, respectively.
This is the minimal required sanitization, every `AnnounceRequest` and `ScrapeRequest` must be sanitized this way.
Note that the `AnnounceRequest` struct contains booleans of the form `XProvided`, where `X` denotes an optional parameter of the BitTorrent protocol.
These should be set according to the values received by the Client.
#### Contexts
All methods of the `TrackerLogic` interface expect a `context.Context` as a parameter.
After a request is handled by `HandleAnnounce` without errors, the populated context returned must be used to call `AfterAnnounce`.
The same applies to Scrapes.
This way, a PreHook can communicate with a PostHook by setting a context value.
[BEP 3]: http://bittorrent.org/beps/bep_0003.html
[BEP 15]: http://bittorrent.org/beps/bep_0015.html
[Prometheus]: https://prometheus.io/
[old-opentracker-style]: https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/

View file

@ -0,0 +1,35 @@
# Announce Interval Variation Middleware
This package provides the announce middleware `interval variation` which randomizes the announce interval.
## Functionality
This middleware chooses random announces and modifies the `interval` and `min_interval` fields.
A random number of seconds is added to the `interval` field and, if desired, also to the `min_interval` field.
Note that if a response is picked for modification and `min_interval` should be changed as well, both `interval` and `min_interval` are modified by the same amount.
## Use Case
Use this middleware to avoid recurring load spikes on the tracker.
By randomizing the announce interval, load spikes will flatten out after a few announce cycles.
## Configuration
This middleware provides the following parameters for configuration:
- `modify_response_probability` (float, >0, <= 1) indicates the probability by which a response will be chosen to have its announce intervals modified.
- `max_increase_delta` (int, >0) sets an upper boundary (inclusive) for the amount of seconds added.
- `modify_min_interval` (boolean) whether to modify the `min_interval` field as well.
An example config might look like this:
```yaml
chihaya:
prehooks:
- name: interval variation
config:
modify_response_probability: 0.2
max_increase_delta: 60
modify_min_interval: true
```

86
docs/storage/redis.md Normal file
View file

@ -0,0 +1,86 @@
# Redis Storage
This storage implementation separates Chihaya from its storage service.
Chihaya achieves HA by storing all peer data in Redis.
Multiple instances of Chihaya can use the same redis instance concurrently.
The storage service can get HA by clustering.
If one instance of Chihaya goes down, peer data will still be available in Redis.
The HA of the storage service is not considered here.
In case Redis runs as a single node, peer data will be unavailable if the node is down.
You should consider setting up a Redis cluster for Chihaya in production.
This storage implementation is currently orders of magnitude slower than the in-memory implementation.
## Use Case
When one instance of Chihaya is down, other instances can continue serving peers from Redis.
## Configuration
```yaml
chihaya:
storage:
name: redis
config:
# The frequency which stale peers are removed.
# This balances between
# - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
# - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
gc_interval: 3m
# The interval at which metrics about the number of infohashes and peers
# are collected and posted to Prometheus.
prometheus_reporting_interval: 1s
# The amount of time until a peer is considered stale.
# To avoid churn, keep this slightly larger than `announce_interval`
peer_lifetime: 31m
# The address of redis storage.
redis_broker: "redis://pwd@127.0.0.1:6379/0"
# The timeout for reading a command reply from redis.
redis_read_timeout: 15s
# The timeout for writing a command to redis.
redis_write_timeout: 15s
# The timeout for connecting to redis server.
redis_connect_timeout: 15s
```
## Implementation
Seeders and Leechers for a particular InfoHash are stored within a redis hash.
The InfoHash is used as key, _peer keys_ are the fields, last modified times are values.
Peer keys are derived from peers and contain Peer ID, IP, and Port.
All the InfoHashes (swarms) are also stored in a redis hash, with IP family as the key, infohash as field, and last modified time as value.
Here is an example:
```yaml
- IPv4
- IPv4_S_<infohash 1>: <modification time>
- IPv4_L_<infohash 1>: <modification time>
- IPv4_S_<infohash 2>: <modification time>
- IPv4_S_<infohash 1>
- <peer 1 key>: <modification time>
- <peer 2 key>: <modification time>
- IPv4_L_<infohash 1>
- <peer 3 key>: <modification time>
- IPv4_S_<infohash 2>
- <peer 3 key>: <modification time>
```
In this case, prometheus would record two swarms, three seeders, and one leecher.
These three keys per address family are used to record the count of swarms, seeders, and leechers.
```yaml
- IPv4_infohash_count: 2
- IPv4_S_count: 3
- IPv4_L_count: 1
```
Note: `IPv4_infohash_count` has a different meaning compared to the `memory` storage:
It represents the number of infohashes reported by seeders, meaning that infohashes without seeders are not counted.

View file

@ -1,43 +0,0 @@
chihaya:
announce_interval: 15m
prometheus_addr: localhost:6880
http:
addr: 0.0.0.0:6881
allow_ip_spoofing: false
real_ip_header: x-real-ip
read_timeout: 5s
write_timeout: 5s
request_timeout: 5s
udp:
addr: 0.0.0.0:6881
allow_ip_spoofing: false
max_clock_skew: 10s
private_key: |
paste a random string here that will be used to hmac connection IDs
storage:
gc_interval: 14m
peer_lifetime: 15m
shards: 1
max_numwant: 100
prehooks:
- name: jwt
config:
issuer: https://issuer.com
audience: https://chihaya.issuer.com
jwk_set_url: https://issuer.com/keys
jwk_set_update_interval: 5m
- name: client approval
config:
whitelist:
- OP1011
blacklist:
- OP1012
posthooks:
- name: gossip
config:
boostrap_node: 127.0.0.1:6881

View file

@ -11,14 +11,20 @@ import (
// after the response has been delivered to the client.
type TrackerLogic interface {
// HandleAnnounce generates a response for an Announce.
HandleAnnounce(context.Context, *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error)
//
// Returns the updated context, the generated AnnounceResponse and no error
// on success; nil and error on failure.
HandleAnnounce(context.Context, *bittorrent.AnnounceRequest) (context.Context, *bittorrent.AnnounceResponse, error)
// AfterAnnounce does something with the results of an Announce after it
// has been completed.
AfterAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse)
// HandleScrape generates a response for a Scrape.
HandleScrape(context.Context, *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error)
//
// Returns the updated context, the generated AnnounceResponse and no error
// on success; nil and error on failure.
HandleScrape(context.Context, *bittorrent.ScrapeRequest) (context.Context, *bittorrent.ScrapeResponse, error)
// AfterScrape does something with the results of a Scrape after it has been completed.
AfterScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse)

View file

@ -3,7 +3,7 @@ package bencode
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var unmarshalTests = []struct {
@ -24,9 +24,11 @@ var unmarshalTests = []struct {
func TestUnmarshal(t *testing.T) {
for _, tt := range unmarshalTests {
t.Run(tt.input, func(t *testing.T) {
got, err := Unmarshal([]byte(tt.input))
assert.Nil(t, err, "unmarshal should not fail")
assert.Equal(t, got, tt.expected, "unmarshalled values should match the expected results")
require.Nil(t, err, "unmarshal should not fail")
require.Equal(t, got, tt.expected, "unmarshalled values should match the expected results")
})
}
}
@ -44,8 +46,8 @@ func BenchmarkUnmarshalScalar(b *testing.B) {
d2 := NewDecoder(&bufferLoop{"i42e"})
for i := 0; i < b.N; i++ {
d1.Decode()
d2.Decode()
_, _ = d1.Decode()
_, _ = d2.Decode()
}
}
@ -61,8 +63,8 @@ func TestUnmarshalLarge(t *testing.T) {
dec := NewDecoder(&bufferLoop{string(buf)})
got, err := dec.Decode()
assert.Nil(t, err, "decode should not fail")
assert.Equal(t, got, data, "encoding and decoding should equal the original value")
require.Nil(t, err, "decode should not fail")
require.Equal(t, got, data, "encoding and decoding should equal the original value")
}
func BenchmarkUnmarshalLarge(b *testing.B) {
@ -77,6 +79,6 @@ func BenchmarkUnmarshalLarge(b *testing.B) {
dec := NewDecoder(&bufferLoop{string(buf)})
for i := 0; i < b.N; i++ {
dec.Decode()
_, _ = dec.Decode()
}
}

View file

@ -66,7 +66,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
err = marshalInt(w, int64(v))
case int64:
err = marshalInt(w, int64(v))
err = marshalInt(w, v)
case uint:
err = marshalUint(w, uint64(v))
@ -78,7 +78,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
err = marshalUint(w, uint64(v))
case uint64:
err = marshalUint(w, uint64(v))
err = marshalUint(w, v)
case time.Duration: // Assume seconds
err = marshalInt(w, int64(v/time.Second))
@ -90,7 +90,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
err = marshalList(w, v)
case []Dict:
var interfaceSlice = make([]interface{}, len(v))
interfaceSlice := make([]interface{}, len(v))
for i, d := range v {
interfaceSlice[i] = d
}

View file

@ -2,10 +2,11 @@ package bencode
import (
"bytes"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var marshalTests = []struct {
@ -35,10 +36,12 @@ var marshalTests = []struct {
}
func TestMarshal(t *testing.T) {
for _, test := range marshalTests {
got, err := Marshal(test.input)
assert.Nil(t, err, "marshal should not fail")
assert.Contains(t, test.expected, string(got), "the marshaled result should be one of the expected permutations")
for _, tt := range marshalTests {
t.Run(fmt.Sprintf("%#v", tt.input), func(t *testing.T) {
got, err := Marshal(tt.input)
require.Nil(t, err, "marshal should not fail")
require.Contains(t, tt.expected, string(got), "the marshaled result should be one of the expected permutations")
})
}
}
@ -47,8 +50,8 @@ func BenchmarkMarshalScalar(b *testing.B) {
encoder := NewEncoder(buf)
for i := 0; i < b.N; i++ {
encoder.Encode("test")
encoder.Encode(123)
_ = encoder.Encode("test")
_ = encoder.Encode(123)
}
}
@ -64,6 +67,6 @@ func BenchmarkMarshalLarge(b *testing.B) {
encoder := NewEncoder(buf)
for i := 0; i < b.N; i++ {
encoder.Encode(data)
_ = encoder.Encode(data)
}
}

View file

@ -4,188 +4,398 @@ package http
import (
"context"
"crypto/tls"
"errors"
"net"
"net/http"
"time"
log "github.com/Sirupsen/logrus"
"github.com/julienschmidt/httprouter"
"github.com/prometheus/client_golang/prometheus"
"github.com/tylerb/graceful"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend"
"github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop"
)
func init() {
prometheus.MustRegister(promResponseDurationMilliseconds)
recordResponseDuration("action", nil, time.Second)
}
var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "chihaya_http_response_duration_milliseconds",
Help: "The duration of time it takes to receive and write a response to an API request",
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
},
[]string{"action", "error"},
)
// recordResponseDuration records the duration of time to respond to a Request
// in milliseconds .
func recordResponseDuration(action string, err error, duration time.Duration) {
var errString string
if err != nil {
errString = err.Error()
}
promResponseDurationMilliseconds.
WithLabelValues(action, errString).
Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
}
// Config represents all of the configurable options for an HTTP BitTorrent
// Frontend.
type Config struct {
Addr string `yaml:"addr"`
HTTPSAddr string `yaml:"https_addr"`
ReadTimeout time.Duration `yaml:"read_timeout"`
WriteTimeout time.Duration `yaml:"write_timeout"`
RequestTimeout time.Duration `yaml:"request_timeout"`
AllowIPSpoofing bool `yaml:"allow_ip_spoofing"`
RealIPHeader string `yaml:"real_ip_header"`
IdleTimeout time.Duration `yaml:"idle_timeout"`
EnableKeepAlive bool `yaml:"enable_keepalive"`
TLSCertPath string `yaml:"tls_cert_path"`
TLSKeyPath string `yaml:"tls_key_path"`
AnnounceRoutes []string `yaml:"announce_routes"`
ScrapeRoutes []string `yaml:"scrape_routes"`
EnableRequestTiming bool `yaml:"enable_request_timing"`
ParseOptions `yaml:",inline"`
}
// Frontend holds the state of an HTTP BitTorrent Frontend.
// LogFields renders the current config as a set of Logrus fields.
func (cfg Config) LogFields() log.Fields {
return log.Fields{
"addr": cfg.Addr,
"httpsAddr": cfg.HTTPSAddr,
"readTimeout": cfg.ReadTimeout,
"writeTimeout": cfg.WriteTimeout,
"idleTimeout": cfg.IdleTimeout,
"enableKeepAlive": cfg.EnableKeepAlive,
"tlsCertPath": cfg.TLSCertPath,
"tlsKeyPath": cfg.TLSKeyPath,
"announceRoutes": cfg.AnnounceRoutes,
"scrapeRoutes": cfg.ScrapeRoutes,
"enableRequestTiming": cfg.EnableRequestTiming,
"allowIPSpoofing": cfg.AllowIPSpoofing,
"realIPHeader": cfg.RealIPHeader,
"maxNumWant": cfg.MaxNumWant,
"defaultNumWant": cfg.DefaultNumWant,
"maxScrapeInfoHashes": cfg.MaxScrapeInfoHashes,
}
}
// Default config constants.
const (
defaultReadTimeout = 2 * time.Second
defaultWriteTimeout = 2 * time.Second
defaultIdleTimeout = 30 * time.Second
)
// Validate sanity checks values set in a config and returns a new config with
// default values replacing anything that is invalid.
//
// This function warns to the logger when a value is changed.
func (cfg Config) Validate() Config {
validcfg := cfg
if cfg.ReadTimeout <= 0 {
validcfg.ReadTimeout = defaultReadTimeout
log.Warn("falling back to default configuration", log.Fields{
"name": "http.ReadTimeout",
"provided": cfg.ReadTimeout,
"default": validcfg.ReadTimeout,
})
}
if cfg.WriteTimeout <= 0 {
validcfg.WriteTimeout = defaultWriteTimeout
log.Warn("falling back to default configuration", log.Fields{
"name": "http.WriteTimeout",
"provided": cfg.WriteTimeout,
"default": validcfg.WriteTimeout,
})
}
if cfg.IdleTimeout <= 0 {
validcfg.IdleTimeout = defaultIdleTimeout
if cfg.EnableKeepAlive {
// If keepalive is disabled, this configuration isn't used anyway.
log.Warn("falling back to default configuration", log.Fields{
"name": "http.IdleTimeout",
"provided": cfg.IdleTimeout,
"default": validcfg.IdleTimeout,
})
}
}
if cfg.MaxNumWant <= 0 {
validcfg.MaxNumWant = defaultMaxNumWant
log.Warn("falling back to default configuration", log.Fields{
"name": "http.MaxNumWant",
"provided": cfg.MaxNumWant,
"default": validcfg.MaxNumWant,
})
}
if cfg.DefaultNumWant <= 0 {
validcfg.DefaultNumWant = defaultDefaultNumWant
log.Warn("falling back to default configuration", log.Fields{
"name": "http.DefaultNumWant",
"provided": cfg.DefaultNumWant,
"default": validcfg.DefaultNumWant,
})
}
if cfg.MaxScrapeInfoHashes <= 0 {
validcfg.MaxScrapeInfoHashes = defaultMaxScrapeInfoHashes
log.Warn("falling back to default configuration", log.Fields{
"name": "http.MaxScrapeInfoHashes",
"provided": cfg.MaxScrapeInfoHashes,
"default": validcfg.MaxScrapeInfoHashes,
})
}
return validcfg
}
// Frontend represents the state of an HTTP BitTorrent Frontend.
type Frontend struct {
grace *graceful.Server
srv *http.Server
tlsSrv *http.Server
tlsCfg *tls.Config
logic frontend.TrackerLogic
Config
}
// NewFrontend allocates a new instance of a Frontend.
func NewFrontend(logic frontend.TrackerLogic, cfg Config) *Frontend {
return &Frontend{
// NewFrontend creates a new instance of an HTTP Frontend that asynchronously
// serves requests.
func NewFrontend(logic frontend.TrackerLogic, provided Config) (*Frontend, error) {
cfg := provided.Validate()
f := &Frontend{
logic: logic,
Config: cfg,
}
if cfg.Addr == "" && cfg.HTTPSAddr == "" {
return nil, errors.New("must specify addr or https_addr or both")
}
// Stop provides a thread-safe way to shutdown a currently running Tracker.
func (t *Frontend) Stop() {
t.grace.Stop(t.grace.Timeout)
<-t.grace.StopChan()
if len(cfg.AnnounceRoutes) < 1 || len(cfg.ScrapeRoutes) < 1 {
return nil, errors.New("must specify routes")
}
func (t *Frontend) handler() http.Handler {
// If TLS is enabled, create a key pair.
if cfg.TLSCertPath != "" && cfg.TLSKeyPath != "" {
var err error
f.tlsCfg = &tls.Config{
MinVersion: tls.VersionTLS12,
Certificates: make([]tls.Certificate, 1),
}
f.tlsCfg.Certificates[0], err = tls.LoadX509KeyPair(cfg.TLSCertPath, cfg.TLSKeyPath)
if err != nil {
return nil, err
}
}
if cfg.HTTPSAddr != "" && f.tlsCfg == nil {
return nil, errors.New("must specify tls_cert_path and tls_key_path when using https_addr")
}
if cfg.HTTPSAddr == "" && f.tlsCfg != nil {
return nil, errors.New("must specify https_addr when using tls_cert_path and tls_key_path")
}
var listenerHTTP, listenerHTTPS net.Listener
var err error
if cfg.Addr != "" {
listenerHTTP, err = net.Listen("tcp", f.Addr)
if err != nil {
return nil, err
}
}
if cfg.HTTPSAddr != "" {
listenerHTTPS, err = net.Listen("tcp", f.HTTPSAddr)
if err != nil {
if listenerHTTP != nil {
listenerHTTP.Close()
}
return nil, err
}
}
if cfg.Addr != "" {
go func() {
if err := f.serveHTTP(listenerHTTP); err != nil {
log.Fatal("failed while serving http", log.Err(err))
}
}()
}
if cfg.HTTPSAddr != "" {
go func() {
if err := f.serveHTTPS(listenerHTTPS); err != nil {
log.Fatal("failed while serving https", log.Err(err))
}
}()
}
return f, nil
}
// Stop provides a thread-safe way to shutdown a currently running Frontend.
func (f *Frontend) Stop() stop.Result {
stopGroup := stop.NewGroup()
if f.srv != nil {
stopGroup.AddFunc(f.makeStopFunc(f.srv))
}
if f.tlsSrv != nil {
stopGroup.AddFunc(f.makeStopFunc(f.tlsSrv))
}
return stopGroup.Stop()
}
func (f *Frontend) makeStopFunc(stopSrv *http.Server) stop.Func {
return func() stop.Result {
c := make(stop.Channel)
go func() {
c.Done(stopSrv.Shutdown(context.Background()))
}()
return c.Result()
}
}
func (f *Frontend) handler() http.Handler {
router := httprouter.New()
router.GET("/announce", t.announceRoute)
router.GET("/scrape", t.scrapeRoute)
for _, route := range f.AnnounceRoutes {
router.GET(route, f.announceRoute)
}
for _, route := range f.ScrapeRoutes {
router.GET(route, f.scrapeRoute)
}
return router
}
// ListenAndServe listens on the TCP network address t.Addr and blocks serving
// BitTorrent requests until t.Stop() is called or an error is returned.
func (t *Frontend) ListenAndServe() error {
t.grace = &graceful.Server{
Server: &http.Server{
Addr: t.Addr,
Handler: t.handler(),
ReadTimeout: t.ReadTimeout,
WriteTimeout: t.WriteTimeout,
},
Timeout: t.RequestTimeout,
NoSignalHandling: true,
ConnState: func(conn net.Conn, state http.ConnState) {
switch state {
case http.StateNew:
//stats.RecordEvent(stats.AcceptedConnection)
case http.StateClosed:
//stats.RecordEvent(stats.ClosedConnection)
case http.StateHijacked:
panic("http: connection impossibly hijacked")
// Ignore the following cases.
case http.StateActive, http.StateIdle:
default:
panic("http: connection transitioned to unknown state")
}
},
}
t.grace.SetKeepAlivesEnabled(false)
if err := t.grace.ListenAndServe(); err != nil {
if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
panic("http: failed to gracefully run HTTP server: " + err.Error())
}
// serveHTTP blocks while listening and serving non-TLS HTTP BitTorrent
// requests until Stop() is called or an error is returned.
func (f *Frontend) serveHTTP(l net.Listener) error {
	srv := &http.Server{
		Addr:         f.Addr,
		Handler:      f.handler(),
		ReadTimeout:  f.ReadTimeout,
		WriteTimeout: f.WriteTimeout,
		IdleTimeout:  f.IdleTimeout,
	}
	srv.SetKeepAlivesEnabled(f.EnableKeepAlive)
	// Stash the server so Stop() can shut it down gracefully.
	f.srv = srv

	// Serve until shutdown; ErrServerClosed signals a graceful Stop(), which
	// is not an error from the caller's point of view.
	if err := f.srv.Serve(l); !errors.Is(err, http.ErrServerClosed) {
		return err
	}
	return nil
}
// announceRoute parses and responds to an Announce by using t.TrackerLogic.
func (t *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
// serveHTTPS blocks while listening and serving TLS HTTP BitTorrent
// requests until Stop() is called or an error is returned.
func (f *Frontend) serveHTTPS(l net.Listener) error {
	srv := &http.Server{
		Addr:         f.HTTPSAddr,
		TLSConfig:    f.tlsCfg,
		Handler:      f.handler(),
		ReadTimeout:  f.ReadTimeout,
		WriteTimeout: f.WriteTimeout,
	}
	srv.SetKeepAlivesEnabled(f.EnableKeepAlive)
	// Stash the server so Stop() can shut it down gracefully.
	f.tlsSrv = srv

	// Certificates come from f.tlsCfg, so ServeTLS receives empty file paths.
	// ErrServerClosed signals a graceful Stop(), not a failure.
	if err := f.tlsSrv.ServeTLS(l, "", ""); !errors.Is(err, http.ErrServerClosed) {
		return err
	}
	return nil
}
// injectRouteParamsToContext returns a copy of ctx that carries the
// httprouter path parameters as bittorrent.RouteParams, keyed by
// bittorrent.RouteParamsKey.
func injectRouteParamsToContext(ctx context.Context, ps httprouter.Params) context.Context {
	params := make(bittorrent.RouteParams, 0, len(ps))
	for _, param := range ps {
		params = append(params, bittorrent.RouteParam{Key: param.Key, Value: param.Value})
	}
	return context.WithValue(ctx, bittorrent.RouteParamsKey, params)
}
// announceRoute parses and responds to an Announce.
func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
var err error
start := time.Now()
defer recordResponseDuration("announce", err, time.Since(start))
var start time.Time
if f.EnableRequestTiming {
start = time.Now()
}
var af *bittorrent.AddressFamily
defer func() {
if f.EnableRequestTiming {
recordResponseDuration("announce", af, err, time.Since(start))
} else {
recordResponseDuration("announce", af, err, time.Duration(0))
}
}()
req, err := ParseAnnounce(r, t.RealIPHeader, t.AllowIPSpoofing)
req, err := ParseAnnounce(r, f.ParseOptions)
if err != nil {
WriteError(w, err)
return
}
resp, err := t.logic.HandleAnnounce(context.Background(), req)
if err != nil {
WriteError(w, err)
_ = WriteError(w, err)
return
}
af = new(bittorrent.AddressFamily)
*af = req.IP.AddressFamily
ctx := injectRouteParamsToContext(context.Background(), ps)
ctx, resp, err := f.logic.HandleAnnounce(ctx, req)
if err != nil {
_ = WriteError(w, err)
return
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
err = WriteAnnounceResponse(w, resp)
if err != nil {
WriteError(w, err)
_ = WriteError(w, err)
return
}
go t.logic.AfterAnnounce(context.Background(), req, resp)
go f.logic.AfterAnnounce(ctx, req, resp)
}
// scrapeRoute parses and responds to a Scrape by using t.TrackerLogic.
func (t *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
// scrapeRoute parses and responds to a Scrape.
func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
var err error
start := time.Now()
defer recordResponseDuration("scrape", err, time.Since(start))
var start time.Time
if f.EnableRequestTiming {
start = time.Now()
}
var af *bittorrent.AddressFamily
defer func() {
if f.EnableRequestTiming {
recordResponseDuration("scrape", af, err, time.Since(start))
} else {
recordResponseDuration("scrape", af, err, time.Duration(0))
}
}()
req, err := ParseScrape(r)
req, err := ParseScrape(r, f.ParseOptions)
if err != nil {
WriteError(w, err)
_ = WriteError(w, err)
return
}
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.Errorln("http: unable to determine remote address for scrape:", err)
WriteError(w, err)
log.Error("http: unable to determine remote address for scrape", log.Err(err))
_ = WriteError(w, err)
return
}
ip := net.ParseIP(host)
ctx := context.WithValue(context.Background(), middleware.ScrapeIsIPv6Key, len(ip) == net.IPv6len)
reqIP := net.ParseIP(host)
if reqIP.To4() != nil {
req.AddressFamily = bittorrent.IPv4
} else if len(reqIP) == net.IPv6len { // implies reqIP.To4() == nil
req.AddressFamily = bittorrent.IPv6
} else {
log.Error("http: invalid IP: neither v4 nor v6", log.Fields{"RemoteAddr": r.RemoteAddr})
_ = WriteError(w, bittorrent.ErrInvalidIP)
return
}
af = new(bittorrent.AddressFamily)
*af = req.AddressFamily
resp, err := t.logic.HandleScrape(ctx, req)
ctx := injectRouteParamsToContext(context.Background(), ps)
ctx, resp, err := f.logic.HandleScrape(ctx, req)
if err != nil {
WriteError(w, err)
_ = WriteError(w, err)
return
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
err = WriteScrapeResponse(w, resp)
if err != nil {
WriteError(w, err)
_ = WriteError(w, err)
return
}
go t.logic.AfterScrape(context.Background(), req, resp)
go f.logic.AfterScrape(ctx, req, resp)
}

View file

@ -1,18 +1,35 @@
package http
import (
"errors"
"net"
"net/http"
"github.com/chihaya/chihaya/bittorrent"
)
// ParseAnnounce parses an bittorrent.AnnounceRequest from an http.Request.
// ParseOptions is the configuration used to parse an Announce Request.
//
// If allowIPSpoofing is true, IPs provided via params will be used.
// If realIPHeader is not empty string, the first value of the HTTP Header with
// If AllowIPSpoofing is true, IPs provided via BitTorrent params will be used.
// If RealIPHeader is not empty string, the value of the first HTTP Header with
// that name will be used.
func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (*bittorrent.AnnounceRequest, error) {
type ParseOptions struct {
	// AllowIPSpoofing permits clients to override their IP via BitTorrent params.
	AllowIPSpoofing bool `yaml:"allow_ip_spoofing"`
	// RealIPHeader, when non-empty, names an HTTP header whose first value
	// supplies the client IP.
	RealIPHeader string `yaml:"real_ip_header"`
	// MaxNumWant is passed to SanitizeAnnounce — presumably an upper bound on
	// peers returned per announce; confirm against bittorrent.SanitizeAnnounce.
	MaxNumWant uint32 `yaml:"max_numwant"`
	// DefaultNumWant is passed to SanitizeAnnounce — presumably used when the
	// client omits numwant; confirm against bittorrent.SanitizeAnnounce.
	DefaultNumWant uint32 `yaml:"default_numwant"`
	// MaxScrapeInfoHashes is passed to SanitizeScrape — presumably a cap on
	// infohashes per scrape request.
	MaxScrapeInfoHashes uint32 `yaml:"max_scrape_infohashes"`
}
// Default parser config constants.
const (
	defaultMaxNumWant          = 100 // fallback for ParseOptions.MaxNumWant
	defaultDefaultNumWant      = 50  // fallback for ParseOptions.DefaultNumWant
	defaultMaxScrapeInfoHashes = 50  // fallback for ParseOptions.MaxScrapeInfoHashes
)
// ParseAnnounce parses an bittorrent.AnnounceRequest from an http.Request.
func ParseAnnounce(r *http.Request, opts ParseOptions) (*bittorrent.AnnounceRequest, error) {
qp, err := bittorrent.ParseURLData(r.RequestURI)
if err != nil {
return nil, err
@ -20,15 +37,23 @@ func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (
request := &bittorrent.AnnounceRequest{Params: qp}
eventStr, _ := qp.String("event")
// Attempt to parse the event from the request.
var eventStr string
eventStr, request.EventProvided = qp.String("event")
if request.EventProvided {
request.Event, err = bittorrent.NewEvent(eventStr)
if err != nil {
return nil, bittorrent.ClientError("failed to provide valid client event")
}
} else {
request.Event = bittorrent.None
}
// Determine if the client expects a compact response.
compactStr, _ := qp.String("compact")
request.Compact = compactStr != "" && compactStr != "0"
// Parse the infohash from the request.
infoHashes := qp.InfoHashes()
if len(infoHashes) < 1 {
return nil, bittorrent.ClientError("no info_hash parameter supplied")
@ -38,6 +63,7 @@ func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (
}
request.InfoHash = infoHashes[0]
// Parse the PeerID from the request.
peerID, ok := qp.String("peer_id")
if !ok {
return nil, bittorrent.ClientError("failed to parse parameter: peer_id")
@ -47,48 +73,55 @@ func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (
}
request.Peer.ID = bittorrent.PeerIDFromString(peerID)
request.Left, err = qp.Uint64("left")
// Determine the number of remaining bytes for the client.
request.Left, err = qp.Uint("left", 64)
if err != nil {
return nil, bittorrent.ClientError("failed to parse parameter: left")
}
request.Downloaded, err = qp.Uint64("downloaded")
// Determine the number of bytes downloaded by the client.
request.Downloaded, err = qp.Uint("downloaded", 64)
if err != nil {
return nil, bittorrent.ClientError("failed to parse parameter: downloaded")
}
request.Uploaded, err = qp.Uint64("uploaded")
// Determine the number of bytes shared by the client.
request.Uploaded, err = qp.Uint("uploaded", 64)
if err != nil {
return nil, bittorrent.ClientError("failed to parse parameter: uploaded")
}
numwant, err := qp.Uint64("numwant")
if err != nil {
// Determine the number of peers the client wants in the response.
numwant, err := qp.Uint("numwant", 32)
if err != nil && !errors.Is(err, bittorrent.ErrKeyNotFound) {
return nil, bittorrent.ClientError("failed to parse parameter: numwant")
}
// If there were no errors, the user actually provided the numwant.
request.NumWantProvided = err == nil
request.NumWant = uint32(numwant)
port, err := qp.Uint64("port")
// Parse the port where the client is listening.
port, err := qp.Uint("port", 16)
if err != nil {
return nil, bittorrent.ClientError("failed to parse parameter: port")
}
request.Peer.Port = uint16(port)
request.Peer.IP = requestedIP(r, qp, realIPHeader, allowIPSpoofing)
if request.Peer.IP == nil {
// Parse the IP address where the client is listening.
request.Peer.IP.IP, request.IPProvided = requestedIP(r, qp, opts)
if request.Peer.IP.IP == nil {
return nil, bittorrent.ClientError("failed to parse peer IP address")
}
// Sanitize IPv4 addresses to 4 bytes.
if ip := request.Peer.IP.To4(); ip != nil {
request.Peer.IP = ip
if err := bittorrent.SanitizeAnnounce(request, opts.MaxNumWant, opts.DefaultNumWant); err != nil {
return nil, err
}
return request, nil
}
// ParseScrape parses an bittorrent.ScrapeRequest from an http.Request.
func ParseScrape(r *http.Request) (*bittorrent.ScrapeRequest, error) {
func ParseScrape(r *http.Request, opts ParseOptions) (*bittorrent.ScrapeRequest, error) {
qp, err := bittorrent.ParseURLData(r.RequestURI)
if err != nil {
return nil, err
@ -104,39 +137,35 @@ func ParseScrape(r *http.Request) (*bittorrent.ScrapeRequest, error) {
Params: qp,
}
if err := bittorrent.SanitizeScrape(request, opts.MaxScrapeInfoHashes); err != nil {
return nil, err
}
return request, nil
}
// requestedIP determines the IP address for a BitTorrent client request.
//
// If allowIPSpoofing is true, IPs provided via params will be used.
// If realIPHeader is not empty string, the first value of the HTTP Header with
// that name will be used.
func requestedIP(r *http.Request, p bittorrent.Params, realIPHeader string, allowIPSpoofing bool) net.IP {
if allowIPSpoofing {
func requestedIP(r *http.Request, p bittorrent.Params, opts ParseOptions) (ip net.IP, provided bool) {
if opts.AllowIPSpoofing {
if ipstr, ok := p.String("ip"); ok {
ip := net.ParseIP(ipstr)
return ip
return net.ParseIP(ipstr), true
}
if ipstr, ok := p.String("ipv4"); ok {
ip := net.ParseIP(ipstr)
return ip
return net.ParseIP(ipstr), true
}
if ipstr, ok := p.String("ipv6"); ok {
ip := net.ParseIP(ipstr)
return ip
return net.ParseIP(ipstr), true
}
}
if realIPHeader != "" {
if ips, ok := r.Header[realIPHeader]; ok && len(ips) > 0 {
ip := net.ParseIP(ips[0])
return ip
if opts.RealIPHeader != "" {
if ip := r.Header.Get(opts.RealIPHeader); ip != "" {
return net.ParseIP(ip), false
}
}
host, _, _ := net.SplitHostPort(r.RemoteAddr)
return net.ParseIP(host)
return net.ParseIP(host), false
}

View file

@ -0,0 +1,50 @@
package http
import (
"errors"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/chihaya/chihaya/bittorrent"
)
// init registers the response-duration histogram with the default Prometheus
// registry at package load time.
func init() {
	prometheus.MustRegister(promResponseDurationMilliseconds)
}

// promResponseDurationMilliseconds tracks how long HTTP responses take,
// partitioned by action, address family, and error label.
var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "chihaya_http_response_duration_milliseconds",
		Help:    "The duration of time it takes to receive and write a response to an API request",
		Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
	},
	[]string{"action", "address_family", "error"},
)
// recordResponseDuration records the duration of time to respond to a Request
// in milliseconds.
func recordResponseDuration(action string, af *bittorrent.AddressFamily, err error, duration time.Duration) {
	// Only client-safe error text becomes a label value; anything else is
	// collapsed to a generic string, keeping label cardinality bounded.
	var errString string
	if err != nil {
		var clientErr bittorrent.ClientError
		if errors.As(err, &clientErr) {
			errString = clientErr.Error()
		} else {
			errString = "internal error"
		}
	}

	// Map the (possibly nil) address family pointer onto its label value.
	var afString string
	switch {
	case af == nil:
		afString = "Unknown"
	case *af == bittorrent.IPv4:
		afString = "IPv4"
	case *af == bittorrent.IPv6:
		afString = "IPv6"
	}

	promResponseDurationMilliseconds.
		WithLabelValues(action, afString, errString).
		Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
}

View file

@ -1,21 +1,22 @@
package http
import (
"errors"
"net/http"
log "github.com/Sirupsen/logrus"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend/http/bencode"
"github.com/chihaya/chihaya/pkg/log"
)
// WriteError communicates an error to a BitTorrent client over HTTP.
func WriteError(w http.ResponseWriter, err error) error {
message := "internal server error"
if _, clientErr := err.(bittorrent.ClientError); clientErr {
message = err.Error()
var clientErr bittorrent.ClientError
if errors.As(err, &clientErr) {
message = clientErr.Error()
} else {
log.Errorf("http: internal error: %s", err)
log.Error("http: internal error", log.Err(err))
}
w.WriteHeader(http.StatusOK)
@ -58,7 +59,7 @@ func WriteAnnounceResponse(w http.ResponseWriter, resp *bittorrent.AnnounceRespo
}
// Add the peers to the dictionary.
var peers []bencode.Dict
peers := make([]bencode.Dict, 0, len(resp.IPv4Peers)+len(resp.IPv6Peers))
for _, peer := range resp.IPv4Peers {
peers = append(peers, dict(peer))
}
@ -74,8 +75,8 @@ func WriteAnnounceResponse(w http.ResponseWriter, resp *bittorrent.AnnounceRespo
// client over HTTP.
func WriteScrapeResponse(w http.ResponseWriter, resp *bittorrent.ScrapeResponse) error {
filesDict := bencode.NewDict()
for infohash, scrape := range resp.Files {
filesDict[string(infohash[:])] = bencode.Dict{
for _, scrape := range resp.Files {
filesDict[string(scrape.InfoHash[:])] = bencode.Dict{
"complete": scrape.Complete,
"incomplete": scrape.Incomplete,
}

View file

@ -1,16 +1,17 @@
package http
import (
"fmt"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/bittorrent"
)
func TestWriteError(t *testing.T) {
var table = []struct {
table := []struct {
reason, expected string
}{
{"hello world", "d14:failure reason11:hello worlde"},
@ -18,16 +19,28 @@ func TestWriteError(t *testing.T) {
}
for _, tt := range table {
t.Run(fmt.Sprintf("%s expecting %s", tt.reason, tt.expected), func(t *testing.T) {
r := httptest.NewRecorder()
err := WriteError(r, bittorrent.ClientError(tt.reason))
assert.Nil(t, err)
assert.Equal(t, r.Body.String(), tt.expected)
require.Nil(t, err)
require.Equal(t, r.Body.String(), tt.expected)
})
}
}
func TestWriteStatus(t *testing.T) {
r := httptest.NewRecorder()
err := WriteError(r, bittorrent.ClientError("something is missing"))
assert.Nil(t, err)
assert.Equal(t, r.Body.String(), "d14:failure reason20:something is missinge")
table := []struct {
reason, expected string
}{
{"something is missing", "d14:failure reason20:something is missinge"},
}
for _, tt := range table {
t.Run(fmt.Sprintf("%s expecting %s", tt.reason, tt.expected), func(t *testing.T) {
r := httptest.NewRecorder()
err := WriteError(r, bittorrent.ClientError(tt.reason))
require.Nil(t, err)
require.Equal(t, r.Body.String(), tt.expected)
})
}
}

View file

@ -11,24 +11,27 @@ type BytePool struct {
func New(length int) *BytePool {
var bp BytePool
bp.Pool.New = func() interface{} {
return make([]byte, length, length)
b := make([]byte, length)
return &b
}
return &bp
}
// Get returns a byte slice from the pool.
func (bp *BytePool) Get() []byte {
return bp.Pool.Get().([]byte)
func (bp *BytePool) Get() *[]byte {
return bp.Pool.Get().(*[]byte)
}
// Put returns a byte slice to the pool.
func (bp *BytePool) Put(b []byte) {
b = b[:cap(b)]
func (bp *BytePool) Put(b *[]byte) {
*b = (*b)[:cap(*b)]
// Zero out the bytes.
// Apparently this specific expression is optimized by the compiler, see
// github.com/golang/go/issues/5373.
for i := range b {
b[i] = 0
// This specific expression is optimized by the compiler:
// https://github.com/golang/go/issues/5373.
for i := range *b {
(*b)[i] = 0
}
bp.Pool.Put(b)
}

View file

@ -2,18 +2,77 @@ package udp
import (
"crypto/hmac"
"crypto/sha256"
"encoding/binary"
"hash"
"net"
"time"
sha256 "github.com/minio/sha256-simd"
"github.com/chihaya/chihaya/pkg/log"
)
// ttl is the number of seconds a connection ID should be valid according to
// BEP 15.
// ttl is the duration a connection ID should be valid according to BEP 15.
const ttl = 2 * time.Minute
// NewConnectionID creates a new 8 byte connection identifier for UDP packets
// as described by BEP 15.
// NewConnectionID creates an 8-byte connection identifier for UDP packets as
// described by BEP 15.
// This is a wrapper around creating a new ConnectionIDGenerator and generating
// an ID. It is recommended to use the generator for performance.
func NewConnectionID(ip net.IP, now time.Time, key string) []byte {
	// Convenience wrapper: allocates a fresh generator on every call.
	return NewConnectionIDGenerator(key).Generate(ip, now)
}
// ValidConnectionID determines whether a connection identifier is legitimate.
// This is a wrapper around creating a new ConnectionIDGenerator and validating
// the ID. It is recommended to use the generator for performance.
func ValidConnectionID(connectionID []byte, ip net.IP, now time.Time, maxClockSkew time.Duration, key string) bool {
	// Convenience wrapper: allocates a fresh generator on every call.
	return NewConnectionIDGenerator(key).Validate(connectionID, ip, now, maxClockSkew)
}
// A ConnectionIDGenerator is a reusable generator and validator for connection
// IDs as described in BEP 15.
// It is not thread safe, but is safe to be pooled and reused by other
// goroutines. It manages its state itself, so it can be taken from and returned
// to a pool without any cleanup.
// After initial creation, it can generate connection IDs without allocating.
// See Generate and Validate for usage notes and guarantees.
type ConnectionIDGenerator struct {
	// mac is a keyed HMAC that can be reused for subsequent connection ID
	// generations.
	mac hash.Hash

	// connID is an 8-byte slice that holds the generated connection ID after a
	// call to Generate.
	// It must not be referenced after the generator is returned to a pool.
	// It will be overwritten by subsequent calls to Generate.
	connID []byte

	// scratch is a 32-byte slice that is used as a scratchpad for the generated
	// HMACs.
	scratch []byte
}
// NewConnectionIDGenerator creates a new connection ID generator.
//
// The key seeds the HMAC; connID (8 bytes) and scratch (32 bytes, one
// SHA-256 digest) are allocated once so later calls need not allocate.
func NewConnectionIDGenerator(key string) *ConnectionIDGenerator {
	return &ConnectionIDGenerator{
		mac:     hmac.New(sha256.New, []byte(key)),
		connID:  make([]byte, 8),
		scratch: make([]byte, 32),
	}
}
// reset resets the generator.
// This is called by other methods of the generator, it's not necessary to call
// it after getting a generator from a pool.
func (g *ConnectionIDGenerator) reset() {
	g.mac.Reset()
	// Restore the full 8-byte window for the next generated ID.
	g.connID = g.connID[:8]
	// Empty the scratch buffer so mac.Sum appends from the start.
	g.scratch = g.scratch[:0]
}
// Generate generates an 8-byte connection ID as described in BEP 15 for the
// given IP and the current time.
//
// The first 4 bytes of the connection identifier is a unix timestamp and the
// last 4 bytes are a truncated HMAC token created from the aforementioned
@ -22,29 +81,36 @@ const ttl = 2 * time.Minute
// Truncated HMAC is known to be safe for 2^(-n) where n is the size in bits
// of the truncated HMAC token. In this use case we have 32 bits, thus a
// forgery probability of approximately 1 in 4 billion.
func NewConnectionID(ip net.IP, now time.Time, key string) []byte {
buf := make([]byte, 8)
binary.BigEndian.PutUint32(buf, uint32(now.UTC().Unix()))
//
// The generated ID is written to g.connID, which is also returned. g.connID
// will be reused, so it must not be referenced after returning the generator
// to a pool and will be overwritten be subsequent calls to Generate!
func (g *ConnectionIDGenerator) Generate(ip net.IP, now time.Time) []byte {
g.reset()
mac := hmac.New(sha256.New, []byte(key))
mac.Write(buf[:4])
mac.Write(ip)
macBytes := mac.Sum(nil)[:4]
copy(buf[4:], macBytes)
binary.BigEndian.PutUint32(g.connID, uint32(now.Unix()))
return buf
g.mac.Write(g.connID[:4])
g.mac.Write(ip)
g.scratch = g.mac.Sum(g.scratch)
copy(g.connID[4:8], g.scratch[:4])
log.Debug("generated connection ID", log.Fields{"ip": ip, "now": now, "connID": g.connID})
return g.connID
}
// ValidConnectionID determines whether a connection identifier is legitimate.
func ValidConnectionID(connectionID []byte, ip net.IP, now time.Time, maxClockSkew time.Duration, key string) bool {
// Validate validates the given connection ID for an IP and the current time.
func (g *ConnectionIDGenerator) Validate(connectionID []byte, ip net.IP, now time.Time, maxClockSkew time.Duration) bool {
ts := time.Unix(int64(binary.BigEndian.Uint32(connectionID[:4])), 0)
log.Debug("validating connection ID", log.Fields{"connID": connectionID, "ip": ip, "ts": ts, "now": now})
if now.After(ts.Add(ttl)) || ts.After(now.Add(maxClockSkew)) {
return false
}
mac := hmac.New(sha256.New, []byte(key))
mac.Write(connectionID[:4])
mac.Write(ip)
expectedMAC := mac.Sum(nil)[:4]
return hmac.Equal(expectedMAC, connectionID[4:])
g.reset()
g.mac.Write(connectionID[:4])
g.mac.Write(ip)
g.scratch = g.mac.Sum(g.scratch)
return hmac.Equal(g.scratch[:4], connectionID[4:])
}

View file

@ -1,9 +1,18 @@
package udp
import (
"crypto/hmac"
"encoding/binary"
"fmt"
"net"
"sync"
"testing"
"time"
sha256 "github.com/minio/sha256-simd"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/pkg/log"
)
var golden = []struct {
@ -18,12 +27,167 @@ var golden = []struct {
{0, 0, "[::]", "", true},
}
// simpleNewConnectionID generates a new connection ID the explicit way.
// This is used to verify correct behaviour of the generator.
func simpleNewConnectionID(ip net.IP, now time.Time, key string) []byte {
	cid := make([]byte, 8)
	binary.BigEndian.PutUint32(cid, uint32(now.Unix()))

	mac := hmac.New(sha256.New, []byte(key))
	mac.Write(cid[:4])
	mac.Write(ip)
	copy(cid[4:], mac.Sum(nil)[:4])

	// this is just in here because logging impacts performance and we benchmark
	// this version too.
	log.Debug("manually generated connection ID", log.Fields{"ip": ip, "now": now, "connID": cid})
	return cid
}
// TestVerification checks end-to-end generate-then-validate behaviour against
// every golden case.
func TestVerification(t *testing.T) {
	for _, tc := range golden {
		name := fmt.Sprintf("%s created at %d verified at %d", tc.ip, tc.createdAt, tc.now)
		t.Run(name, func(t *testing.T) {
			cid := NewConnectionID(net.ParseIP(tc.ip), time.Unix(tc.createdAt, 0), tc.key)
			got := ValidConnectionID(cid, net.ParseIP(tc.ip), time.Unix(tc.now, 0), time.Minute, tc.key)
			if got != tc.valid {
				t.Errorf("expected validity: %t got validity: %t", tc.valid, got)
			}
		})
	}
}
// TestGeneration checks the generator-based implementation against the simple
// reference implementation for every golden case.
func TestGeneration(t *testing.T) {
	for _, tt := range golden {
		t.Run(fmt.Sprintf("%s created at %d", tt.ip, tt.createdAt), func(t *testing.T) {
			want := simpleNewConnectionID(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0), tt.key)
			got := NewConnectionID(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0), tt.key)
			require.Equal(t, want, got)
		})
	}
}
// TestReuseGeneratorGenerate verifies a single generator instance can be
// reused: repeated Generate calls must keep producing the same connection ID.
func TestReuseGeneratorGenerate(t *testing.T) {
	for _, tt := range golden {
		t.Run(fmt.Sprintf("%s created at %d", tt.ip, tt.createdAt), func(t *testing.T) {
			cid := NewConnectionID(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0), tt.key)
			require.Len(t, cid, 8)
			gen := NewConnectionIDGenerator(tt.key)
			// Generate three times on the same instance; each result must
			// equal the one-shot wrapper's output.
			for i := 0; i < 3; i++ {
				connID := gen.Generate(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0))
				require.Equal(t, cid, connID)
			}
		})
	}
}
// TestReuseGeneratorValidate verifies a single generator instance can be
// reused: repeated Validate calls must keep returning the expected validity.
func TestReuseGeneratorValidate(t *testing.T) {
	for _, tt := range golden {
		t.Run(fmt.Sprintf("%s created at %d verified at %d", tt.ip, tt.createdAt, tt.now), func(t *testing.T) {
			gen := NewConnectionIDGenerator(tt.key)
			cid := gen.Generate(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0))
			// Validate three times on the same instance used to generate.
			for i := 0; i < 3; i++ {
				got := gen.Validate(cid, net.ParseIP(tt.ip), time.Unix(tt.now, 0), time.Minute)
				if got != tt.valid {
					t.Errorf("expected validity: %t got validity: %t", tt.valid, got)
				}
			}
		})
	}
}
// BenchmarkSimpleNewConnectionID measures the reference (allocating)
// implementation, for comparison with the generator-based paths below.
func BenchmarkSimpleNewConnectionID(b *testing.B) {
	ip := net.ParseIP("127.0.0.1")
	key := "some random string that is hopefully at least this long"
	createdAt := time.Now()
	b.RunParallel(func(pb *testing.PB) {
		sum := int64(0)
		for pb.Next() {
			cid := simpleNewConnectionID(ip, createdAt, key)
			sum += int64(cid[7])
		}
		// Keep the accumulator live so the loop body is not optimized away.
		_ = sum
	})
}
// BenchmarkNewConnectionID measures the one-shot wrapper, which allocates a
// fresh generator per call.
func BenchmarkNewConnectionID(b *testing.B) {
	ip := net.ParseIP("127.0.0.1")
	key := "some random string that is hopefully at least this long"
	createdAt := time.Now()
	b.RunParallel(func(pb *testing.PB) {
		sum := int64(0)
		for pb.Next() {
			cid := NewConnectionID(ip, createdAt, key)
			sum += int64(cid[7])
		}
		// Keep the accumulator live so the loop body is not optimized away.
		_ = sum
	})
}
// BenchmarkConnectionIDGenerator_Generate measures Generate throughput when
// generators are pooled and reused across goroutines.
func BenchmarkConnectionIDGenerator_Generate(b *testing.B) {
	ip := net.ParseIP("127.0.0.1")
	key := "some random string that is hopefully at least this long"
	createdAt := time.Now()

	pool := &sync.Pool{
		New: func() interface{} {
			return NewConnectionIDGenerator(key)
		},
	}

	b.RunParallel(func(pb *testing.PB) {
		sum := int64(0)
		for pb.Next() {
			gen := pool.Get().(*ConnectionIDGenerator)
			cid := gen.Generate(ip, createdAt)
			sum += int64(cid[7])
			pool.Put(gen)
		}
		// Sink the accumulator so the compiler cannot elide the benchmarked
		// work; this matches the sibling benchmarks in this file, which all
		// end with `_ = sum`.
		_ = sum
	})
}
// BenchmarkValidConnectionID measures validation via the one-shot wrapper,
// which allocates a fresh generator per call.
func BenchmarkValidConnectionID(b *testing.B) {
	ip := net.ParseIP("127.0.0.1")
	key := "some random string that is hopefully at least this long"
	createdAt := time.Now()
	cid := NewConnectionID(ip, createdAt, key)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			// The ID was just generated, so validation must succeed.
			if !ValidConnectionID(cid, ip, createdAt, 10*time.Second, key) {
				b.FailNow()
			}
		}
	})
}
// BenchmarkConnectionIDGenerator_Validate measures Validate throughput when
// generators are pooled and reused across goroutines.
func BenchmarkConnectionIDGenerator_Validate(b *testing.B) {
	ip := net.ParseIP("127.0.0.1")
	key := "some random string that is hopefully at least this long"
	createdAt := time.Now()
	cid := NewConnectionID(ip, createdAt, key)

	pool := &sync.Pool{
		New: func() interface{} {
			return NewConnectionIDGenerator(key)
		},
	}

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			gen := pool.Get().(*ConnectionIDGenerator)
			// The ID was just generated, so validation must succeed.
			if !gen.Validate(cid, ip, createdAt, 10*time.Second) {
				b.FailNow()
			}
			pool.Put(gen)
		}
	})
}

View file

@ -6,44 +6,22 @@ import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"math/rand"
"net"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend"
"github.com/chihaya/chihaya/frontend/udp/bytepool"
"github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop"
"github.com/chihaya/chihaya/pkg/timecache"
)
func init() {
prometheus.MustRegister(promResponseDurationMilliseconds)
recordResponseDuration("action", nil, time.Second)
}
var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "chihaya_udp_response_duration_milliseconds",
Help: "The duration of time it takes to receive and write a response to an API request",
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
},
[]string{"action", "error"},
)
// recordResponseDuration records the duration of time to respond to a UDP
// Request in milliseconds .
func recordResponseDuration(action string, err error, duration time.Duration) {
var errString string
if err != nil {
errString = err.Error()
}
promResponseDurationMilliseconds.
WithLabelValues(action, errString).
Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
}
var allowedGeneratedPrivateKeyRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890")
// Config represents all of the configurable options for a UDP BitTorrent
// Tracker.
@ -51,7 +29,71 @@ type Config struct {
Addr string `yaml:"addr"`
PrivateKey string `yaml:"private_key"`
MaxClockSkew time.Duration `yaml:"max_clock_skew"`
AllowIPSpoofing bool `yaml:"allow_ip_spoofing"`
EnableRequestTiming bool `yaml:"enable_request_timing"`
ParseOptions `yaml:",inline"`
}
// LogFields renders the current config as a set of Logrus fields.
func (cfg Config) LogFields() log.Fields {
	return log.Fields{
		"addr": cfg.Addr,
		// NOTE(review): this emits the HMAC private key into logs — confirm
		// that is acceptable wherever these logs are shipped.
		"privateKey":          cfg.PrivateKey,
		"maxClockSkew":        cfg.MaxClockSkew,
		"enableRequestTiming": cfg.EnableRequestTiming,
		"allowIPSpoofing":     cfg.AllowIPSpoofing,
		"maxNumWant":          cfg.MaxNumWant,
		"defaultNumWant":      cfg.DefaultNumWant,
		"maxScrapeInfoHashes": cfg.MaxScrapeInfoHashes,
	}
}
// Validate sanity checks values set in a config and returns a new config with
// default values replacing anything that is invalid.
//
// This function warns to the logger when a value is changed.
func (cfg Config) Validate() Config {
	out := cfg

	// Generate a private key if one isn't provided by the user.
	if cfg.PrivateKey == "" {
		rand.Seed(time.Now().UnixNano())
		runes := make([]rune, 64)
		for i := range runes {
			runes[i] = allowedGeneratedPrivateKeyRunes[rand.Intn(len(allowedGeneratedPrivateKeyRunes))]
		}
		out.PrivateKey = string(runes)
		log.Warn("UDP private key was not provided, using generated key", log.Fields{"key": out.PrivateKey})
	}

	// Replace each unset/invalid parser limit with its package default.
	if cfg.MaxNumWant <= 0 {
		out.MaxNumWant = defaultMaxNumWant
		log.Warn("falling back to default configuration", log.Fields{
			"name":     "udp.MaxNumWant",
			"provided": cfg.MaxNumWant,
			"default":  out.MaxNumWant,
		})
	}
	if cfg.DefaultNumWant <= 0 {
		out.DefaultNumWant = defaultDefaultNumWant
		log.Warn("falling back to default configuration", log.Fields{
			"name":     "udp.DefaultNumWant",
			"provided": cfg.DefaultNumWant,
			"default":  out.DefaultNumWant,
		})
	}
	if cfg.MaxScrapeInfoHashes <= 0 {
		out.MaxScrapeInfoHashes = defaultMaxScrapeInfoHashes
		log.Warn("falling back to default configuration", log.Fields{
			"name":     "udp.MaxScrapeInfoHashes",
			"provided": cfg.MaxScrapeInfoHashes,
			"default":  out.MaxScrapeInfoHashes,
		})
	}

	return out
}
// Frontend holds the state of a UDP BitTorrent Frontend.
@ -60,57 +102,94 @@ type Frontend struct {
closing chan struct{}
wg sync.WaitGroup
genPool *sync.Pool
logic frontend.TrackerLogic
Config
}
// NewFrontend allocates a new instance of a Frontend.
func NewFrontend(logic frontend.TrackerLogic, cfg Config) *Frontend {
return &Frontend{
// NewFrontend creates a new instance of an UDP Frontend that asynchronously
// serves requests.
func NewFrontend(logic frontend.TrackerLogic, provided Config) (*Frontend, error) {
cfg := provided.Validate()
f := &Frontend{
closing: make(chan struct{}),
logic: logic,
Config: cfg,
genPool: &sync.Pool{
New: func() interface{} {
return NewConnectionIDGenerator(cfg.PrivateKey)
},
},
}
if err := f.listen(); err != nil {
return nil, err
}
go func() {
if err := f.serve(); err != nil {
log.Fatal("failed while serving udp", log.Err(err))
}
}()
return f, nil
}
// Stop provides a thread-safe way to shutdown a currently running Frontend.
func (t *Frontend) Stop() {
close(t.closing)
t.socket.SetReadDeadline(time.Now())
t.wg.Wait()
func (t *Frontend) Stop() stop.Result {
select {
case <-t.closing:
return stop.AlreadyStopped
default:
}
// ListenAndServe listens on the UDP network address t.Addr and blocks serving
// BitTorrent requests until t.Stop() is called or an error is returned.
func (t *Frontend) ListenAndServe() error {
c := make(stop.Channel)
go func() {
close(t.closing)
_ = t.socket.SetReadDeadline(time.Now())
t.wg.Wait()
c.Done(t.socket.Close())
}()
return c.Result()
}
// listen resolves the address and binds the server socket.
func (t *Frontend) listen() error {
udpAddr, err := net.ResolveUDPAddr("udp", t.Addr)
if err != nil {
return err
}
t.socket, err = net.ListenUDP("udp", udpAddr)
if err != nil {
return err
}
defer t.socket.Close()
// serve blocks while listening and serving UDP BitTorrent requests
// until Stop() is called or an error is returned.
func (t *Frontend) serve() error {
pool := bytepool.New(2048)
t.wg.Add(1)
defer t.wg.Done()
for {
// Check to see if we need to shutdown.
select {
case <-t.closing:
log.Debug("udp serve() received shutdown signal")
return nil
default:
}
// Read a UDP packet into a reusable buffer.
buffer := pool.Get()
t.socket.SetReadDeadline(time.Now().Add(time.Second))
n, addr, err := t.socket.ReadFromUDP(buffer)
n, addr, err := t.socket.ReadFromUDP(*buffer)
if err != nil {
pool.Put(buffer)
if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
var netErr net.Error
if errors.As(err, &netErr); netErr.Temporary() {
// A temporary failure is not fatal; just pretend it never happened.
continue
}
@ -133,13 +212,20 @@ func (t *Frontend) ListenAndServe() error {
}
// Handle the request.
start := time.Now()
action, err := t.handleRequest(
var start time.Time
if t.EnableRequestTiming {
start = time.Now()
}
action, af, err := t.handleRequest(
// Make sure the IP is copied, not referenced.
Request{buffer[:n], append([]byte{}, addr.IP...)},
Request{(*buffer)[:n], append([]byte{}, addr.IP...)},
ResponseWriter{t.socket, addr},
)
recordResponseDuration(action, err, time.Since(start))
if t.EnableRequestTiming {
recordResponseDuration(action, af, err, time.Since(start))
} else {
recordResponseDuration(action, af, err, time.Duration(0))
}
}()
}
}
@ -159,12 +245,12 @@ type ResponseWriter struct {
// Write implements the io.Writer interface for a ResponseWriter.
func (w ResponseWriter) Write(b []byte) (int, error) {
w.socket.WriteToUDP(b, w.addr)
_, _ = w.socket.WriteToUDP(b, w.addr)
return len(b), nil
}
// handleRequest parses and responds to a UDP Request.
func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string, err error) {
func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string, af *bittorrent.AddressFamily, err error) {
if len(r.Packet) < 16 {
// Malformed, no client packets are less than 16 bytes.
// We explicitly return nothing in case this is a DoS attempt.
@ -177,9 +263,13 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string
actionID := binary.BigEndian.Uint32(r.Packet[8:12])
txID := r.Packet[12:16]
// get a connection ID generator/validator from the pool.
gen := t.genPool.Get().(*ConnectionIDGenerator)
defer t.genPool.Put(gen)
// If this isn't requesting a new connection ID and the connection ID is
// invalid, then fail.
if actionID != connectActionID && !ValidConnectionID(connID, r.IP, time.Now(), t.MaxClockSkew, t.PrivateKey) {
if actionID != connectActionID && !gen.Validate(connID, r.IP, timecache.Now(), t.MaxClockSkew) {
err = errBadConnectionID
WriteError(w, txID, err)
return
@ -195,43 +285,66 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string
return
}
WriteConnectionID(w, txID, NewConnectionID(r.IP, time.Now(), t.PrivateKey))
af = new(bittorrent.AddressFamily)
if r.IP.To4() != nil {
*af = bittorrent.IPv4
} else if len(r.IP) == net.IPv6len { // implies r.IP.To4() == nil
*af = bittorrent.IPv6
} else {
// Should never happen - we got the IP straight from the UDP packet.
panic(fmt.Sprintf("udp: invalid IP: neither v4 nor v6, IP: %#v", r.IP))
}
WriteConnectionID(w, txID, gen.Generate(r.IP, timecache.Now()))
case announceActionID, announceV6ActionID:
actionName = "announce"
var req *bittorrent.AnnounceRequest
req, err = ParseAnnounce(r, t.AllowIPSpoofing, actionID == announceV6ActionID)
req, err = ParseAnnounce(r, actionID == announceV6ActionID, t.ParseOptions)
if err != nil {
WriteError(w, txID, err)
return
}
af = new(bittorrent.AddressFamily)
*af = req.IP.AddressFamily
var ctx context.Context
var resp *bittorrent.AnnounceResponse
resp, err = t.logic.HandleAnnounce(context.Background(), req)
ctx, resp, err = t.logic.HandleAnnounce(context.Background(), req)
if err != nil {
WriteError(w, txID, err)
return
}
WriteAnnounce(w, txID, resp, actionID == announceV6ActionID)
WriteAnnounce(w, txID, resp, actionID == announceV6ActionID, req.IP.AddressFamily == bittorrent.IPv6)
go t.logic.AfterAnnounce(context.Background(), req, resp)
go t.logic.AfterAnnounce(ctx, req, resp)
case scrapeActionID:
actionName = "scrape"
var req *bittorrent.ScrapeRequest
req, err = ParseScrape(r)
req, err = ParseScrape(r, t.ParseOptions)
if err != nil {
WriteError(w, txID, err)
return
}
ctx := context.WithValue(context.Background(), middleware.ScrapeIsIPv6Key, len(r.IP) == net.IPv6len)
if r.IP.To4() != nil {
req.AddressFamily = bittorrent.IPv4
} else if len(r.IP) == net.IPv6len { // implies r.IP.To4() == nil
req.AddressFamily = bittorrent.IPv6
} else {
// Should never happen - we got the IP straight from the UDP packet.
panic(fmt.Sprintf("udp: invalid IP: neither v4 nor v6, IP: %#v", r.IP))
}
af = new(bittorrent.AddressFamily)
*af = req.AddressFamily
var ctx context.Context
var resp *bittorrent.ScrapeResponse
resp, err = t.logic.HandleScrape(ctx, req)
ctx, resp, err = t.logic.HandleScrape(context.Background(), req)
if err != nil {
WriteError(w, txID, err)
return
@ -239,7 +352,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string
WriteScrape(w, txID, resp)
go t.logic.AfterScrape(context.Background(), req, resp)
go t.logic.AfterScrape(ctx, req, resp)
default:
err = errUnknownAction

View file

@ -0,0 +1,28 @@
package udp_test
import (
"testing"
"github.com/chihaya/chihaya/frontend/udp"
"github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/storage"
_ "github.com/chihaya/chihaya/storage/memory"
)
// TestStartStopRaceIssue437 starts a UDP frontend on an ephemeral port and
// stops it immediately, ensuring startup and shutdown do not race (#437).
func TestStartStopRaceIssue437(t *testing.T) {
	peerStore, err := storage.NewPeerStore("memory", nil)
	if err != nil {
		t.Fatal(err)
	}

	var respCfg middleware.ResponseConfig
	logic := middleware.NewLogic(respCfg, peerStore, nil, nil)

	frontend, err := udp.NewFrontend(logic, udp.Config{Addr: "127.0.0.1:0"})
	if err != nil {
		t.Fatal(err)
	}

	if errs := <-frontend.Stop(); len(errs) != 0 {
		t.Fatal(errs[0])
	}
}

View file

@ -15,24 +15,23 @@ const (
announceActionID
scrapeActionID
errorActionID
// action == 4 is the "old" IPv6 action used by opentracker, with a packet
// format specified at
// https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
announceV6ActionID
)
// Option-Types as described in BEP 41 and BEP 45.
const (
optionEndOfOptions byte = 0x0
optionNOP = 0x1
optionURLData = 0x2
optionNOP byte = 0x1
optionURLData byte = 0x2
)
var (
// initialConnectionID is the magic initial connection ID specified by BEP 15.
initialConnectionID = []byte{0, 0, 0x04, 0x17, 0x27, 0x10, 0x19, 0x80}
// emptyIPs are the value of an IP field that has been left blank.
emptyIPv4 = []byte{0, 0, 0, 0}
emptyIPv6 = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
// eventIDs map values described in BEP 15 to Events.
eventIDs = []bittorrent.Event{
bittorrent.None,
@ -49,16 +48,31 @@ var (
errUnknownOptionType = bittorrent.ClientError("unknown option type")
)
// ParseOptions is the configuration used to parse an Announce Request.
//
// If AllowIPSpoofing is true, IPs provided via params will be used.
type ParseOptions struct {
	AllowIPSpoofing bool `yaml:"allow_ip_spoofing"` // trust client-supplied IPs from the packet body
	// MaxNumWant and DefaultNumWant are passed to bittorrent.SanitizeAnnounce
	// as the cap and fallback for the client's numwant value.
	MaxNumWant     uint32 `yaml:"max_numwant"`
	DefaultNumWant uint32 `yaml:"default_numwant"`
	// MaxScrapeInfoHashes is passed to bittorrent.SanitizeScrape to bound the
	// number of infohashes accepted in a single scrape.
	MaxScrapeInfoHashes uint32 `yaml:"max_scrape_infohashes"`
}
// Default parser config constants.
const (
defaultMaxNumWant = 100
defaultDefaultNumWant = 50
defaultMaxScrapeInfoHashes = 50
)
// ParseAnnounce parses an AnnounceRequest from a UDP request.
//
// If allowIPSpoofing is true, IPs provided via params will be used.
//
// If v6 is true the announce will be parsed as an IPv6 announce "the
// opentracker way", see
// http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
func ParseAnnounce(r Request, allowIPSpoofing, v6 bool) (*bittorrent.AnnounceRequest, error) {
// If v6Action is true, the announce is parsed the
// "old opentracker way":
// https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
func ParseAnnounce(r Request, v6Action bool, opts ParseOptions) (*bittorrent.AnnounceRequest, error) {
ipEnd := 84 + net.IPv4len
if v6 {
if v6Action {
ipEnd = 84 + net.IPv6len
}
@ -78,12 +92,14 @@ func ParseAnnounce(r Request, allowIPSpoofing, v6 bool) (*bittorrent.AnnounceReq
}
ip := r.IP
ipProvided := false
ipbytes := r.Packet[84:ipEnd]
if allowIPSpoofing {
if opts.AllowIPSpoofing {
// Make sure the bytes are copied to a new slice.
copy(ip, net.IP(ipbytes))
ipProvided = true
}
if !allowIPSpoofing && r.IP == nil {
if !opts.AllowIPSpoofing && r.IP == nil {
// We have no IP address to fallback on.
return nil, errMalformedIP
}
@ -96,20 +112,29 @@ func ParseAnnounce(r Request, allowIPSpoofing, v6 bool) (*bittorrent.AnnounceReq
return nil, err
}
return &bittorrent.AnnounceRequest{
request := &bittorrent.AnnounceRequest{
Event: eventIDs[eventID],
InfoHash: bittorrent.InfoHashFromBytes(infohash),
NumWant: uint32(numWant),
NumWant: numWant,
Left: left,
Downloaded: downloaded,
Uploaded: uploaded,
IPProvided: ipProvided,
NumWantProvided: true,
EventProvided: true,
Peer: bittorrent.Peer{
ID: bittorrent.PeerIDFromBytes(peerID),
IP: ip,
IP: bittorrent.IP{IP: ip},
Port: port,
},
Params: params,
}, nil
}
if err := bittorrent.SanitizeAnnounce(request, opts.MaxNumWant, opts.DefaultNumWant); err != nil {
return nil, err
}
return request, nil
}
type buffer struct {
@ -136,7 +161,7 @@ func handleOptionalParameters(packet []byte) (bittorrent.Params, error) {
return bittorrent.ParseURLData("")
}
var buf = newBuffer()
buf := newBuffer()
defer buf.free()
for i := 0; i < len(packet); {
@ -174,7 +199,7 @@ func handleOptionalParameters(packet []byte) (bittorrent.Params, error) {
}
// ParseScrape parses a ScrapeRequest from a UDP request.
func ParseScrape(r Request) (*bittorrent.ScrapeRequest, error) {
func ParseScrape(r Request, opts ParseOptions) (*bittorrent.ScrapeRequest, error) {
// If a scrape isn't at least 36 bytes long, it's malformed.
if len(r.Packet) < 36 {
return nil, errMalformedPacket
@ -194,7 +219,11 @@ func ParseScrape(r Request) (*bittorrent.ScrapeRequest, error) {
r.Packet = r.Packet[20:]
}
return &bittorrent.ScrapeRequest{
InfoHashes: infohashes,
}, nil
// Sanitize the request.
request := &bittorrent.ScrapeRequest{InfoHashes: infohashes}
if err := bittorrent.SanitizeScrape(request, opts.MaxScrapeInfoHashes); err != nil {
return nil, err
}
return request, nil
}

View file

@ -1,6 +1,10 @@
package udp
import "testing"
import (
"errors"
"fmt"
"testing"
)
var table = []struct {
data []byte
@ -45,27 +49,29 @@ var table = []struct {
}
func TestHandleOptionalParameters(t *testing.T) {
for _, testCase := range table {
params, err := handleOptionalParameters(testCase.data)
if err != testCase.err {
if testCase.err == nil {
t.Fatalf("expected no parsing error for %x but got %s", testCase.data, err)
for _, tt := range table {
t.Run(fmt.Sprintf("%#v as %#v", tt.data, tt.values), func(t *testing.T) {
params, err := handleOptionalParameters(tt.data)
if !errors.Is(err, tt.err) {
if tt.err == nil {
t.Fatalf("expected no parsing error for %x but got %s", tt.data, err)
} else {
t.Fatalf("expected parsing error for %x", testCase.data)
t.Fatalf("expected parsing error for %x", tt.data)
}
}
if testCase.values != nil {
if tt.values != nil {
if params == nil {
t.Fatalf("expected values %v for %x", testCase.values, testCase.data)
t.Fatalf("expected values %v for %x", tt.values, tt.data)
} else {
for key, want := range testCase.values {
for key, want := range tt.values {
if got, ok := params.String(key); !ok {
t.Fatalf("params missing entry %s for data %x", key, testCase.data)
t.Fatalf("params missing entry %s for data %x", key, tt.data)
} else if got != want {
t.Fatalf("expected param %s=%s, but was %s for data %x", key, want, got, testCase.data)
t.Fatalf("expected param %s=%s, but was %s for data %x", key, want, got, tt.data)
}
}
}
}
})
}
}

View file

@ -0,0 +1,50 @@
package udp
import (
"errors"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/chihaya/chihaya/bittorrent"
)
// init registers the UDP response-duration histogram with the default
// Prometheus registerer; MustRegister panics on duplicate registration.
func init() {
	prometheus.MustRegister(promResponseDurationMilliseconds)
}
// promResponseDurationMilliseconds observes how long handling one UDP request
// took, labeled by action name, address family, and client-error string.
var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "chihaya_udp_response_duration_milliseconds",
		Help:    "The duration of time it takes to receive and write a response to an API request",
		Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
	},
	[]string{"action", "address_family", "error"},
)
// recordResponseDuration records the duration of time to respond to a UDP
// Request in milliseconds.
//
// Only bittorrent.ClientError messages are recorded verbatim; any other
// non-nil error collapses to "internal error" to keep label cardinality low.
func recordResponseDuration(action string, af *bittorrent.AddressFamily, err error, duration time.Duration) {
	errString := ""
	if err != nil {
		var clientErr bittorrent.ClientError
		if errors.As(err, &clientErr) {
			errString = clientErr.Error()
		} else {
			errString = "internal error"
		}
	}

	afString := ""
	switch {
	case af == nil:
		afString = "Unknown"
	case *af == bittorrent.IPv4:
		afString = "IPv4"
	case *af == bittorrent.IPv6:
		afString = "IPv6"
	}

	promResponseDurationMilliseconds.
		WithLabelValues(action, afString, errString).
		Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
}

View file

@ -2,6 +2,7 @@ package udp
import (
"encoding/binary"
"errors"
"fmt"
"io"
"time"
@ -12,45 +13,47 @@ import (
// WriteError writes the failure reason as a null-terminated string.
func WriteError(w io.Writer, txID []byte, err error) {
// If the client wasn't at fault, acknowledge it.
if _, ok := err.(bittorrent.ClientError); !ok {
err = fmt.Errorf("internal error occurred: %s", err.Error())
var clientErr bittorrent.ClientError
if !errors.As(err, &clientErr) {
err = fmt.Errorf("internal error occurred: %w", err)
}
buf := newBuffer()
writeHeader(buf, txID, errorActionID)
buf.WriteString(err.Error())
buf.WriteRune('\000')
w.Write(buf.Bytes())
_, _ = w.Write(buf.Bytes())
buf.free()
}
// WriteAnnounce encodes an announce response according to BEP 15.
// The peers returned will be resp.IPv6Peers or resp.IPv4Peers, depending on
// whether v6 is set. The action ID will be 4, according to
// http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/.
func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse, v6 bool) {
// whether v6Peers is set.
// If v6Action is set, the action will be 4, according to
// https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse, v6Action, v6Peers bool) {
buf := newBuffer()
if v6 {
if v6Action {
writeHeader(buf, txID, announceV6ActionID)
} else {
writeHeader(buf, txID, announceActionID)
}
binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
binary.Write(buf, binary.BigEndian, resp.Incomplete)
binary.Write(buf, binary.BigEndian, resp.Complete)
_ = binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
_ = binary.Write(buf, binary.BigEndian, resp.Incomplete)
_ = binary.Write(buf, binary.BigEndian, resp.Complete)
peers := resp.IPv4Peers
if v6 {
if v6Peers {
peers = resp.IPv6Peers
}
for _, peer := range peers {
buf.Write(peer.IP)
binary.Write(buf, binary.BigEndian, peer.Port)
buf.Write(peer.IP.IP)
_ = binary.Write(buf, binary.BigEndian, peer.Port)
}
w.Write(buf.Bytes())
_, _ = w.Write(buf.Bytes())
buf.free()
}
@ -61,12 +64,12 @@ func WriteScrape(w io.Writer, txID []byte, resp *bittorrent.ScrapeResponse) {
writeHeader(buf, txID, scrapeActionID)
for _, scrape := range resp.Files {
binary.Write(buf, binary.BigEndian, scrape.Complete)
binary.Write(buf, binary.BigEndian, scrape.Snatches)
binary.Write(buf, binary.BigEndian, scrape.Incomplete)
_ = binary.Write(buf, binary.BigEndian, scrape.Complete)
_ = binary.Write(buf, binary.BigEndian, scrape.Snatches)
_ = binary.Write(buf, binary.BigEndian, scrape.Incomplete)
}
w.Write(buf.Bytes())
_, _ = w.Write(buf.Bytes())
buf.free()
}
@ -77,13 +80,13 @@ func WriteConnectionID(w io.Writer, txID, connID []byte) {
writeHeader(buf, txID, connectActionID)
buf.Write(connID)
w.Write(buf.Bytes())
_, _ = w.Write(buf.Bytes())
buf.free()
}
// writeHeader writes the action and transaction ID to the provided response
// buffer.
func writeHeader(w io.Writer, txID []byte, action uint32) {
binary.Write(w, binary.BigEndian, action)
w.Write(txID)
_ = binary.Write(w, binary.BigEndian, action)
_, _ = w.Write(txID)
}

70
glide.lock generated
View file

@ -1,70 +0,0 @@
hash: fe839da75efcf365317b1b5eb04bfa15cd1db10265f4947b8aff78932bf4622e
updated: 2016-09-05T18:13:39.020799284-04:00
imports:
- name: github.com/beorn7/perks
version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
subpackages:
- quantile
- name: github.com/golang/protobuf
version: 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a
subpackages:
- proto
- name: github.com/inconshreveable/mousetrap
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/julienschmidt/httprouter
version: 8c199fb6259ffc1af525cc3ad52ee60ba8359669
- name: github.com/matttproud/golang_protobuf_extensions
version: c12348ce28de40eed0136aa2b644d0ee0650e56c
subpackages:
- pbutil
- name: github.com/mendsley/gojwk
version: 4d5ec6e58103388d6cb0d7d72bc72649be4f0504
- name: github.com/prometheus/client_golang
version: c5b7fccd204277076155f10851dad72b76a49317
subpackages:
- prometheus
- name: github.com/prometheus/client_model
version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6
subpackages:
- go
- name: github.com/prometheus/common
version: 616e90af75cc300730196d04f3676f838d70414f
subpackages:
- expfmt
- internal/bitbucket.org/ww/goautoneg
- model
- name: github.com/prometheus/procfs
version: abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
- name: github.com/SermoDigital/jose
version: 389fea327ef076853db8fae03a0f38e30e6092ab
subpackages:
- crypto
- jws
- jwt
- name: github.com/Sirupsen/logrus
version: 4b6ea7319e214d98c938f12692336f7ca9348d6b
- name: github.com/spf13/cobra
version: 9c28e4bbd74e5c3ed7aacbc552b2cab7cfdfe744
- name: github.com/spf13/pflag
version: 103ce5cd2042f2fe629c1957abb64ab3e7f50235
- name: github.com/tylerb/graceful
version: 50a48b6e73fcc75b45e22c05b79629a67c79e938
- name: golang.org/x/sys
version: a646d33e2ee3172a661fc09bca23bb4889a41bc8
subpackages:
- unix
- name: gopkg.in/yaml.v2
version: e4d366fc3c7938e2958e662b4258c7a89e1f0e3e
testImports:
- name: github.com/davecgh/go-spew
version: 6cf5744a041a0022271cefed95ba843f6d87fd51
subpackages:
- spew
- name: github.com/pmezard/go-difflib
version: 792786c7400a136282c1664665ae0a8db921c6c2
subpackages:
- difflib
- name: github.com/stretchr/testify
version: f390dcf405f7b83c997eac1b06768bb9f44dec18
subpackages:
- assert

View file

@ -1,26 +0,0 @@
package: github.com/chihaya/chihaya
import:
- package: github.com/SermoDigital/jose
version: ~1.0.0
subpackages:
- crypto
- jws
- jwt
- package: github.com/Sirupsen/logrus
version: ~0.10.0
- package: github.com/julienschmidt/httprouter
version: ~1.1.0
- package: github.com/mendsley/gojwk
- package: github.com/prometheus/client_golang
version: ~0.8.0
subpackages:
- prometheus
- package: github.com/spf13/cobra
- package: github.com/tylerb/graceful
version: ~1.2.13
- package: gopkg.in/yaml.v2
testImport:
- package: github.com/stretchr/testify
version: ~1.1.3
subpackages:
- assert

28
go.mod Normal file
View file

@ -0,0 +1,28 @@
module github.com/chihaya/chihaya
go 1.16
require (
github.com/SermoDigital/jose v0.9.2-0.20180104203859-803625baeddc
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 // indirect
github.com/alicebob/miniredis v2.5.0+incompatible
github.com/anacrolix/dht/v2 v2.15.1 // indirect
github.com/anacrolix/missinggo/v2 v2.5.3 // indirect
github.com/anacrolix/torrent v1.40.0
github.com/go-redsync/redsync/v4 v4.5.0
github.com/gomodule/redigo v1.8.8
github.com/julienschmidt/httprouter v1.3.0
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103
github.com/minio/sha256-simd v1.0.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.3.0
github.com/stretchr/testify v1.7.0
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
gopkg.in/yaml.v2 v2.4.0
)

1625
go.sum Normal file

File diff suppressed because it is too large Load diff

View file

@ -5,11 +5,35 @@ package clientapproval
import (
"context"
"errors"
"fmt"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/middleware"
)
// Name is the name by which this middleware is registered with Chihaya.
const Name = "client approval"
// init registers this middleware's driver under Name so hooks can be
// instantiated from tracker configuration.
func init() {
	middleware.RegisterDriver(Name, driver{})
}
// Compile-time check that driver satisfies middleware.Driver.
var _ middleware.Driver = driver{}

// driver builds client-approval hooks from raw YAML option bytes.
type driver struct{}

// NewHook unmarshals optionBytes into a Config and constructs the hook,
// wrapping YAML errors with the middleware name for easier diagnosis.
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
	var cfg Config
	err := yaml.Unmarshal(optionBytes, &cfg)
	if err != nil {
		return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
	}

	return NewHook(cfg)
}
// ErrClientUnapproved is the error returned when a client's PeerID is invalid.
var ErrClientUnapproved = bittorrent.ClientError("unapproved client")
@ -32,6 +56,10 @@ func NewHook(cfg Config) (middleware.Hook, error) {
unapproved: make(map[bittorrent.ClientID]struct{}),
}
if len(cfg.Whitelist) > 0 && len(cfg.Blacklist) > 0 {
return nil, fmt.Errorf("using both whitelist and blacklist is invalid")
}
for _, cidString := range cfg.Whitelist {
cidBytes := []byte(cidString)
if len(cidBytes) != 6 {

View file

@ -0,0 +1,75 @@
package clientapproval
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/bittorrent"
)
// cases enumerates whitelist/blacklist configurations paired with a 20-byte
// peer ID and whether HandleAnnounce should approve that peer.
var cases = []struct {
	cfg      Config
	peerID   string
	approved bool
}{
	// Client ID is whitelisted
	{
		Config{
			Whitelist: []string{"010203"},
		},
		"01020304050607080900",
		true,
	},
	// Client ID is not whitelisted
	{
		Config{
			Whitelist: []string{"010203"},
		},
		"10203040506070809000",
		false,
	},
	// Client ID is not blacklisted
	{
		Config{
			Blacklist: []string{"010203"},
		},
		"00000000001234567890",
		true,
	},
	// Client ID is blacklisted
	{
		Config{
			Blacklist: []string{"123456"},
		},
		"12345678900000000000",
		false,
	},
}
// TestHandleAnnounce verifies that announces are approved or rejected
// according to each case's whitelist/blacklist configuration.
func TestHandleAnnounce(t *testing.T) {
	for _, tt := range cases {
		t.Run(fmt.Sprintf("testing peerid %s", tt.peerID), func(t *testing.T) {
			h, err := NewHook(tt.cfg)
			// NoError gives a better failure message than Nil for errors.
			require.NoError(t, err)

			ctx := context.Background()
			req := &bittorrent.AnnounceRequest{}
			resp := &bittorrent.AnnounceResponse{}
			req.Peer.ID = bittorrent.PeerIDFromString(tt.peerID)

			nctx, err := h.HandleAnnounce(ctx, req, resp)
			require.Equal(t, ctx, nctx)
			// require.Equal takes (expected, actual) in that order.
			if tt.approved {
				require.NotEqual(t, ErrClientUnapproved, err)
			} else {
				require.Equal(t, ErrClientUnapproved, err)
			}
		})
	}
}

View file

@ -0,0 +1,84 @@
// Package fixedpeers implements a Hook that
// appends a fixed peer to every Announce request.
package fixedpeers
import (
"context"
"fmt"
"net"
"strconv"
"strings"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/middleware"
)
// Name is the name by which this middleware is registered with Chihaya.
const Name = "fixed peers"
func init() {
middleware.RegisterDriver(Name, driver{})
}
var _ middleware.Driver = driver{}
type driver struct{}
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
var cfg Config
err := yaml.Unmarshal(optionBytes, &cfg)
if err != nil {
return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
}
return NewHook(cfg)
}
// Config represents the configuration for the fixed peers middleware.
type Config struct {
	// FixedPeers lists the peers to inject, each formatted as "ip:port".
	FixedPeers []string `yaml:"fixed_peers"`
}
// hook holds the pre-parsed static peer list injected into announces.
type hook struct {
	peers []bittorrent.Peer
}
// NewHook returns an instance of the fixed peers middleware.
//
// Each entry of cfg.FixedPeers must have the form "ipv4:port". Malformed
// entries return an error (instead of panicking or indexing out of range),
// so configuration mistakes fail cleanly at startup.
func NewHook(cfg Config) (middleware.Hook, error) {
	peers := make([]bittorrent.Peer, 0, len(cfg.FixedPeers))
	for _, peerString := range cfg.FixedPeers {
		parts := strings.Split(peerString, ":")
		// Guard before indexing: entries without exactly one colon would
		// otherwise panic with "index out of range".
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid fixed peer %q: expected ip:port", peerString)
		}

		port, err := strconv.Atoi(parts[1])
		if err != nil {
			return nil, fmt.Errorf("invalid port in fixed peer %q: %w", peerString, err)
		}
		if port < 0 || port > 65535 {
			return nil, fmt.Errorf("port out of range in fixed peer %q", peerString)
		}

		ip := net.ParseIP(parts[0]).To4()
		if ip == nil {
			// Previously panicked; a constructor that already returns error
			// should report bad input instead of crashing the process.
			return nil, fmt.Errorf("invalid IPv4 address in fixed peer %q", peerString)
		}

		peers = append(peers,
			bittorrent.Peer{
				ID:   bittorrent.PeerID{0},
				Port: uint16(port),
				IP:   bittorrent.IP{IP: ip},
			})
	}

	return &hook{peers: peers}, nil
}
// HandleAnnounce appends every configured fixed peer to the response's IPv4
// peer list and increments the seeder (Complete) count once per added peer.
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
	for _, fixed := range h.peers {
		resp.IPv4Peers = append(resp.IPv4Peers, fixed)
		resp.Complete++
	}
	return ctx, nil
}
// HandleScrape implements middleware.Hook. Fixed peers do not affect scrape
// responses, so the request passes through unchanged.
func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
	return ctx, nil
}

View file

@ -0,0 +1,47 @@
package fixedpeers
import (
"context"
"encoding/hex"
"net"
"testing"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/bittorrent"
)
// TestAppendFixedPeer verifies that HandleAnnounce appends both configured
// fixed peers to the IPv4 peer list of the response.
func TestAppendFixedPeer(t *testing.T) {
	conf := Config{
		FixedPeers: []string{"8.8.8.8:4040", "1.1.1.1:111"},
	}
	h, err := NewHook(conf)
	require.NoError(t, err)

	ctx := context.Background()
	req := &bittorrent.AnnounceRequest{}
	resp := &bittorrent.AnnounceResponse{}
	hashbytes, err := hex.DecodeString("3000000000000000000000000000000000000000")
	require.NoError(t, err)
	req.InfoHash = bittorrent.InfoHashFromBytes(hashbytes)

	nctx, err := h.HandleAnnounce(ctx, req, resp)
	require.NoError(t, err)
	require.Equal(t, ctx, nctx)

	// Expected peers mirror what NewHook stores: the 4-byte To4() form of
	// each address, not the 16-byte net.ParseIP representation (DeepEqual
	// distinguishes the two). Composite literals use keyed fields so that
	// `go vet`'s unkeyed-fields check stays clean.
	peers := []bittorrent.Peer{
		{
			ID:   bittorrent.PeerID{0},
			Port: 4040,
			IP:   bittorrent.IP{IP: net.ParseIP("8.8.8.8").To4(), AddressFamily: bittorrent.IPv4},
		},
		{
			ID:   bittorrent.PeerID{0},
			Port: 111,
			IP:   bittorrent.IP{IP: net.ParseIP("1.1.1.1").To4(), AddressFamily: bittorrent.IPv4},
		},
	}
	require.Equal(t, peers, resp.IPv4Peers)
}

View file

@ -3,14 +3,15 @@ package middleware
import (
"context"
"errors"
"net"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/storage"
)
// Hook abstracts the concept of anything that needs to interact with a
// BitTorrent client's request and response to a BitTorrent tracker.
// PreHooks and PostHooks both use the same interface.
//
// A Hook can implement stop.Stopper if clean shutdown is required.
type Hook interface {
HandleAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) (context.Context, error)
HandleScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) (context.Context, error)
@ -34,14 +35,16 @@ func (h *swarmInteractionHook) HandleAnnounce(ctx context.Context, req *bittorre
}
switch {
case req.Port < 100:
return ctx, nil
case req.Event == bittorrent.Stopped:
err = h.store.DeleteSeeder(req.InfoHash, req.Peer)
if err != nil && err != storage.ErrResourceDoesNotExist {
if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
return ctx, err
}
err = h.store.DeleteLeecher(req.InfoHash, req.Peer)
if err != nil && err != storage.ErrResourceDoesNotExist {
if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
return ctx, err
}
case req.Event == bittorrent.Completed:
@ -67,9 +70,6 @@ func (h *swarmInteractionHook) HandleScrape(ctx context.Context, _ *bittorrent.S
return ctx, nil
}
// ErrInvalidIP indicates an invalid IP for an Announce.
var ErrInvalidIP = errors.New("invalid IP")
type skipResponseHook struct{}
// SkipResponseHookKey is a key for the context of an Announce or Scrape to
@ -97,9 +97,9 @@ func (h *responseHook) HandleAnnounce(ctx context.Context, req *bittorrent.Annou
}
// Add the Scrape data to the response.
s := h.store.ScrapeSwarm(req.InfoHash, len(req.IP) == net.IPv6len)
resp.Incomplete = s.Incomplete
resp.Complete = s.Complete
s := h.store.ScrapeSwarm(req.InfoHash, req.IP.AddressFamily)
resp.Incomplete += s.Incomplete
resp.Complete += s.Complete
err = h.appendPeers(req, resp)
return ctx, err
@ -108,7 +108,7 @@ func (h *responseHook) HandleAnnounce(ctx context.Context, req *bittorrent.Annou
func (h *responseHook) appendPeers(req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error {
seeding := req.Left == 0
peers, err := h.store.AnnouncePeers(req.InfoHash, seeding, int(req.NumWant), req.Peer)
if err != nil && err != storage.ErrResourceDoesNotExist {
if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
return err
}
@ -123,13 +123,13 @@ func (h *responseHook) appendPeers(req *bittorrent.AnnounceRequest, resp *bittor
peers = append(peers, req.Peer)
}
switch len(req.IP) {
case net.IPv4len:
resp.IPv4Peers = peers
case net.IPv6len:
resp.IPv6Peers = peers
switch req.IP.AddressFamily {
case bittorrent.IPv4:
resp.IPv4Peers = append(resp.IPv4Peers, peers...)
case bittorrent.IPv6:
resp.IPv6Peers = append(resp.IPv6Peers, peers...)
default:
panic("peer IP is not IPv4 or IPv6 length")
panic("attempted to append peer that is neither IPv4 nor IPv6")
}
return nil
@ -140,10 +140,8 @@ func (h *responseHook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeR
return ctx, nil
}
v6, _ := ctx.Value(ScrapeIsIPv6Key).(bool)
for _, infoHash := range req.InfoHashes {
resp.Files[infoHash] = h.store.ScrapeSwarm(infoHash, v6)
resp.Files = append(resp.Files, h.store.ScrapeSwarm(infoHash, req.AddressFamily))
}
return ctx, nil

View file

@ -9,23 +9,47 @@ package jwt
import (
"context"
"crypto"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
jc "github.com/SermoDigital/jose/crypto"
"github.com/SermoDigital/jose/jws"
"github.com/SermoDigital/jose/jwt"
log "github.com/Sirupsen/logrus"
"github.com/mendsley/gojwk"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/stopper"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop"
)
// Name is the name by which this middleware is registered with Chihaya.
const Name = "jwt"
func init() {
middleware.RegisterDriver(Name, driver{})
}
var _ middleware.Driver = driver{}
type driver struct{}
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
var cfg Config
err := yaml.Unmarshal(optionBytes, &cfg)
if err != nil {
return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
}
return NewHook(cfg)
}
var (
// ErrMissingJWT is returned when a JWT is missing from a request.
ErrMissingJWT = bittorrent.ClientError("unapproved request: missing jwt")
@ -43,6 +67,16 @@ type Config struct {
JWKUpdateInterval time.Duration `yaml:"jwk_set_update_interval"`
}
// LogFields implements log.Fielder for a Config.
func (cfg Config) LogFields() log.Fields {
return log.Fields{
"issuer": cfg.Issuer,
"audience": cfg.Audience,
"JWKSetURL": cfg.JWKSetURL,
"JWKUpdateInterval": cfg.JWKUpdateInterval,
}
}
type hook struct {
cfg Config
publicKeys map[string]crypto.PublicKey
@ -51,7 +85,7 @@ type hook struct {
// NewHook returns an instance of the JWT middleware.
func NewHook(cfg Config) (middleware.Hook, error) {
log.Debugf("creating new JWT middleware with config: %#v", cfg)
log.Debug("creating new JWT middleware", cfg)
h := &hook{
cfg: cfg,
publicKeys: map[string]crypto.PublicKey{},
@ -59,8 +93,7 @@ func NewHook(cfg Config) (middleware.Hook, error) {
}
log.Debug("performing initial fetch of JWKs")
err := h.updateKeys()
if err != nil {
if err := h.updateKeys(); err != nil {
return nil, errors.New("failed to fetch initial JWK Set: " + err.Error())
}
@ -71,7 +104,7 @@ func NewHook(cfg Config) (middleware.Hook, error) {
return
case <-time.After(cfg.JWKUpdateInterval):
log.Debug("performing fetch of JWKs")
h.updateKeys()
_ = h.updateKeys()
}
}
}()
@ -82,7 +115,7 @@ func NewHook(cfg Config) (middleware.Hook, error) {
func (h *hook) updateKeys() error {
resp, err := http.Get(h.cfg.JWKSetURL)
if err != nil {
log.Errorln("failed to fetch JWK Set: " + err.Error())
log.Error("failed to fetch JWK Set", log.Err(err))
return err
}
@ -90,7 +123,7 @@ func (h *hook) updateKeys() error {
err = json.NewDecoder(resp.Body).Decode(&parsedJWKs)
if err != nil {
resp.Body.Close()
log.Errorln("failed to decode JWK JSON: " + err.Error())
log.Error("failed to decode JWK JSON", log.Err(err))
return err
}
resp.Body.Close()
@ -99,7 +132,7 @@ func (h *hook) updateKeys() error {
for _, parsedJWK := range parsedJWKs.Keys {
publicKey, err := parsedJWK.DecodePublicKey()
if err != nil {
log.Errorln("failed to decode JWK into public key: " + err.Error())
log.Error("failed to decode JWK into public key", log.Err(err))
return err
}
keys[parsedJWK.Kid] = publicKey
@ -110,19 +143,19 @@ func (h *hook) updateKeys() error {
return nil
}
func (h *hook) Stop() <-chan error {
func (h *hook) Stop() stop.Result {
log.Debug("attempting to shutdown JWT middleware")
select {
case <-h.closing:
return stopper.AlreadyStopped
return stop.AlreadyStopped
default:
}
c := make(chan error)
c := make(stop.Channel)
go func() {
close(h.closing)
close(c)
c.Done()
}()
return c
return c.Result()
}
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
@ -155,52 +188,64 @@ func validateJWT(ih bittorrent.InfoHash, jwtBytes []byte, cfgIss, cfgAud string,
claims := parsedJWT.Claims()
if iss, ok := claims.Issuer(); !ok || iss != cfgIss {
log.Debug("unequal or missing issuer when validating JWT", log.Fields{
"exists": ok,
"claim": iss,
"config": cfgIss,
})
return jwt.ErrInvalidISSClaim
}
if aud, ok := claims.Audience(); !ok || !validAudience(aud, cfgAud) {
if auds, ok := claims.Audience(); !ok || !in(cfgAud, auds) {
log.Debug("unequal or missing audience when validating JWT", log.Fields{
"exists": ok,
"claim": strings.Join(auds, ","),
"config": cfgAud,
})
return jwt.ErrInvalidAUDClaim
}
if ihClaim, ok := claims.Get("infohash").(string); !ok || !validInfoHash(ihClaim, ih) {
ihHex := hex.EncodeToString(ih[:])
if ihClaim, ok := claims.Get("infohash").(string); !ok || ihClaim != ihHex {
log.Debug("unequal or missing infohash when validating JWT", log.Fields{
"exists": ok,
"claim": ihClaim,
"request": ihHex,
})
return errors.New("claim \"infohash\" is invalid")
}
parsedJWS := parsedJWT.(jws.JWS)
kid, ok := parsedJWS.Protected().Get("kid").(string)
if !ok {
log.Debug("missing kid when validating JWT", log.Fields{
"exists": ok,
"claim": kid,
})
return errors.New("invalid kid")
}
publicKey, ok := publicKeys[kid]
if !ok {
log.Debug("missing public key forkid when validating JWT", log.Fields{
"kid": kid,
})
return errors.New("signed by unknown kid")
}
return parsedJWS.Verify(publicKey, jc.SigningMethodRS256)
}
func validAudience(aud []string, cfgAud string) bool {
for _, a := range aud {
if a == cfgAud {
return true
}
}
return false
}
func validInfoHash(claim string, ih bittorrent.InfoHash) bool {
if len(claim) == 20 && bittorrent.InfoHashFromString(claim) == ih {
return true
}
unescapedClaim, err := url.QueryUnescape(claim)
err = parsedJWS.Verify(publicKey, jc.SigningMethodRS256)
if err != nil {
return false
log.Debug("failed to verify signature of JWT", log.Err(err))
return err
}
if len(unescapedClaim) == 20 && bittorrent.InfoHashFromString(unescapedClaim) == ih {
return nil
}
// in reports whether x is an element of xs.
func in(x string, xs []string) bool {
	for i := range xs {
		if xs[i] == x {
			return true
		}
	}
	return false
}

125
middleware/logic.go Normal file
View file

@ -0,0 +1,125 @@
package middleware
import (
"context"
"time"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop"
"github.com/chihaya/chihaya/storage"
)
// ResponseConfig holds the configuration used for the actual response.
//
// TODO(jzelinskie): Evaluate whether we would like to make this optional.
// We can make Chihaya extensible enough that you can program a new response
// generator at the cost of making it possible for users to create config that
// won't compose a functional tracker.
type ResponseConfig struct {
	// AnnounceInterval is the interval announce responses tell clients to
	// wait between announces.
	AnnounceInterval time.Duration `yaml:"announce_interval"`

	// MinAnnounceInterval is the minimum interval announce responses tell
	// clients they must wait between announces.
	MinAnnounceInterval time.Duration `yaml:"min_announce_interval"`
}
// Compile-time check that Logic satisfies the frontend's TrackerLogic.
var _ frontend.TrackerLogic = &Logic{}

// NewLogic creates a new instance of a TrackerLogic that executes the provided
// middleware hooks.
func NewLogic(cfg ResponseConfig, peerStore storage.PeerStore, preHooks, postHooks []Hook) *Logic {
	// The response hook is always the last pre-hook; the swarm-interaction
	// hook is always the last post-hook.
	l := &Logic{
		announceInterval:    cfg.AnnounceInterval,
		minAnnounceInterval: cfg.MinAnnounceInterval,
		peerStore:           peerStore,
		preHooks:            append(preHooks, &responseHook{store: peerStore}),
		postHooks:           append(postHooks, &swarmInteractionHook{store: peerStore}),
	}
	return l
}
// Logic is an implementation of the TrackerLogic that functions by
// executing a series of middleware hooks.
type Logic struct {
	// announceInterval populates AnnounceResponse.Interval.
	announceInterval time.Duration
	// minAnnounceInterval populates AnnounceResponse.MinInterval.
	minAnnounceInterval time.Duration
	// peerStore is the storage backend shared with the built-in hooks.
	peerStore storage.PeerStore
	// preHooks run before a response is returned; postHooks run afterwards.
	preHooks  []Hook
	postHooks []Hook
}
// HandleAnnounce generates a response for an Announce by running every
// pre-hook in order; any hook error aborts the announce.
func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (_ context.Context, resp *bittorrent.AnnounceResponse, err error) {
	resp = &bittorrent.AnnounceResponse{
		Interval:    l.announceInterval,
		MinInterval: l.minAnnounceInterval,
		Compact:     req.Compact,
	}
	for _, hook := range l.preHooks {
		ctx, err = hook.HandleAnnounce(ctx, req, resp)
		if err != nil {
			return nil, nil, err
		}
	}

	log.Debug("generated announce response", resp)
	return ctx, resp, nil
}
// AfterAnnounce does something with the results of an Announce after it has
// been completed: each post-hook runs in order, and the first failure stops
// the chain.
func (l *Logic) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) {
	for _, hook := range l.postHooks {
		var err error
		if ctx, err = hook.HandleAnnounce(ctx, req, resp); err != nil {
			log.Error("post-announce hooks failed", log.Err(err))
			return
		}
	}
}
// HandleScrape generates a response for a Scrape by running every pre-hook in
// order; any hook error aborts the scrape.
func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) (_ context.Context, resp *bittorrent.ScrapeResponse, err error) {
	resp = &bittorrent.ScrapeResponse{
		Files: make([]bittorrent.Scrape, 0, len(req.InfoHashes)),
	}
	for _, hook := range l.preHooks {
		ctx, err = hook.HandleScrape(ctx, req, resp)
		if err != nil {
			return nil, nil, err
		}
	}

	log.Debug("generated scrape response", resp)
	return ctx, resp, nil
}
// AfterScrape does something with the results of a Scrape after it has been
// completed: each post-hook runs in order, and the first failure stops the
// chain.
func (l *Logic) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) {
	for _, hook := range l.postHooks {
		var err error
		if ctx, err = hook.HandleScrape(ctx, req, resp); err != nil {
			log.Error("post-scrape hooks failed", log.Err(err))
			return
		}
	}
}
// Stop stops the Logic.
//
// This stops any hooks that implement stop.Stopper.
func (l *Logic) Stop() stop.Result {
	stopGroup := stop.NewGroup()
	for _, hooks := range [][]Hook{l.preHooks, l.postHooks} {
		for _, hook := range hooks {
			if stoppable, ok := hook.(stop.Stopper); ok {
				stopGroup.Add(stoppable)
			}
		}
	}
	return stopGroup.Stop()
}

83
middleware/logic_test.go Normal file
View file

@ -0,0 +1,83 @@
package middleware
import (
"context"
"fmt"
"net"
"testing"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/bittorrent"
)
// nopHook is a Hook to measure the overhead of a no-operation Hook through
// benchmarks.
type nopHook struct{}

// HandleAnnounce implements Hook and does nothing.
func (h *nopHook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
	return ctx, nil
}

// HandleScrape implements Hook and does nothing.
func (h *nopHook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
	return ctx, nil
}
// hookList is an ordered list of Hooks used for benchmarking.
type hookList []Hook

// handleAnnounce runs every Hook against a fixed response, mirroring
// Logic.HandleAnnounce, and returns the final response.
func (hooks hookList) handleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (resp *bittorrent.AnnounceResponse, err error) {
	resp = &bittorrent.AnnounceResponse{
		Interval:    60,
		MinInterval: 60,
		Compact:     true,
	}
	for _, hook := range hooks {
		ctx, err = hook.HandleAnnounce(ctx, req, resp)
		if err != nil {
			return nil, err
		}
	}
	return resp, nil
}
// benchHookListV4 benchmarks the hook list with an IPv4 announce request.
func benchHookListV4(b *testing.B, hooks hookList) {
	req := &bittorrent.AnnounceRequest{Peer: bittorrent.Peer{IP: bittorrent.IP{IP: net.ParseIP("1.2.3.4"), AddressFamily: bittorrent.IPv4}}}
	benchHookList(b, hooks, req)
}

// benchHookListV6 benchmarks the hook list with an IPv6 announce request.
func benchHookListV6(b *testing.B, hooks hookList) {
	req := &bittorrent.AnnounceRequest{Peer: bittorrent.Peer{IP: bittorrent.IP{IP: net.ParseIP("fc00::0001"), AddressFamily: bittorrent.IPv6}}}
	benchHookList(b, hooks, req)
}

// benchHookList measures the cost of handling req through the given hooks.
func benchHookList(b *testing.B, hooks hookList, req *bittorrent.AnnounceRequest) {
	ctx := context.Background()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		resp, err := hooks.handleAnnounce(ctx, req)
		require.Nil(b, err)
		require.NotNil(b, resp)
	}
}
// BenchmarkHookOverhead measures announce handling with 0-3 no-op hooks for
// both IPv4 and IPv6 requests.
func BenchmarkHookOverhead(b *testing.B) {
	b.Run("none-v4", func(b *testing.B) {
		benchHookListV4(b, hookList{})
	})
	b.Run("none-v6", func(b *testing.B) {
		benchHookListV6(b, hookList{})
	})

	var nopHooks hookList
	for i := 1; i < 4; i++ {
		// b.Run executes each sub-benchmark synchronously, so every closure
		// sees nopHooks at its current length (exactly i no-op hooks).
		nopHooks = append(nopHooks, &nopHook{})
		b.Run(fmt.Sprintf("%dnop-v4", i), func(b *testing.B) {
			benchHookListV4(b, nopHooks)
		})
		b.Run(fmt.Sprintf("%dnop-v6", i), func(b *testing.B) {
			benchHookListV6(b, nopHooks)
		})
	}
}

View file

@ -3,117 +3,92 @@
package middleware
import (
"context"
"time"
"errors"
"sync"
log "github.com/Sirupsen/logrus"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend"
"github.com/chihaya/chihaya/stopper"
"github.com/chihaya/chihaya/storage"
yaml "gopkg.in/yaml.v2"
)
// Config holds the configuration common across all middleware.
type Config struct {
AnnounceInterval time.Duration `yaml:"announce_interval"`
}
var (
driversM sync.RWMutex
drivers = make(map[string]Driver)
var _ frontend.TrackerLogic = &Logic{}
// ErrDriverDoesNotExist is the error returned by NewMiddleware when a
// middleware driver with that name does not exist.
ErrDriverDoesNotExist = errors.New("middleware driver with that name does not exist")
)
// NewLogic creates a new instance of a TrackerLogic that executes the provided
// middleware hooks.
func NewLogic(cfg Config, peerStore storage.PeerStore, preHooks, postHooks []Hook) *Logic {
l := &Logic{
announceInterval: cfg.AnnounceInterval,
peerStore: peerStore,
preHooks: append(preHooks, &responseHook{store: peerStore}),
postHooks: append(postHooks, &swarmInteractionHook{store: peerStore}),
}
return l
}
// Logic is an implementation of the TrackerLogic that functions by
// executing a series of middleware hooks.
type Logic struct {
announceInterval time.Duration
peerStore storage.PeerStore
preHooks []Hook
postHooks []Hook
}
// HandleAnnounce generates a response for an Announce.
func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (resp *bittorrent.AnnounceResponse, err error) {
resp = &bittorrent.AnnounceResponse{
Interval: l.announceInterval,
MinInterval: l.announceInterval,
Compact: req.Compact,
}
for _, h := range l.preHooks {
if ctx, err = h.HandleAnnounce(ctx, req, resp); err != nil {
return nil, err
}
}
return resp, nil
}
// AfterAnnounce does something with the results of an Announce after it has
// been completed.
func (l *Logic) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) {
var err error
for _, h := range l.postHooks {
if ctx, err = h.HandleAnnounce(ctx, req, resp); err != nil {
log.Errorln("chihaya: post-announce hooks failed:", err.Error())
return
}
}
}
// HandleScrape generates a response for a Scrape.
func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) (resp *bittorrent.ScrapeResponse, err error) {
resp = &bittorrent.ScrapeResponse{
Files: make(map[bittorrent.InfoHash]bittorrent.Scrape),
}
for _, h := range l.preHooks {
if ctx, err = h.HandleScrape(ctx, req, resp); err != nil {
return nil, err
}
}
return resp, nil
}
// AfterScrape does something with the results of a Scrape after it has been
// completed.
func (l *Logic) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) {
var err error
for _, h := range l.postHooks {
if ctx, err = h.HandleScrape(ctx, req, resp); err != nil {
log.Errorln("chihaya: post-scrape hooks failed:", err.Error())
return
}
}
}
// Stop stops the Logic.
// Driver is the interface used to initialize a new type of middleware.
//
// This stops any hooks that implement stopper.Stopper.
func (l *Logic) Stop() []error {
stopGroup := stopper.NewStopGroup()
for _, hook := range l.preHooks {
stoppable, ok := hook.(stopper.Stopper)
if ok {
stopGroup.Add(stoppable)
}
// The options parameter is YAML encoded bytes that should be unmarshalled into
// the hook's custom configuration.
type Driver interface {
	// NewHook constructs a Hook from its YAML-encoded options.
	NewHook(options []byte) (Hook, error)
}
for _, hook := range l.postHooks {
stoppable, ok := hook.(stopper.Stopper)
if ok {
stopGroup.Add(stoppable)
// RegisterDriver makes a Driver available by the provided name.
//
// If called twice with the same name, the name is blank, or if the provided
// Driver is nil, this function panics.
func RegisterDriver(name string, d Driver) {
if name == "" {
panic("middleware: could not register a Driver with an empty name")
}
if d == nil {
panic("middleware: could not register a nil Driver")
}
return stopGroup.Stop()
driversM.Lock()
defer driversM.Unlock()
if _, dup := drivers[name]; dup {
panic("middleware: RegisterDriver called twice for " + name)
}
drivers[name] = d
}
// New attempts to initialize a new middleware instance from the
// list of registered Drivers.
//
// If a driver does not exist, returns ErrDriverDoesNotExist.
func New(name string, optionBytes []byte) (Hook, error) {
	driversM.RLock()
	defer driversM.RUnlock()

	// The comma-ok lookup declares d directly; the earlier separate
	// `var d Driver` declaration was redundant.
	d, ok := drivers[name]
	if !ok {
		return nil, ErrDriverDoesNotExist
	}
	return d.NewHook(optionBytes)
}
// HookConfig is the generic configuration format used for all registered Hooks.
type HookConfig struct {
	// Name selects the registered Driver used to build the Hook.
	Name string `yaml:"name"`
	// Options is the driver-specific configuration, re-marshalled to YAML
	// before being handed to the driver.
	Options map[string]interface{} `yaml:"options"`
}
// HooksFromHookConfigs is a utility function for initializing Hooks in bulk.
//
// On failure it returns a nil slice together with the error; previously the
// naked returns leaked a partially-built hook slice alongside the error.
func HooksFromHookConfigs(cfgs []HookConfig) (hooks []Hook, err error) {
	for _, cfg := range cfgs {
		// Marshal the options back into bytes so each driver can decode
		// them into its own configuration type.
		optionBytes, err := yaml.Marshal(cfg.Options)
		if err != nil {
			return nil, err
		}

		h, err := New(cfg.Name, optionBytes)
		if err != nil {
			return nil, err
		}
		hooks = append(hooks, h)
	}
	return hooks, nil
}

View file

@ -0,0 +1,17 @@
package random
import (
"encoding/binary"
"github.com/chihaya/chihaya/bittorrent"
)
// DeriveEntropyFromRequest generates 2*64 bits of pseudo random state from an
// AnnounceRequest.
//
// Calling DeriveEntropyFromRequest multiple times yields the same values.
func DeriveEntropyFromRequest(req *bittorrent.AnnounceRequest) (uint64, uint64) {
	ih := req.InfoHash
	pid := req.Peer.ID
	s0 := binary.BigEndian.Uint64(ih[:8]) + binary.BigEndian.Uint64(ih[8:16])
	s1 := binary.BigEndian.Uint64(pid[:8]) + binary.BigEndian.Uint64(pid[8:16])
	return s0, s1
}

View file

@ -0,0 +1,28 @@
// Package random implements the XORShift PRNG and a way to derive random state
// from an AnnounceRequest.
package random
// GenerateAndAdvance applies XORShift128Plus on s0 and s1, returning
// the new states newS0, newS1 and a pseudo-random number v.
func GenerateAndAdvance(s0, s1 uint64) (v, newS0, newS1 uint64) {
	v = s0 + s1
	newS0 = s1
	s0 ^= s0 << 23
	newS1 = s0 ^ s1 ^ (s0 >> 18) ^ (s1 >> 5)
	return
}

// Intn generates an int k that satisfies k >= 0 && k < n.
// n must be > 0.
// It returns the generated k and the new state of the generator.
func Intn(s0, s1 uint64, n int) (int, uint64, uint64) {
	if n <= 0 {
		panic("invalid n <= 0")
	}
	v, newS0, newS1 := GenerateAndAdvance(s0, s1)
	// Clear the sign bit rather than negating: when v == 1<<63, int(v) is
	// math.MinInt64, whose negation overflows back to MinInt64, making k%n
	// negative and violating the documented k >= 0 contract.
	k := int(v &^ (1 << 63))
	return k % n, newS0, newS1
}

View file

@ -0,0 +1,38 @@
package random
import (
"math/rand"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestIntn checks the documented contract 0 <= k < n over many random states.
func TestIntn(t *testing.T) {
	rand.Seed(time.Now().UnixNano())
	s0, s1 := rand.Uint64(), rand.Uint64()
	var k int
	for i := 0; i < 10000; i++ {
		k, s0, s1 = Intn(s0, s1, 10)
		require.True(t, k >= 0, "Intn() must be >= 0")
		require.True(t, k < 10, "Intn(k) must be < k")
	}
}

// BenchmarkAdvanceXORShift128Plus measures one raw generator step.
func BenchmarkAdvanceXORShift128Plus(b *testing.B) {
	s0, s1 := rand.Uint64(), rand.Uint64()
	var v uint64
	for i := 0; i < b.N; i++ {
		v, s0, s1 = GenerateAndAdvance(s0, s1)
	}
	// Sink the results so the loop is not optimized away.
	_, _, _ = v, s0, s1
}

// BenchmarkIntn measures bounded-int generation on top of the generator.
func BenchmarkIntn(b *testing.B) {
	s0, s1 := rand.Uint64(), rand.Uint64()
	var v int
	for i := 0; i < b.N; i++ {
		v, s0, s1 = Intn(s0, s1, 1000)
	}
	// Sink the results so the loop is not optimized away.
	_, _, _ = v, s0, s1
}

View file

@ -0,0 +1,109 @@
// Package torrentapproval implements a Hook that fails an Announce based on a
// whitelist or blacklist of torrent hash.
package torrentapproval
import (
"context"
"encoding/hex"
"fmt"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/middleware"
)
// Name is the name by which this middleware is registered with Chihaya.
const Name = "torrent approval"

// init registers this middleware's driver under Name.
func init() {
	middleware.RegisterDriver(Name, driver{})
}

// Compile-time check that driver implements middleware.Driver.
var _ middleware.Driver = driver{}

// driver implements middleware.Driver for the torrent approval middleware.
type driver struct{}
// NewHook decodes the YAML-encoded options into a Config and constructs the
// torrent approval hook from it.
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
	var cfg Config
	if err := yaml.Unmarshal(optionBytes, &cfg); err != nil {
		return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
	}
	return NewHook(cfg)
}
// ErrTorrentUnapproved is the error returned when a torrent hash is invalid.
var ErrTorrentUnapproved = bittorrent.ClientError("unapproved torrent")

// Config represents all the values required by this middleware to validate
// torrents based on their hash value.
//
// At most one of Whitelist and Blacklist may be non-empty; entries are
// hex-encoded 20-byte infohashes.
type Config struct {
	Whitelist []string `yaml:"whitelist"`
	Blacklist []string `yaml:"blacklist"`
}
// hook implements middleware.Hook backed by either an approval whitelist or a
// rejection blacklist of infohashes (at most one of the two is non-empty).
type hook struct {
	// approved holds whitelisted infohashes; empty when a blacklist is used.
	approved map[bittorrent.InfoHash]struct{}
	// unapproved holds blacklisted infohashes; empty when a whitelist is used.
	unapproved map[bittorrent.InfoHash]struct{}
}
// NewHook returns an instance of the torrent approval middleware.
//
// At most one of cfg.Whitelist and cfg.Blacklist may be non-empty, and every
// entry must be a hex-encoded 20-byte infohash.
func NewHook(cfg Config) (middleware.Hook, error) {
	if len(cfg.Whitelist) > 0 && len(cfg.Blacklist) > 0 {
		return nil, fmt.Errorf("using both whitelist and blacklist is invalid")
	}

	approved, err := parseInfoHashes("whitelist", cfg.Whitelist)
	if err != nil {
		return nil, err
	}
	unapproved, err := parseInfoHashes("blacklist", cfg.Blacklist)
	if err != nil {
		return nil, err
	}

	return &hook{approved: approved, unapproved: unapproved}, nil
}

// parseInfoHashes decodes a list of hex-encoded infohash strings into a set,
// labelling any error with the originating list's name. It factors out the
// two previously duplicated parse loops (and fixes the "byes" typo in the
// length error message).
func parseInfoHashes(list string, hashStrings []string) (map[bittorrent.InfoHash]struct{}, error) {
	hashes := make(map[bittorrent.InfoHash]struct{}, len(hashStrings))
	for _, hashString := range hashStrings {
		hashinfo, err := hex.DecodeString(hashString)
		if err != nil {
			return nil, fmt.Errorf("%s : invalid hash %s", list, hashString)
		}
		if len(hashinfo) != 20 {
			return nil, fmt.Errorf("%s : hash %s is not 20 bytes", list, hashString)
		}
		hashes[bittorrent.InfoHashFromBytes(hashinfo)] = struct{}{}
	}
	return hashes, nil
}
// HandleAnnounce rejects announces for torrents that are not approved: when a
// whitelist exists the infohash must be present in it, when a blacklist
// exists the infohash must be absent from it.
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
	infohash := req.InfoHash

	if len(h.approved) > 0 {
		_, found := h.approved[infohash]
		if !found {
			return ctx, ErrTorrentUnapproved
		}
	}

	if len(h.unapproved) > 0 {
		_, found := h.unapproved[infohash]
		if found {
			return ctx, ErrTorrentUnapproved
		}
	}

	return ctx, nil
}
// HandleScrape is a no-op: scrapes don't require any protection.
func (h *hook) HandleScrape(ctx context.Context, _ *bittorrent.ScrapeRequest, _ *bittorrent.ScrapeResponse) (context.Context, error) {
	return ctx, nil
}

View file

@ -0,0 +1,79 @@
package torrentapproval
import (
"context"
"encoding/hex"
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/bittorrent"
)
// cases drives TestHandleAnnounce: each entry pairs a whitelist/blacklist
// Config with a hex infohash and whether the announce should be allowed.
var cases = []struct {
	cfg      Config
	ih       string
	approved bool
}{
	// Infohash is whitelisted
	{
		Config{
			Whitelist: []string{"3532cf2d327fad8448c075b4cb42c8136964a435"},
		},
		"3532cf2d327fad8448c075b4cb42c8136964a435",
		true,
	},
	// Infohash is not whitelisted
	{
		Config{
			Whitelist: []string{"3532cf2d327fad8448c075b4cb42c8136964a435"},
		},
		"4532cf2d327fad8448c075b4cb42c8136964a435",
		false,
	},
	// Infohash is not blacklisted
	{
		Config{
			Blacklist: []string{"3532cf2d327fad8448c075b4cb42c8136964a435"},
		},
		"4532cf2d327fad8448c075b4cb42c8136964a435",
		true,
	},
	// Infohash is blacklisted
	{
		Config{
			Blacklist: []string{"3532cf2d327fad8448c075b4cb42c8136964a435"},
		},
		"3532cf2d327fad8448c075b4cb42c8136964a435",
		false,
	},
}
// TestHandleAnnounce verifies the whitelist/blacklist decision for each case.
func TestHandleAnnounce(t *testing.T) {
	for _, tt := range cases {
		t.Run(fmt.Sprintf("testing hash %s", tt.ih), func(t *testing.T) {
			h, err := NewHook(tt.cfg)
			require.Nil(t, err)

			ctx := context.Background()
			req := &bittorrent.AnnounceRequest{}
			resp := &bittorrent.AnnounceResponse{}

			hashbytes, err := hex.DecodeString(tt.ih)
			require.Nil(t, err)

			req.InfoHash = bittorrent.InfoHashFromBytes(hashbytes)

			nctx, err := h.HandleAnnounce(ctx, req, resp)
			require.Equal(t, ctx, nctx)
			// require.Equal takes (t, expected, actual): the sentinel error
			// is the expected value. The previous code had them reversed,
			// which produces misleading failure messages.
			if tt.approved {
				require.NotEqual(t, ErrTorrentUnapproved, err)
			} else {
				require.Equal(t, ErrTorrentUnapproved, err)
			}
		})
	}
}

View file

@ -0,0 +1,115 @@
package varinterval
import (
"context"
"errors"
"fmt"
"sync"
"time"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/middleware/pkg/random"
)
// Name is the name by which this middleware is registered with Chihaya.
const Name = "interval variation"

// init registers this middleware's driver under Name.
func init() {
	middleware.RegisterDriver(Name, driver{})
}

// Compile-time check that driver implements middleware.Driver.
var _ middleware.Driver = driver{}

// driver implements middleware.Driver for the varinterval middleware.
type driver struct{}
// NewHook decodes the YAML-encoded options into a Config and constructs the
// varinterval hook from it.
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
	var cfg Config
	if err := yaml.Unmarshal(optionBytes, &cfg); err != nil {
		return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
	}
	return NewHook(cfg)
}
// ErrInvalidModifyResponseProbability is returned for a config with an invalid
// ModifyResponseProbability.
var ErrInvalidModifyResponseProbability = errors.New("invalid modify_response_probability")

// ErrInvalidMaxIncreaseDelta is returned for a config with an invalid
// MaxIncreaseDelta.
var ErrInvalidMaxIncreaseDelta = errors.New("invalid max_increase_delta")

// Config represents the configuration for the varinterval middleware.
type Config struct {
	// ModifyResponseProbability is the probability by which a response will
	// be modified. Must be in (0, 1].
	ModifyResponseProbability float32 `yaml:"modify_response_probability"`

	// MaxIncreaseDelta is the amount of seconds that will be added at most.
	// Must be > 0.
	MaxIncreaseDelta int `yaml:"max_increase_delta"`

	// ModifyMinInterval specifies whether min_interval should be increased
	// as well.
	ModifyMinInterval bool `yaml:"modify_min_interval"`
}
// checkConfig validates a varinterval Config, returning the sentinel error
// for the first invalid field found.
func checkConfig(cfg Config) error {
	switch {
	case cfg.ModifyResponseProbability <= 0 || cfg.ModifyResponseProbability > 1:
		return ErrInvalidModifyResponseProbability
	case cfg.MaxIncreaseDelta <= 0:
		return ErrInvalidMaxIncreaseDelta
	default:
		return nil
	}
}
// hook implements the varinterval middleware.
type hook struct {
	cfg Config
	// NOTE(review): this embedded Mutex is not used by any method visible
	// here — confirm against the rest of the package whether it can be
	// removed.
	sync.Mutex
}
// NewHook creates a middleware to randomly modify the announce interval from
// the given config.
func NewHook(cfg Config) (middleware.Hook, error) {
	if err := checkConfig(cfg); err != nil {
		return nil, err
	}
	return &hook{cfg: cfg}, nil
}
// HandleAnnounce probabilistically increases the announce interval (and,
// optionally, min interval) of a response. The randomness is derived from
// the request, so the same request always gets the same modification.
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
	s0, s1 := random.DeriveEntropyFromRequest(req)
	// Generate a probability p < 1.0.
	v, s0, s1 := random.Intn(s0, s1, 1<<24)
	p := float32(v) / (1 << 24)

	// Leave the response untouched unless the dice roll succeeds
	// (probability 1 always modifies).
	if h.cfg.ModifyResponseProbability != 1 && p >= h.cfg.ModifyResponseProbability {
		return ctx, nil
	}

	// Generate the increase delta.
	v, _, _ = random.Intn(s0, s1, h.cfg.MaxIncreaseDelta)
	delta := time.Duration(v+1) * time.Second

	resp.Interval += delta
	if h.cfg.ModifyMinInterval {
		resp.MinInterval += delta
	}
	return ctx, nil
}
// HandleScrape is a no-op: scrapes are not altered.
func (h *hook) HandleScrape(ctx context.Context, _ *bittorrent.ScrapeRequest, _ *bittorrent.ScrapeResponse) (context.Context, error) {
	return ctx, nil
}

View file

@ -0,0 +1,61 @@
package varinterval
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/bittorrent"
)
// configTests exercises checkConfig across valid and invalid configurations.
// Field names are used in the Config literals so the table survives field
// reordering (the previous positional literals would silently break).
var configTests = []struct {
	cfg      Config
	expected error
}{
	{
		cfg:      Config{ModifyResponseProbability: 0.5, MaxIncreaseDelta: 60, ModifyMinInterval: true},
		expected: nil,
	}, {
		cfg:      Config{ModifyResponseProbability: 1.0, MaxIncreaseDelta: 60, ModifyMinInterval: true},
		expected: nil,
	}, {
		// Probability must be strictly positive.
		cfg:      Config{ModifyResponseProbability: 0.0, MaxIncreaseDelta: 60, ModifyMinInterval: true},
		expected: ErrInvalidModifyResponseProbability,
	}, {
		// Probability must not exceed 1.
		cfg:      Config{ModifyResponseProbability: 1.1, MaxIncreaseDelta: 60, ModifyMinInterval: true},
		expected: ErrInvalidModifyResponseProbability,
	}, {
		// Delta must be strictly positive.
		cfg:      Config{ModifyResponseProbability: 0.5, MaxIncreaseDelta: 0, ModifyMinInterval: true},
		expected: ErrInvalidMaxIncreaseDelta,
	}, {
		cfg:      Config{ModifyResponseProbability: 0.5, MaxIncreaseDelta: -10, ModifyMinInterval: true},
		expected: ErrInvalidMaxIncreaseDelta,
	},
}
// TestCheckConfig table-drives checkConfig against configTests.
func TestCheckConfig(t *testing.T) {
	for _, tt := range configTests {
		t.Run(fmt.Sprintf("%#v", tt.cfg), func(t *testing.T) {
			got := checkConfig(tt.cfg)
			// Use a real format string: the previous msgAndArgs ("", tt.cfg)
			// passed an empty string with a dangling argument.
			require.Equal(t, tt.expected, got, "checkConfig(%#v)", tt.cfg)
		})
	}
}
// TestHandleAnnounce checks that a probability-1 hook always increases both
// the interval and (with ModifyMinInterval set) the min interval.
func TestHandleAnnounce(t *testing.T) {
	h, err := NewHook(Config{1.0, 10, true})
	require.Nil(t, err)
	require.NotNil(t, h)

	ctx := context.Background()
	req := &bittorrent.AnnounceRequest{}
	resp := &bittorrent.AnnounceResponse{}

	// With ModifyResponseProbability == 1 the modification is unconditional,
	// so the zero-valued intervals must both have grown.
	nCtx, err := h.HandleAnnounce(ctx, req, resp)
	require.Nil(t, err)
	require.Equal(t, ctx, nCtx)
	require.True(t, resp.Interval > 0, "interval should have been increased")
	require.True(t, resp.MinInterval > 0, "min_interval should have been increased")
}

134
pkg/log/log.go Normal file
View file

@ -0,0 +1,134 @@
// Package log adds a thin wrapper around logrus to improve non-debug logging
// performance.
package log
import (
"fmt"
"io"
"github.com/sirupsen/logrus"
)
var (
	// l is the package-level logrus logger all helpers write to.
	l = logrus.New()
	// debug gates the Debug helper; it is toggled via SetDebug.
	debug = false
)
// SetDebug controls debug logging.
//
// The logger level is only raised to DebugLevel when enabling. The previous
// implementation set DebugLevel unconditionally, so SetDebug(false) left the
// underlying logger at debug verbosity; disabling now restores logrus's
// default InfoLevel.
func SetDebug(to bool) {
	debug = to
	if to {
		l.Level = logrus.DebugLevel
	} else {
		l.Level = logrus.InfoLevel
	}
}
// SetFormatter sets the formatter on the package-level logger.
func SetFormatter(to logrus.Formatter) {
	l.Formatter = to
}

// SetOutput sets the output writer on the package-level logger.
func SetOutput(to io.Writer) {
	l.Out = to
}
// Fields is a map of logging fields.
type Fields map[string]interface{}
// LogFields implements Fielder for Fields.
func (f Fields) LogFields() Fields {
return f
}
// A Fielder provides Fields via the LogFields method.
type Fielder interface {
LogFields() Fields
}
// err is a wrapper around an error.
type err struct {
e error
}
// LogFields provides Fields for logging.
func (e err) LogFields() Fields {
return Fields{
"error": e.e.Error(),
"type": fmt.Sprintf("%T", e.e),
}
}
// Err is a wrapper around errors that implements Fielder.
func Err(e error) Fielder {
return err{e}
}
// mergeFielders merges the Fields of multiple Fielders.
// Fields from the first Fielder will be used unchanged, Fields from subsequent
// Fielders will be prefixed with "%d.", starting from 1.
//
// must be called with len(fielders) > 0
func mergeFielders(fielders ...Fielder) logrus.Fields {
	if fielders[0] == nil {
		return nil
	}

	// Copy into a fresh map: Fields.LogFields returns the receiver map
	// itself, so writing prefixed keys into fielders[0].LogFields() directly
	// would mutate the caller's map on every log call.
	first := fielders[0].LogFields()
	fields := make(Fields, len(first))
	for k, v := range first {
		fields[k] = v
	}

	for i := 1; i < len(fielders); i++ {
		if fielders[i] == nil {
			continue
		}
		prefix := fmt.Sprint(i, ".")
		for k, v := range fielders[i].LogFields() {
			fields[prefix+k] = v
		}
	}
	return logrus.Fields(fields)
}
// Debug logs at the debug level if debug logging is enabled.
func Debug(v interface{}, fielders ...Fielder) {
	if !debug {
		return
	}
	if len(fielders) == 0 {
		l.Debug(v)
		return
	}
	l.WithFields(mergeFielders(fielders...)).Debug(v)
}
// Info logs at the info level.
func Info(v interface{}, fielders ...Fielder) {
	if len(fielders) == 0 {
		l.Info(v)
		return
	}
	l.WithFields(mergeFielders(fielders...)).Info(v)
}

// Warn logs at the warning level.
func Warn(v interface{}, fielders ...Fielder) {
	if len(fielders) == 0 {
		l.Warn(v)
		return
	}
	l.WithFields(mergeFielders(fielders...)).Warn(v)
}

// Error logs at the error level.
func Error(v interface{}, fielders ...Fielder) {
	if len(fielders) == 0 {
		l.Error(v)
		return
	}
	l.WithFields(mergeFielders(fielders...)).Error(v)
}

// Fatal logs at the fatal level and exits with a status code != 0.
func Fatal(v interface{}, fielders ...Fielder) {
	if len(fielders) == 0 {
		l.Fatal(v)
		return
	}
	l.WithFields(mergeFielders(fielders...)).Fatal(v)
}

59
pkg/metrics/server.go Normal file
View file

@ -0,0 +1,59 @@
// Package metrics implements a standalone HTTP server for serving pprof
// profiles and Prometheus metrics.
package metrics
import (
"context"
"errors"
"net/http"
"net/http/pprof"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop"
)
// Server represents a standalone HTTP server for serving a Prometheus metrics
// endpoint.
type Server struct {
	// srv serves the /metrics and /debug/pprof endpoints; it is shut down
	// via Stop.
	srv *http.Server
}
// Stop shuts down the server gracefully and reports the result through the
// returned stop.Result.
func (s *Server) Stop() stop.Result {
	c := make(stop.Channel)
	go func() {
		err := s.srv.Shutdown(context.Background())
		c.Done(err)
	}()
	return c.Result()
}
// NewServer creates a new instance of a Prometheus server that asynchronously
// serves requests.
func NewServer(addr string) *Server {
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	mux.HandleFunc("/debug/pprof/", pprof.Index)
	mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
	mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
	mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
	mux.HandleFunc("/debug/pprof/trace", pprof.Trace)

	srv := &http.Server{
		Addr:    addr,
		Handler: mux,
	}
	s := &Server{srv: srv}

	// Serve in the background; ErrServerClosed is the expected result of a
	// graceful Stop, anything else is fatal.
	go func() {
		err := s.srv.ListenAndServe()
		if !errors.Is(err, http.ErrServerClosed) {
			log.Fatal("failed while serving prometheus", log.Err(err))
		}
	}()

	return s
}

125
pkg/stop/stop.go Normal file
View file

@ -0,0 +1,125 @@
// Package stop implements a pattern for shutting down a group of processes.
package stop
import (
"sync"
)
// Channel is used to return zero or more errors asynchronously. Call Done()
// once to pass errors to the Channel.
type Channel chan []error
// Result is a receive-only version of Channel. Call Wait() once to receive any
// returned errors.
type Result <-chan []error
// Done adds zero or more errors to the Channel and closes it, indicating the
// caller has finished stopping. It should be called exactly once.
func (ch Channel) Done(errs ...error) {
if len(errs) > 0 && errs[0] != nil {
ch <- errs
}
close(ch)
}
// Result converts a Channel to a Result.
func (ch Channel) Result() <-chan []error {
return ch
}
// Wait blocks until Done() is called on the underlying Channel and returns any
// errors. It should be called exactly once.
func (r Result) Wait() []error {
return <-r
}
// AlreadyStopped is a closed error channel to be used by Funcs when
// an element was already stopped.
var AlreadyStopped Result

// AlreadyStoppedFunc is a Func that returns AlreadyStopped.
var AlreadyStoppedFunc = func() Result { return AlreadyStopped }

// init builds AlreadyStopped from an already-closed Channel, so Wait()
// returns nil immediately.
func init() {
	closeMe := make(Channel)
	close(closeMe)
	AlreadyStopped = closeMe.Result()
}
// Stopper is an interface that allows a clean shutdown.
type Stopper interface {
	// Stop returns a channel that indicates whether the stop was
	// successful.
	//
	// The channel can either return one error or be closed.
	// Closing the channel signals a clean shutdown.
	// Stop() should return immediately and perform the actual shutdown in a
	// separate goroutine.
	Stop() Result
}

// Func is a function that can be used to provide a clean shutdown.
// It matches the signature of Stopper.Stop.
type Func func() Result
// Group is a collection of Stoppers that can be stopped all at once.
type Group struct {
	stoppables []Func
	sync.Mutex
}

// NewGroup allocates a new Group.
func NewGroup() *Group {
	return &Group{
		stoppables: make([]Func, 0),
	}
}

// Add appends a Stopper to the Group.
func (cg *Group) Add(toAdd Stopper) {
	// Delegate to AddFunc so all registration goes through a single path.
	cg.AddFunc(toAdd.Stop)
}

// AddFunc appends a Func to the Group.
func (cg *Group) AddFunc(toAddFunc Func) {
	cg.Lock()
	defer cg.Unlock()

	cg.stoppables = append(cg.stoppables, toAddFunc)
}
// Stop stops all members of the Group.
//
// Stopping will be done in a concurrent fashion.
// The slice of errors returned contains all errors returned by stopping the
// members.
func (cg *Group) Stop() Result {
	cg.Lock()
	defer cg.Unlock()

	whenDone := make(Channel)

	// Kick off every member's shutdown first so they stop concurrently,
	// collecting the channels to wait on.
	results := make([]Result, 0, len(cg.stoppables))
	for _, stopFunc := range cg.stoppables {
		r := stopFunc()
		if r == nil {
			panic("received a nil chan from Stop")
		}
		results = append(results, r)
	}

	// Aggregate the members' errors in the background and signal completion
	// through whenDone.
	go func() {
		var collected []error
		for _, r := range results {
			collected = append(collected, r.Wait()...)
		}
		whenDone.Done(collected...)
	}()

	return whenDone.Result()
}

127
pkg/timecache/timecache.go Normal file
View file

@ -0,0 +1,127 @@
// Package timecache provides a cache for the system clock, to avoid calls to
// time.Now().
// The time is stored as one int64 which holds the number of nanoseconds since
// the Unix Epoch. The value is accessed using atomic primitives, without
// locking.
// The package runs a global singleton TimeCache that is updated every
// second.
package timecache
import (
"sync"
"sync/atomic"
"time"
)
// t is the global TimeCache singleton backing the package-level Now,
// NowUnix, and NowUnixNano helpers.
var t *TimeCache

// init constructs the global TimeCache via New (avoiding a duplicated
// field-by-field construction) and starts it with a one-second refresh.
func init() {
	t = New()
	go t.Run(1 * time.Second)
}
// A TimeCache is a cache for the current system time.
// The cached time has nanosecond precision.
type TimeCache struct {
	// clock saves the current time's nanoseconds since the Epoch.
	// Must be accessed atomically.
	clock int64

	// closed is closed by Stop to terminate Run.
	closed chan struct{}
	// running is closed by the first call to Run; a second call panics.
	running chan struct{}
	// m serializes the Run/Stop state transitions on the two channels.
	m sync.Mutex
}
// New returns a new TimeCache instance seeded with the current time.
// The TimeCache must be started (via Run) to keep the time updated.
func New() *TimeCache {
	tc := &TimeCache{
		closed:  make(chan struct{}),
		running: make(chan struct{}),
	}
	tc.clock = time.Now().UnixNano()
	return tc
}
// Run runs the TimeCache, updating the cached clock value once every interval
// and blocks until Stop is called.
//
// Run panics if it is called more than once on the same TimeCache.
func (t *TimeCache) Run(interval time.Duration) {
	// Guard against a second Run: running is closed exactly once, under m.
	t.m.Lock()
	select {
	case <-t.running:
		panic("Run called multiple times")
	default:
	}
	close(t.running)
	t.m.Unlock()

	tick := time.NewTicker(interval)
	// The deferred Stop releases the ticker on every exit path, so the
	// shutdown branch below does not need to stop it again (the previous
	// version called Stop twice).
	defer tick.Stop()

	for {
		select {
		case <-t.closed:
			return
		case now := <-tick.C:
			atomic.StoreInt64(&t.clock, now.UnixNano())
		}
	}
}
// Stop stops the TimeCache.
// The cached time remains valid but will not be updated anymore.
// A TimeCache can not be restarted. Construct a new one instead.
// Calling Stop again is a no-op.
func (t *TimeCache) Stop() {
	t.m.Lock()
	defer t.m.Unlock()

	select {
	case <-t.closed:
		// Already stopped; nothing to do.
	default:
		close(t.closed)
	}
}
// Now returns the cached time as a time.Time value.
// The read is a single atomic load; no locking is involved.
func (t *TimeCache) Now() time.Time {
	return time.Unix(0, atomic.LoadInt64(&t.clock))
}
// NowUnixNano returns the cached time as nanoseconds since the Unix Epoch.
// The read is a single atomic load; no locking is involved.
func (t *TimeCache) NowUnixNano() int64 {
	return atomic.LoadInt64(&t.clock)
}
// NowUnix returns the cached time as seconds since the Unix Epoch,
// rounding toward negative infinity as time.Unix does.
func (t *TimeCache) NowUnix() int64 {
	ns := atomic.LoadInt64(&t.clock)
	sec := ns / 1e9
	// Go's integer division truncates toward zero; floor it for
	// pre-Epoch instants with a leftover fractional second.
	if ns%1e9 < 0 {
		sec--
	}
	return sec
}
// Now calls Now on the global TimeCache instance.
func Now() time.Time {
	return t.Now()
}

// NowUnixNano calls NowUnixNano on the global TimeCache instance.
func NowUnixNano() int64 {
	return t.NowUnixNano()
}

// NowUnix calls NowUnix on the global TimeCache instance.
func NowUnix() int64 {
	return t.NowUnix()
}

View file

@ -0,0 +1,148 @@
package timecache
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestNew verifies that a fresh, unstarted TimeCache is immediately usable:
// it is seeded at construction, so all accessors return a non-zero time.
func TestNew(t *testing.T) {
	c := New()
	require.NotNil(t, c)

	now := c.Now()
	require.False(t, now.IsZero())

	nsec := c.NowUnixNano()
	require.NotEqual(t, 0, nsec)

	sec := c.NowUnix()
	require.NotEqual(t, 0, sec)
}
// TestRunStop verifies that Stop unblocks a running Run: the WaitGroup only
// completes once the Run goroutine has returned.
func TestRunStop(t *testing.T) {
	c := New()
	require.NotNil(t, c)

	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		c.Run(1 * time.Second)
	}()

	c.Stop()
	wg.Wait()
}
// TestMultipleStop verifies that calling Stop more than once is a harmless
// no-op (it must not panic on the already-closed channel).
func TestMultipleStop(t *testing.T) {
	c := New()
	require.NotNil(t, c)

	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		c.Run(1 * time.Second)
	}()

	c.Stop()
	c.Stop()
	wg.Wait()
}
// doBenchmark runs a parallel benchmark body against a running TimeCache,
// handling the Run/Stop lifecycle so each Benchmark* only supplies the
// per-goroutine loop.
func doBenchmark(b *testing.B, f func(tc *TimeCache) func(*testing.PB)) {
	tc := New()
	require.NotNil(b, tc)

	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		tc.Run(1 * time.Second)
	}()

	b.RunParallel(f(tc))

	tc.Stop()
	wg.Wait()
}
// BenchmarkNow measures TimeCache.Now on a dedicated instance.
// The result is sunk into a local to keep the call from being eliminated.
func BenchmarkNow(b *testing.B) {
	doBenchmark(b, func(tc *TimeCache) func(pb *testing.PB) {
		return func(pb *testing.PB) {
			var now time.Time
			for pb.Next() {
				now = tc.Now()
			}
			_ = now
		}
	})
}

// BenchmarkNowUnix measures TimeCache.NowUnix on a dedicated instance.
func BenchmarkNowUnix(b *testing.B) {
	doBenchmark(b, func(tc *TimeCache) func(pb *testing.PB) {
		return func(pb *testing.PB) {
			var now int64
			for pb.Next() {
				now = tc.NowUnix()
			}
			_ = now
		}
	})
}

// BenchmarkNowUnixNano measures TimeCache.NowUnixNano on a dedicated instance.
func BenchmarkNowUnixNano(b *testing.B) {
	doBenchmark(b, func(tc *TimeCache) func(pb *testing.PB) {
		return func(pb *testing.PB) {
			var now int64
			for pb.Next() {
				now = tc.NowUnixNano()
			}
			_ = now
		}
	})
}
// BenchmarkNowGlobal measures the package-level Now helper, which goes
// through the global singleton started in init.
func BenchmarkNowGlobal(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		var now time.Time
		for pb.Next() {
			now = Now()
		}
		_ = now
	})
}

// BenchmarkNowUnixGlobal measures the package-level NowUnix helper.
func BenchmarkNowUnixGlobal(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		var now int64
		for pb.Next() {
			now = NowUnix()
		}
		_ = now
	})
}

// BenchmarkNowUnixNanoGlobal measures the package-level NowUnixNano helper.
func BenchmarkNowUnixNanoGlobal(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		var now int64
		for pb.Next() {
			now = NowUnixNano()
		}
		_ = now
	})
}
// BenchmarkTimeNow measures the uncached time.Now as a baseline to compare
// the cache's benchmarks against.
func BenchmarkTimeNow(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		var now time.Time
		for pb.Next() {
			now = time.Now()
		}
		_ = now
	})
}

View file

@ -1,97 +0,0 @@
package stopper
import (
"sync"
)
// AlreadyStopped is a closed error channel to be used by Funcs when
// an element was already stopped.
var AlreadyStopped <-chan error
// AlreadyStoppedFunc is a Func that returns AlreadyStopped.
var AlreadyStoppedFunc = func() <-chan error { return AlreadyStopped }
func init() {
closeMe := make(chan error)
close(closeMe)
AlreadyStopped = closeMe
}
// Stopper is an interface that allows a clean shutdown.
type Stopper interface {
// Stop returns a channel that indicates whether the stop was
// successful.
// The channel can either return one error or be closed. Closing the
// channel signals a clean shutdown.
// The Stop function should return immediately and perform the actual
// shutdown in a separate goroutine.
Stop() <-chan error
}
// StopGroup is a group that can be stopped.
type StopGroup struct {
stoppables []Func
sync.Mutex
}
// Func is a function that can be used to provide a clean shutdown.
type Func func() <-chan error
// NewStopGroup creates a new StopGroup.
func NewStopGroup() *StopGroup {
return &StopGroup{
stoppables: make([]Func, 0),
}
}
// Add adds a Stopper to the StopGroup.
// On the next call to Stop(), the Stopper will be stopped.
func (cg *StopGroup) Add(toAdd Stopper) {
cg.Lock()
defer cg.Unlock()
cg.stoppables = append(cg.stoppables, toAdd.Stop)
}
// AddFunc adds a Func to the StopGroup.
// On the next call to Stop(), the Func will be called.
func (cg *StopGroup) AddFunc(toAddFunc Func) {
cg.Lock()
defer cg.Unlock()
cg.stoppables = append(cg.stoppables, toAddFunc)
}
// Stop stops all members of the StopGroup.
// Stopping will be done in a concurrent fashion.
// The slice of errors returned contains all errors returned by stopping the
// members.
func (cg *StopGroup) Stop() []error {
cg.Lock()
defer cg.Unlock()
var errors []error
whenDone := make(chan struct{})
waitChannels := make([]<-chan error, 0, len(cg.stoppables))
for _, toStop := range cg.stoppables {
waitFor := toStop()
if waitFor == nil {
panic("received a nil chan from Stop")
}
waitChannels = append(waitChannels, waitFor)
}
go func() {
for _, waitForMe := range waitChannels {
err := <-waitForMe
if err != nil {
errors = append(errors, err)
}
}
close(whenDone)
}()
<-whenDone
return errors
}

View file

@ -1,61 +1,167 @@
// Package memory implements the storage interface for a Chihaya
// BitTorrent tracker keeping peer data in memory.
package memory
import (
"encoding/binary"
"errors"
"math"
"net"
"runtime"
"sync"
"time"
log "github.com/Sirupsen/logrus"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop"
"github.com/chihaya/chihaya/pkg/timecache"
"github.com/chihaya/chihaya/storage"
)
// ErrInvalidGCInterval is returned for a GarbageCollectionInterval that is
// less than or equal to zero.
var ErrInvalidGCInterval = errors.New("invalid garbage collection interval")
// Name is the name by which this peer store is registered with Chihaya.
const Name = "memory"
// Default config constants.
const (
defaultShardCount = 1024
defaultPrometheusReportingInterval = time.Second * 1
defaultGarbageCollectionInterval = time.Minute * 3
defaultPeerLifetime = time.Minute * 30
)
func init() {
// Register the storage driver.
storage.RegisterDriver(Name, driver{})
}
type driver struct{}
func (d driver) NewPeerStore(icfg interface{}) (storage.PeerStore, error) {
// Marshal the config back into bytes.
bytes, err := yaml.Marshal(icfg)
if err != nil {
return nil, err
}
// Unmarshal the bytes into the proper config type.
var cfg Config
err = yaml.Unmarshal(bytes, &cfg)
if err != nil {
return nil, err
}
return New(cfg)
}
// Config holds the configuration of a memory PeerStore.
type Config struct {
GarbageCollectionInterval time.Duration `yaml:"gc_interval"`
PrometheusReportingInterval time.Duration `yaml:"prometheus_reporting_interval"`
PeerLifetime time.Duration `yaml:"peer_lifetime"`
ShardCount int `yaml:"shard_count"`
MaxNumWant int `yaml:"max_numwant"`
}
// New creates a new PeerStore backed by memory.
func New(cfg Config) (storage.PeerStore, error) {
shardCount := 1
if cfg.ShardCount > 0 {
shardCount = cfg.ShardCount
// LogFields renders the current config as a set of Logrus fields.
func (cfg Config) LogFields() log.Fields {
return log.Fields{
"name": Name,
"gcInterval": cfg.GarbageCollectionInterval,
"promReportInterval": cfg.PrometheusReportingInterval,
"peerLifetime": cfg.PeerLifetime,
"shardCount": cfg.ShardCount,
}
}
// Validate sanity checks values set in a config and returns a new config with
// default values replacing anything that is invalid.
//
// This function warns to the logger when a value is changed.
func (cfg Config) Validate() Config {
validcfg := cfg
if cfg.ShardCount <= 0 || cfg.ShardCount > (math.MaxInt/2) {
validcfg.ShardCount = defaultShardCount
log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".ShardCount",
"provided": cfg.ShardCount,
"default": validcfg.ShardCount,
})
}
if cfg.GarbageCollectionInterval <= 0 {
return nil, ErrInvalidGCInterval
validcfg.GarbageCollectionInterval = defaultGarbageCollectionInterval
log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".GarbageCollectionInterval",
"provided": cfg.GarbageCollectionInterval,
"default": validcfg.GarbageCollectionInterval,
})
}
if cfg.PrometheusReportingInterval <= 0 {
validcfg.PrometheusReportingInterval = defaultPrometheusReportingInterval
log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".PrometheusReportingInterval",
"provided": cfg.PrometheusReportingInterval,
"default": validcfg.PrometheusReportingInterval,
})
}
if cfg.PeerLifetime <= 0 {
validcfg.PeerLifetime = defaultPeerLifetime
log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".PeerLifetime",
"provided": cfg.PeerLifetime,
"default": validcfg.PeerLifetime,
})
}
return validcfg
}
// New creates a new PeerStore backed by memory.
func New(provided Config) (storage.PeerStore, error) {
cfg := provided.Validate()
ps := &peerStore{
shards: make([]*peerShard, shardCount*2),
cfg: cfg,
shards: make([]*peerShard, cfg.ShardCount*2),
closed: make(chan struct{}),
maxNumWant: cfg.MaxNumWant,
}
for i := 0; i < shardCount*2; i++ {
for i := 0; i < cfg.ShardCount*2; i++ {
ps.shards[i] = &peerShard{swarms: make(map[bittorrent.InfoHash]swarm)}
}
// Start a goroutine for garbage collection.
ps.wg.Add(1)
go func() {
defer ps.wg.Done()
for {
select {
case <-ps.closed:
return
case <-time.After(cfg.GarbageCollectionInterval):
before := time.Now().Add(-cfg.PeerLifetime)
log.Debugln("memory: purging peers with no announces since", before)
ps.collectGarbage(before)
log.Debug("storage: purging peers with no announces since", log.Fields{"before": before})
_ = ps.collectGarbage(before)
}
}
}()
// Start a goroutine for reporting statistics to Prometheus.
ps.wg.Add(1)
go func() {
defer ps.wg.Done()
t := time.NewTicker(cfg.PrometheusReportingInterval)
for {
select {
case <-ps.closed:
t.Stop()
return
case <-t.C:
before := time.Now()
ps.populateProm()
log.Debug("storage: populateProm() finished", log.Fields{"timeTaken": time.Since(before)})
}
}
}()
@ -65,8 +171,38 @@ func New(cfg Config) (storage.PeerStore, error) {
type serializedPeer string
func newPeerKey(p bittorrent.Peer) serializedPeer {
b := make([]byte, 20+2+len(p.IP.IP))
copy(b[:20], p.ID[:])
binary.BigEndian.PutUint16(b[20:22], p.Port)
copy(b[22:], p.IP.IP)
return serializedPeer(b)
}
func decodePeerKey(pk serializedPeer) bittorrent.Peer {
peer := bittorrent.Peer{
ID: bittorrent.PeerIDFromString(string(pk[:20])),
Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
IP: bittorrent.IP{IP: net.IP(pk[22:])},
}
if ip := peer.IP.To4(); ip != nil {
peer.IP.IP = ip
peer.IP.AddressFamily = bittorrent.IPv4
} else if len(peer.IP.IP) == net.IPv6len { // implies toReturn.IP.To4() == nil
peer.IP.AddressFamily = bittorrent.IPv6
} else {
panic("IP is neither v4 nor v6")
}
return peer
}
type peerShard struct {
swarms map[bittorrent.InfoHash]swarm
numSeeders uint64
numLeechers uint64
sync.RWMutex
}
@ -77,51 +213,63 @@ type swarm struct {
}
type peerStore struct {
cfg Config
shards []*peerShard
closed chan struct{}
maxNumWant int
wg sync.WaitGroup
}
var _ storage.PeerStore = &peerStore{}
func (s *peerStore) shardIndex(infoHash bittorrent.InfoHash, v6 bool) uint32 {
// populateProm aggregates metrics over all shards and then posts them to
// prometheus.
func (ps *peerStore) populateProm() {
var numInfohashes, numSeeders, numLeechers uint64
for _, s := range ps.shards {
s.RLock()
numInfohashes += uint64(len(s.swarms))
numSeeders += s.numSeeders
numLeechers += s.numLeechers
s.RUnlock()
}
storage.PromInfohashesCount.Set(float64(numInfohashes))
storage.PromSeedersCount.Set(float64(numSeeders))
storage.PromLeechersCount.Set(float64(numLeechers))
}
// recordGCDuration records the duration of a GC sweep.
func recordGCDuration(duration time.Duration) {
storage.PromGCDurationMilliseconds.Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
}
func (ps *peerStore) getClock() int64 {
return timecache.NowUnixNano()
}
func (ps *peerStore) shardIndex(infoHash bittorrent.InfoHash, af bittorrent.AddressFamily) uint32 {
// There are twice the amount of shards specified by the user, the first
// half is dedicated to IPv4 swarms and the second half is dedicated to
// IPv6 swarms.
idx := binary.BigEndian.Uint32(infoHash[:4]) % (uint32(len(s.shards)) / 2)
if v6 {
idx += uint32(len(s.shards) / 2)
idx := binary.BigEndian.Uint32(infoHash[:4]) % (uint32(len(ps.shards)) / 2)
if af == bittorrent.IPv6 {
idx += uint32(len(ps.shards) / 2)
}
return idx
}
func newPeerKey(p bittorrent.Peer) serializedPeer {
b := make([]byte, 20+2+len(p.IP))
copy(b[:20], p.ID[:])
binary.BigEndian.PutUint16(b[20:22], p.Port)
copy(b[22:], p.IP)
return serializedPeer(b)
}
func decodePeerKey(pk serializedPeer) bittorrent.Peer {
return bittorrent.Peer{
ID: bittorrent.PeerIDFromString(string(pk[:20])),
Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
IP: net.IP(pk[22:]),
}
}
func (s *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
func (ps *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
select {
case <-s.closed:
case <-ps.closed:
panic("attempted to interact with stopped memory store")
default:
}
pk := newPeerKey(p)
shard := s.shards[s.shardIndex(ih, len(p.IP) == net.IPv6len)]
shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
shard.Lock()
if _, ok := shard.swarms[ih]; !ok {
@ -131,22 +279,28 @@ func (s *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
}
}
shard.swarms[ih].seeders[pk] = time.Now().UnixNano()
// If this peer isn't already a seeder, update the stats for the swarm.
if _, ok := shard.swarms[ih].seeders[pk]; !ok {
shard.numSeeders++
}
// Update the peer in the swarm.
shard.swarms[ih].seeders[pk] = ps.getClock()
shard.Unlock()
return nil
}
func (s *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
func (ps *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
select {
case <-s.closed:
case <-ps.closed:
panic("attempted to interact with stopped memory store")
default:
}
pk := newPeerKey(p)
shard := s.shards[s.shardIndex(ih, len(p.IP) == net.IPv6len)]
shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
shard.Lock()
if _, ok := shard.swarms[ih]; !ok {
@ -159,6 +313,7 @@ func (s *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) erro
return storage.ErrResourceDoesNotExist
}
shard.numSeeders--
delete(shard.swarms[ih].seeders, pk)
if len(shard.swarms[ih].seeders)|len(shard.swarms[ih].leechers) == 0 {
@ -169,16 +324,16 @@ func (s *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) erro
return nil
}
func (s *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
func (ps *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
select {
case <-s.closed:
case <-ps.closed:
panic("attempted to interact with stopped memory store")
default:
}
pk := newPeerKey(p)
shard := s.shards[s.shardIndex(ih, len(p.IP) == net.IPv6len)]
shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
shard.Lock()
if _, ok := shard.swarms[ih]; !ok {
@ -188,22 +343,28 @@ func (s *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error
}
}
shard.swarms[ih].leechers[pk] = time.Now().UnixNano()
// If this peer isn't already a leecher, update the stats for the swarm.
if _, ok := shard.swarms[ih].leechers[pk]; !ok {
shard.numLeechers++
}
// Update the peer in the swarm.
shard.swarms[ih].leechers[pk] = ps.getClock()
shard.Unlock()
return nil
}
func (s *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
func (ps *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
select {
case <-s.closed:
case <-ps.closed:
panic("attempted to interact with stopped memory store")
default:
}
pk := newPeerKey(p)
shard := s.shards[s.shardIndex(ih, len(p.IP) == net.IPv6len)]
shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
shard.Lock()
if _, ok := shard.swarms[ih]; !ok {
@ -216,6 +377,7 @@ func (s *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) err
return storage.ErrResourceDoesNotExist
}
shard.numLeechers--
delete(shard.swarms[ih].leechers, pk)
if len(shard.swarms[ih].seeders)|len(shard.swarms[ih].leechers) == 0 {
@ -226,16 +388,16 @@ func (s *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) err
return nil
}
func (s *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
func (ps *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
select {
case <-s.closed:
case <-ps.closed:
panic("attempted to interact with stopped memory store")
default:
}
pk := newPeerKey(p)
shard := s.shards[s.shardIndex(ih, len(p.IP) == net.IPv6len)]
shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
shard.Lock()
if _, ok := shard.swarms[ih]; !ok {
@ -245,26 +407,32 @@ func (s *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) e
}
}
// If this peer is a leecher, update the stats for the swarm and remove them.
if _, ok := shard.swarms[ih].leechers[pk]; ok {
shard.numLeechers--
delete(shard.swarms[ih].leechers, pk)
}
shard.swarms[ih].seeders[pk] = time.Now().UnixNano()
// If this peer isn't already a seeder, update the stats for the swarm.
if _, ok := shard.swarms[ih].seeders[pk]; !ok {
shard.numSeeders++
}
// Update the peer in the swarm.
shard.swarms[ih].seeders[pk] = ps.getClock()
shard.Unlock()
return nil
}
func (s *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant int, announcer bittorrent.Peer) (peers []bittorrent.Peer, err error) {
func (ps *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant int, announcer bittorrent.Peer) (peers []bittorrent.Peer, err error) {
select {
case <-s.closed:
case <-ps.closed:
panic("attempted to interact with stopped memory store")
default:
}
if numWant > s.maxNumWant {
numWant = s.maxNumWant
}
shard := s.shards[s.shardIndex(ih, len(announcer.IP) == net.IPv6len)]
shard := ps.shards[ps.shardIndex(ih, announcer.IP.AddressFamily)]
shard.RLock()
if _, ok := shard.swarms[ih]; !ok {
@ -275,41 +443,40 @@ func (s *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant i
if seeder {
// Append leechers as possible.
leechers := shard.swarms[ih].leechers
for p := range leechers {
decodedPeer := decodePeerKey(p)
for pk := range leechers {
if numWant == 0 {
break
}
peers = append(peers, decodedPeer)
peers = append(peers, decodePeerKey(pk))
numWant--
}
} else {
// Append as many seeders as possible.
seeders := shard.swarms[ih].seeders
for p := range seeders {
decodedPeer := decodePeerKey(p)
for pk := range seeders {
if numWant == 0 {
break
}
peers = append(peers, decodedPeer)
peers = append(peers, decodePeerKey(pk))
numWant--
}
// Append leechers until we reach numWant.
leechers := shard.swarms[ih].leechers
if numWant > 0 {
for p := range leechers {
decodedPeer := decodePeerKey(p)
leechers := shard.swarms[ih].leechers
announcerPK := newPeerKey(announcer)
for pk := range leechers {
if pk == announcerPK {
continue
}
if numWant == 0 {
break
}
if decodedPeer.Equal(announcer) {
continue
}
peers = append(peers, decodedPeer)
peers = append(peers, decodePeerKey(pk))
numWant--
}
}
@ -319,23 +486,25 @@ func (s *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant i
return
}
func (s *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, v6 bool) (resp bittorrent.Scrape) {
func (ps *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, addressFamily bittorrent.AddressFamily) (resp bittorrent.Scrape) {
select {
case <-s.closed:
case <-ps.closed:
panic("attempted to interact with stopped memory store")
default:
}
shard := s.shards[s.shardIndex(ih, v6)]
resp.InfoHash = ih
shard := ps.shards[ps.shardIndex(ih, addressFamily)]
shard.RLock()
if _, ok := shard.swarms[ih]; !ok {
swarm, ok := shard.swarms[ih]
if !ok {
shard.RUnlock()
return
}
resp.Incomplete = uint32(len(shard.swarms[ih].leechers))
resp.Complete = uint32(len(shard.swarms[ih].seeders))
resp.Incomplete = uint32(len(swarm.leechers))
resp.Complete = uint32(len(swarm.seeders))
shard.RUnlock()
return
@ -346,15 +515,17 @@ func (s *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, v6 bool) (resp bittorren
//
// This function must be able to execute while other methods on this interface
// are being executed in parallel.
func (s *peerStore) collectGarbage(cutoff time.Time) error {
func (ps *peerStore) collectGarbage(cutoff time.Time) error {
select {
case <-s.closed:
panic("attempted to interact with stopped memory store")
case <-ps.closed:
return nil
default:
}
cutoffUnix := cutoff.UnixNano()
for _, shard := range s.shards {
start := time.Now()
for _, shard := range ps.shards {
shard.RLock()
var infohashes []bittorrent.InfoHash
for ih := range shard.swarms {
@ -374,12 +545,14 @@ func (s *peerStore) collectGarbage(cutoff time.Time) error {
for pk, mtime := range shard.swarms[ih].leechers {
if mtime <= cutoffUnix {
shard.numLeechers--
delete(shard.swarms[ih].leechers, pk)
}
}
for pk, mtime := range shard.swarms[ih].seeders {
if mtime <= cutoffUnix {
shard.numSeeders--
delete(shard.swarms[ih].seeders, pk)
}
}
@ -395,19 +568,30 @@ func (s *peerStore) collectGarbage(cutoff time.Time) error {
runtime.Gosched()
}
recordGCDuration(time.Since(start))
return nil
}
func (s *peerStore) Stop() <-chan error {
toReturn := make(chan error)
func (ps *peerStore) Stop() stop.Result {
c := make(stop.Channel)
go func() {
shards := make([]*peerShard, len(s.shards))
for i := 0; i < len(s.shards); i++ {
close(ps.closed)
ps.wg.Wait()
// Explicitly deallocate our storage.
shards := make([]*peerShard, len(ps.shards))
for i := 0; i < len(ps.shards); i++ {
shards[i] = &peerShard{swarms: make(map[bittorrent.InfoHash]swarm)}
}
s.shards = shards
close(s.closed)
close(toReturn)
ps.shards = shards
c.Done()
}()
return toReturn
return c.Result()
}
func (ps *peerStore) LogFields() log.Fields {
return ps.cfg.LogFields()
}

View file

@ -2,20 +2,27 @@ package memory
import (
"testing"
"time"
s "github.com/chihaya/chihaya/storage"
)
func createNew() s.PeerStore {
ps, err := New(Config{ShardCount: 1024, GarbageCollectionInterval: 10 * time.Minute})
ps, err := New(Config{
ShardCount: 1024,
GarbageCollectionInterval: 10 * time.Minute,
PrometheusReportingInterval: 10 * time.Minute,
PeerLifetime: 30 * time.Minute,
})
if err != nil {
panic(err)
}
return ps
}
func TestPeerStore(t *testing.T) { s.TestPeerStore(t, createNew()) }
func BenchmarkNop(b *testing.B) { s.Nop(b, createNew()) }
func BenchmarkPut(b *testing.B) { s.Put(b, createNew()) }
func BenchmarkPut1k(b *testing.B) { s.Put1k(b, createNew()) }
func BenchmarkPut1kInfohash(b *testing.B) { s.Put1kInfohash(b, createNew()) }
@ -40,3 +47,5 @@ func BenchmarkAnnounceLeecher(b *testing.B) { s.AnnounceLeecher(b, cr
func BenchmarkAnnounceLeecher1kInfohash(b *testing.B) { s.AnnounceLeecher1kInfohash(b, createNew()) }
func BenchmarkAnnounceSeeder(b *testing.B) { s.AnnounceSeeder(b, createNew()) }
func BenchmarkAnnounceSeeder1kInfohash(b *testing.B) { s.AnnounceSeeder1kInfohash(b, createNew()) }
func BenchmarkScrapeSwarm(b *testing.B) { s.ScrapeSwarm(b, createNew()) }
func BenchmarkScrapeSwarm1kInfohash(b *testing.B) { s.ScrapeSwarm1kInfohash(b, createNew()) }

44
storage/prometheus.go Normal file
View file

@ -0,0 +1,44 @@
package storage
import "github.com/prometheus/client_golang/prometheus"
// init registers the storage metrics with the default Prometheus registry;
// MustRegister panics on duplicate registration.
func init() {
	// Register the metrics.
	prometheus.MustRegister(
		PromGCDurationMilliseconds,
		PromInfohashesCount,
		PromSeedersCount,
		PromLeechersCount,
	)
}
var (
	// PromGCDurationMilliseconds is a histogram used by storage to record the
	// durations of execution time required for removing expired peers.
	//
	// Buckets are exponential, starting at 9.375ms and doubling ten times.
	PromGCDurationMilliseconds = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "chihaya_storage_gc_duration_milliseconds",
		Help:    "The time it takes to perform storage garbage collection",
		Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
	})

	// PromInfohashesCount is a gauge used to hold the current total amount of
	// unique swarms being tracked by a storage.
	PromInfohashesCount = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "chihaya_storage_infohashes_count",
		Help: "The number of Infohashes tracked",
	})

	// PromSeedersCount is a gauge used to hold the current total amount of
	// unique seeders per swarm.
	PromSeedersCount = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "chihaya_storage_seeders_count",
		Help: "The number of seeders tracked",
	})

	// PromLeechersCount is a gauge used to hold the current total amount of
	// unique leechers per swarm.
	PromLeechersCount = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "chihaya_storage_leechers_count",
		Help: "The number of leechers tracked",
	})
)

829
storage/redis/peer_store.go Normal file
View file

@ -0,0 +1,829 @@
// Package redis implements the storage interface for a Chihaya
// BitTorrent tracker keeping peer data in redis with hash.
// There are two categories of hash:
//
// - IPv{4,6}_{L,S}_infohash
// To save peers that hold the infohash, used for fast searching,
// deleting, and timeout handling
//
// - IPv{4,6}
// To save all the infohashes, used for garbage collection,
// metrics aggregation and leecher graduation
//
// Tree keys are used to record the count of swarms, seeders
// and leechers for each group (IPv4, IPv6).
//
// - IPv{4,6}_infohash_count
// To record the number of infohashes.
//
// - IPv{4,6}_S_count
// To record the number of seeders.
//
// - IPv{4,6}_L_count
// To record the number of leechers.
package redis
import (
"encoding/binary"
"errors"
"net"
"strconv"
"sync"
"time"
"github.com/gomodule/redigo/redis"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop"
"github.com/chihaya/chihaya/pkg/timecache"
"github.com/chihaya/chihaya/storage"
)
// Name is the name by which this peer store is registered with Chihaya.
const Name = "redis"
// Default config constants.
const (
	defaultPrometheusReportingInterval = time.Second * 1
	defaultGarbageCollectionInterval   = time.Minute * 3
	defaultPeerLifetime                = time.Minute * 30
	// defaultRedisBroker is the fallback DSN used when no broker is
	// configured; the form is redis://[password@]host:port/db.
	defaultRedisBroker         = "redis://myRedis@127.0.0.1:6379/0"
	defaultRedisReadTimeout    = time.Second * 15
	defaultRedisWriteTimeout   = time.Second * 15
	defaultRedisConnectTimeout = time.Second * 15
)
// init registers this package as the "redis" storage driver so it can be
// selected by name from configuration.
func init() {
	// Register the storage driver.
	storage.RegisterDriver(Name, driver{})
}
// driver implements storage.Driver for the redis peer store.
type driver struct{}

// NewPeerStore builds a redis-backed PeerStore from an opaque configuration
// value by round-tripping it through YAML into a Config.
func (d driver) NewPeerStore(icfg interface{}) (storage.PeerStore, error) {
	// Marshal the untyped config back into bytes, then decode those bytes
	// into the concrete Config this driver understands.
	raw, err := yaml.Marshal(icfg)
	if err != nil {
		return nil, err
	}

	var cfg Config
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		return nil, err
	}

	return New(cfg)
}
// Config holds the configuration of a redis PeerStore.
type Config struct {
	// GarbageCollectionInterval is how often expired peers are evicted.
	GarbageCollectionInterval time.Duration `yaml:"gc_interval"`
	// PrometheusReportingInterval is how often metrics are aggregated.
	PrometheusReportingInterval time.Duration `yaml:"prometheus_reporting_interval"`
	// PeerLifetime is how long a peer may go without announcing.
	PeerLifetime time.Duration `yaml:"peer_lifetime"`
	// RedisBroker is the redis connection DSN.
	RedisBroker string `yaml:"redis_broker"`
	// Redis{Read,Write,Connect}Timeout bound the individual redis operations.
	RedisReadTimeout    time.Duration `yaml:"redis_read_timeout"`
	RedisWriteTimeout   time.Duration `yaml:"redis_write_timeout"`
	RedisConnectTimeout time.Duration `yaml:"redis_connect_timeout"`
}
// LogFields renders the current config as a set of Logrus fields.
// The RedisBroker DSN is logged verbatim; it may embed a password.
func (cfg Config) LogFields() log.Fields {
	return log.Fields{
		"name":                Name,
		"gcInterval":          cfg.GarbageCollectionInterval,
		"promReportInterval":  cfg.PrometheusReportingInterval,
		"peerLifetime":        cfg.PeerLifetime,
		"redisBroker":         cfg.RedisBroker,
		"redisReadTimeout":    cfg.RedisReadTimeout,
		"redisWriteTimeout":   cfg.RedisWriteTimeout,
		"redisConnectTimeout": cfg.RedisConnectTimeout,
	}
}
// Validate sanity checks values set in a config and returns a new config with
// default values replacing anything that is invalid.
//
// This function warns to the logger when a value is changed.
func (cfg Config) Validate() Config {
	// Work on a copy so the caller's config is never mutated.
	validcfg := cfg

	// An empty broker DSN falls back to the package default.
	if cfg.RedisBroker == "" {
		validcfg.RedisBroker = defaultRedisBroker
		log.Warn("falling back to default configuration", log.Fields{
			"name":     Name + ".RedisBroker",
			"provided": cfg.RedisBroker,
			"default":  validcfg.RedisBroker,
		})
	}

	// All durations below must be strictly positive; zero means "unset".
	if cfg.RedisReadTimeout <= 0 {
		validcfg.RedisReadTimeout = defaultRedisReadTimeout
		log.Warn("falling back to default configuration", log.Fields{
			"name":     Name + ".RedisReadTimeout",
			"provided": cfg.RedisReadTimeout,
			"default":  validcfg.RedisReadTimeout,
		})
	}

	if cfg.RedisWriteTimeout <= 0 {
		validcfg.RedisWriteTimeout = defaultRedisWriteTimeout
		log.Warn("falling back to default configuration", log.Fields{
			"name":     Name + ".RedisWriteTimeout",
			"provided": cfg.RedisWriteTimeout,
			"default":  validcfg.RedisWriteTimeout,
		})
	}

	if cfg.RedisConnectTimeout <= 0 {
		validcfg.RedisConnectTimeout = defaultRedisConnectTimeout
		log.Warn("falling back to default configuration", log.Fields{
			"name":     Name + ".RedisConnectTimeout",
			"provided": cfg.RedisConnectTimeout,
			"default":  validcfg.RedisConnectTimeout,
		})
	}

	if cfg.GarbageCollectionInterval <= 0 {
		validcfg.GarbageCollectionInterval = defaultGarbageCollectionInterval
		log.Warn("falling back to default configuration", log.Fields{
			"name":     Name + ".GarbageCollectionInterval",
			"provided": cfg.GarbageCollectionInterval,
			"default":  validcfg.GarbageCollectionInterval,
		})
	}

	if cfg.PrometheusReportingInterval <= 0 {
		validcfg.PrometheusReportingInterval = defaultPrometheusReportingInterval
		log.Warn("falling back to default configuration", log.Fields{
			"name":     Name + ".PrometheusReportingInterval",
			"provided": cfg.PrometheusReportingInterval,
			"default":  validcfg.PrometheusReportingInterval,
		})
	}

	if cfg.PeerLifetime <= 0 {
		validcfg.PeerLifetime = defaultPeerLifetime
		log.Warn("falling back to default configuration", log.Fields{
			"name":     Name + ".PeerLifetime",
			"provided": cfg.PeerLifetime,
			"default":  validcfg.PeerLifetime,
		})
	}

	return validcfg
}
// New creates a new PeerStore backed by redis.
//
// It validates the provided configuration, then starts two background
// goroutines (garbage collection and Prometheus reporting) that run until
// Stop is called.
func New(provided Config) (storage.PeerStore, error) {
	cfg := provided.Validate()
	u, err := parseRedisURL(cfg.RedisBroker)
	if err != nil {
		return nil, err
	}

	ps := &peerStore{
		cfg: cfg,
		// BUG FIX: the backend previously received &provided, i.e. the
		// unvalidated config, so defaulted timeouts were never applied.
		rb:     newRedisBackend(&cfg, u, ""),
		closed: make(chan struct{}),
	}

	// Start a goroutine for garbage collection.
	ps.wg.Add(1)
	go func() {
		defer ps.wg.Done()
		// A ticker avoids allocating a new timer every iteration
		// (and mirrors the Prometheus goroutine below).
		t := time.NewTicker(cfg.GarbageCollectionInterval)
		defer t.Stop()
		for {
			select {
			case <-ps.closed:
				return
			case <-t.C:
				before := time.Now().Add(-cfg.PeerLifetime)
				log.Debug("storage: purging peers with no announces since", log.Fields{"before": before})
				// Shadow err: this goroutine must not write to New's
				// outer err variable after New has returned.
				if err := ps.collectGarbage(before); err != nil {
					log.Error("storage: collectGarbage error", log.Fields{"before": before, "error": err})
				}
			}
		}
	}()

	// Start a goroutine for reporting statistics to Prometheus.
	ps.wg.Add(1)
	go func() {
		defer ps.wg.Done()
		t := time.NewTicker(cfg.PrometheusReportingInterval)
		defer t.Stop()
		for {
			select {
			case <-ps.closed:
				return
			case <-t.C:
				before := time.Now()
				ps.populateProm()
				log.Debug("storage: populateProm() finished", log.Fields{"timeTaken": time.Since(before)})
			}
		}
	}()

	return ps, nil
}
// serializedPeer is the flattened wire form of a peer used as a redis hash
// field: 20-byte peer ID, 2-byte big-endian port, then the raw IP bytes
// (4 for IPv4, 16 for IPv6).
type serializedPeer string

// newPeerKey serializes p into its serializedPeer representation.
func newPeerKey(p bittorrent.Peer) serializedPeer {
	buf := make([]byte, 0, 20+2+len(p.IP.IP))
	buf = append(buf, p.ID[:]...)

	var port [2]byte
	binary.BigEndian.PutUint16(port[:], p.Port)
	buf = append(buf, port[:]...)

	buf = append(buf, p.IP.IP...)
	return serializedPeer(buf)
}
// decodePeerKey is the inverse of newPeerKey: it unpacks the 20-byte peer
// ID, the 2-byte big-endian port, and the trailing IP bytes from pk.
//
// It panics when the trailing bytes form neither a valid IPv4 nor IPv6
// address, since that indicates a corrupted key.
func decodePeerKey(pk serializedPeer) bittorrent.Peer {
	peer := bittorrent.Peer{
		ID:   bittorrent.PeerIDFromString(string(pk[:20])),
		Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
		IP:   bittorrent.IP{IP: net.IP(pk[22:])},
	}

	// Normalize: a 16-byte IPv4-mapped address is reduced to its 4-byte
	// form so the AddressFamily matches the stored representation.
	if ip := peer.IP.To4(); ip != nil {
		peer.IP.IP = ip
		peer.IP.AddressFamily = bittorrent.IPv4
	} else if len(peer.IP.IP) == net.IPv6len { // implies toReturn.IP.To4() == nil
		peer.IP.AddressFamily = bittorrent.IPv6
	} else {
		panic("IP is neither v4 nor v6")
	}

	return peer
}
// peerStore is a storage.PeerStore implementation backed by redis.
type peerStore struct {
	cfg Config        // validated configuration the store was built with
	rb  *redisBackend // redis connection pool (plus redsync handle)

	closed chan struct{}  // closed by Stop; signals background goroutines to exit
	wg     sync.WaitGroup // tracks the GC and Prometheus reporting goroutines
}
// groups returns the two address-family namespaces every redis key is
// scoped under.
func (ps *peerStore) groups() []string {
	return []string{bittorrent.IPv4.String(), bittorrent.IPv6.String()}
}

// leecherInfohashKey names the hash holding the leechers of infohash ih
// within address family af.
func (ps *peerStore) leecherInfohashKey(af, ih string) string {
	return af + "_L_" + ih
}

// seederInfohashKey names the hash holding the seeders of infohash ih
// within address family af.
func (ps *peerStore) seederInfohashKey(af, ih string) string {
	return af + "_S_" + ih
}

// infohashCountKey names the counter of tracked infohashes for family af.
func (ps *peerStore) infohashCountKey(af string) string {
	return af + "_infohash_count"
}

// seederCountKey names the counter of seeders for family af.
func (ps *peerStore) seederCountKey(af string) string {
	return af + "_S_count"
}

// leecherCountKey names the counter of leechers for family af.
func (ps *peerStore) leecherCountKey(af string) string {
	return af + "_L_count"
}
// populateProm aggregates metrics over all groups and then posts them to
// prometheus.
func (ps *peerStore) populateProm() {
	conn := ps.rb.open()
	defer conn.Close()

	// readCounter fetches one integer counter. A missing key
	// (redis.ErrNil) counts as zero; any other failure is logged and
	// also contributes zero.
	readCounter := func(key string) int64 {
		n, err := redis.Int64(conn.Do("GET", key))
		if err != nil && !errors.Is(err, redis.ErrNil) {
			log.Error("storage: GET counter failure", log.Fields{
				"key":   key,
				"error": err,
			})
			return 0
		}
		return n
	}

	var numInfohashes, numSeeders, numLeechers int64
	for _, group := range ps.groups() {
		numInfohashes += readCounter(ps.infohashCountKey(group))
		numSeeders += readCounter(ps.seederCountKey(group))
		numLeechers += readCounter(ps.leecherCountKey(group))
	}

	storage.PromInfohashesCount.Set(float64(numInfohashes))
	storage.PromSeedersCount.Set(float64(numSeeders))
	storage.PromLeechersCount.Set(float64(numLeechers))
}
// getClock returns the current time in unix nanoseconds, read from the
// timecache package rather than time.Now directly.
func (ps *peerStore) getClock() int64 {
	return timecache.NowUnixNano()
}
// PutSeeder adds a Seeder to the Swarm identified by the provided InfoHash.
//
// The write is a MULTI/EXEC pipeline that updates both the per-infohash
// seeder hash and the per-addressFamily hash used by garbage collection.
// Each HSET reply is 1 when it created a new field and 0 when it
// overwrote an existing one; those replies drive the seeder and infohash
// counters so they stay in sync with the hashes.
func (ps *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	addressFamily := p.IP.AddressFamily.String()
	log.Debug("storage: PutSeeder", log.Fields{
		"InfoHash": ih.String(),
		"Peer":     p,
	})

	// Using a stopped store is a programmer error; fail loudly.
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped redis store")
	default:
	}

	pk := newPeerKey(p)

	encodedSeederInfoHash := ps.seederInfohashKey(addressFamily, ih.String())
	ct := ps.getClock()

	conn := ps.rb.open()
	defer conn.Close()

	// Send errors surface through the Do("EXEC") below, so they are
	// deliberately ignored here.
	_ = conn.Send("MULTI")
	_ = conn.Send("HSET", encodedSeederInfoHash, pk, ct)
	_ = conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
	reply, err := redis.Int64s(conn.Do("EXEC"))
	if err != nil {
		return err
	}
	// pk is a new field.
	if reply[0] == 1 {
		_, err = conn.Do("INCR", ps.seederCountKey(addressFamily))
		if err != nil {
			return err
		}
	}
	// encodedSeederInfoHash is a new field.
	if reply[1] == 1 {
		_, err = conn.Do("INCR", ps.infohashCountKey(addressFamily))
		if err != nil {
			return err
		}
	}
	return nil
}
// DeleteSeeder removes a Seeder from the Swarm identified by the provided
// InfoHash, keeping the per-family seeder counter in sync. It returns
// storage.ErrResourceDoesNotExist when the peer was not present.
func (ps *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	af := p.IP.AddressFamily.String()
	log.Debug("storage: DeleteSeeder", log.Fields{
		"InfoHash": ih.String(),
		"Peer":     p,
	})

	// Using a stopped store is a programmer error; fail loudly.
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped redis store")
	default:
	}

	conn := ps.rb.open()
	defer conn.Close()

	removed, err := redis.Int64(conn.Do("HDEL", ps.seederInfohashKey(af, ih.String()), newPeerKey(p)))
	if err != nil {
		return err
	}
	if removed == 0 {
		return storage.ErrResourceDoesNotExist
	}

	_, err = conn.Do("DECR", ps.seederCountKey(af))
	return err
}
// PutLeecher adds a Leecher to the Swarm identified by the provided
// InfoHash, creating the swarm when it does not yet exist.
//
// Like PutSeeder, the write is a MULTI/EXEC pipeline updating the
// per-infohash leecher hash and the per-addressFamily GC hash; the first
// HSET reply (1 = new field) drives the leecher counter. Note that,
// unlike PutSeeder, the second reply is not inspected: only seeder
// insertion increments the infohash counter.
func (ps *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	addressFamily := p.IP.AddressFamily.String()
	log.Debug("storage: PutLeecher", log.Fields{
		"InfoHash": ih.String(),
		"Peer":     p,
	})

	// Using a stopped store is a programmer error; fail loudly.
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped redis store")
	default:
	}

	// Update the peer in the swarm.
	encodedLeecherInfoHash := ps.leecherInfohashKey(addressFamily, ih.String())
	pk := newPeerKey(p)
	ct := ps.getClock()

	conn := ps.rb.open()
	defer conn.Close()

	// Send errors surface through the Do("EXEC") below.
	_ = conn.Send("MULTI")
	_ = conn.Send("HSET", encodedLeecherInfoHash, pk, ct)
	_ = conn.Send("HSET", addressFamily, encodedLeecherInfoHash, ct)
	reply, err := redis.Int64s(conn.Do("EXEC"))
	if err != nil {
		return err
	}
	// pk is a new field.
	if reply[0] == 1 {
		_, err = conn.Do("INCR", ps.leecherCountKey(addressFamily))
		if err != nil {
			return err
		}
	}
	return nil
}
// DeleteLeecher removes a Leecher from the Swarm identified by the
// provided InfoHash, keeping the per-family leecher counter in sync. It
// returns storage.ErrResourceDoesNotExist when the peer was not present.
func (ps *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	af := p.IP.AddressFamily.String()
	log.Debug("storage: DeleteLeecher", log.Fields{
		"InfoHash": ih.String(),
		"Peer":     p,
	})

	// Using a stopped store is a programmer error; fail loudly.
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped redis store")
	default:
	}

	conn := ps.rb.open()
	defer conn.Close()

	removed, err := redis.Int64(conn.Do("HDEL", ps.leecherInfohashKey(af, ih.String()), newPeerKey(p)))
	if err != nil {
		return err
	}
	if removed == 0 {
		return storage.ErrResourceDoesNotExist
	}

	_, err = conn.Do("DECR", ps.leecherCountKey(af))
	return err
}
// GraduateLeecher promotes a Leecher to a Seeder in the Swarm identified
// by the provided InfoHash. If the peer was not present as a Leecher, it
// is simply added as a Seeder.
//
// The pipeline issues HDEL (leecher hash), HSET (seeder hash) and HSET
// (GC hash) in one MULTI/EXEC; the three replies, in that order, tell
// which counters must be adjusted: reply[0]=1 means a leecher was
// removed, reply[1]=1 means a new seeder field, reply[2]=1 means the
// infohash is new to the GC hash.
func (ps *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	addressFamily := p.IP.AddressFamily.String()
	log.Debug("storage: GraduateLeecher", log.Fields{
		"InfoHash": ih.String(),
		"Peer":     p,
	})

	// Using a stopped store is a programmer error; fail loudly.
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped redis store")
	default:
	}

	encodedInfoHash := ih.String()
	encodedLeecherInfoHash := ps.leecherInfohashKey(addressFamily, encodedInfoHash)
	encodedSeederInfoHash := ps.seederInfohashKey(addressFamily, encodedInfoHash)
	pk := newPeerKey(p)
	ct := ps.getClock()

	conn := ps.rb.open()
	defer conn.Close()

	// Send errors surface through the Do("EXEC") below.
	_ = conn.Send("MULTI")
	_ = conn.Send("HDEL", encodedLeecherInfoHash, pk)
	_ = conn.Send("HSET", encodedSeederInfoHash, pk, ct)
	_ = conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
	reply, err := redis.Int64s(conn.Do("EXEC"))
	if err != nil {
		return err
	}
	// The peer was previously a leecher.
	if reply[0] == 1 {
		_, err = conn.Do("DECR", ps.leecherCountKey(addressFamily))
		if err != nil {
			return err
		}
	}
	// The peer is a new seeder field.
	if reply[1] == 1 {
		_, err = conn.Do("INCR", ps.seederCountKey(addressFamily))
		if err != nil {
			return err
		}
	}
	// The infohash is new to the addressFamily GC hash.
	if reply[2] == 1 {
		_, err = conn.Do("INCR", ps.infohashCountKey(addressFamily))
		if err != nil {
			return err
		}
	}
	return nil
}
// AnnouncePeers is a best effort attempt to return up to numWant Peers
// from the Swarm identified by the provided InfoHash, all of the
// announcer's address family.
//
// Seeders are handed leechers; leechers are handed seeders first, then
// leechers (excluding themselves) until numWant is satisfied.
//
// Returns storage.ErrResourceDoesNotExist when both swarms are empty.
func (ps *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant int, announcer bittorrent.Peer) (peers []bittorrent.Peer, err error) {
	addressFamily := announcer.IP.AddressFamily.String()
	log.Debug("storage: AnnouncePeers", log.Fields{
		"InfoHash": ih.String(),
		"seeder":   seeder,
		"numWant":  numWant,
		"Peer":     announcer,
	})

	// Using a stopped store is a programmer error; fail loudly.
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped redis store")
	default:
	}

	encodedInfoHash := ih.String()
	encodedLeecherInfoHash := ps.leecherInfohashKey(addressFamily, encodedInfoHash)
	encodedSeederInfoHash := ps.seederInfohashKey(addressFamily, encodedInfoHash)

	conn := ps.rb.open()
	defer conn.Close()

	leechers, err := conn.Do("HKEYS", encodedLeecherInfoHash)
	if err != nil {
		return nil, err
	}
	conLeechers := leechers.([]interface{})

	seeders, err := conn.Do("HKEYS", encodedSeederInfoHash)
	if err != nil {
		return nil, err
	}
	conSeeders := seeders.([]interface{})

	if len(conLeechers) == 0 && len(conSeeders) == 0 {
		return nil, storage.ErrResourceDoesNotExist
	}

	if seeder {
		// Append leechers as possible.
		for _, pk := range conLeechers {
			if numWant == 0 {
				break
			}

			peers = append(peers, decodePeerKey(serializedPeer(pk.([]byte))))
			numWant--
		}
	} else {
		// Append as many seeders as possible.
		for _, pk := range conSeeders {
			if numWant == 0 {
				break
			}

			peers = append(peers, decodePeerKey(serializedPeer(pk.([]byte))))
			numWant--
		}

		// Append leechers until we reach numWant, skipping the
		// announcing peer itself.
		if numWant > 0 {
			announcerPK := newPeerKey(announcer)
			for _, pk := range conLeechers {
				// BUG FIX: pk is an interface{} wrapping a []byte, so the
				// previous `pk == announcerPK` compared values of different
				// dynamic types and was always false — announcers could be
				// handed their own address. Convert before comparing.
				if serializedPeer(pk.([]byte)) == announcerPK {
					continue
				}

				if numWant == 0 {
					break
				}

				peers = append(peers, decodePeerKey(serializedPeer(pk.([]byte))))
				numWant--
			}
		}
	}

	return
}
// ScrapeSwarm reports the number of seeders (Complete) and leechers
// (Incomplete) for the swarm identified by ih within address family af.
// On any redis failure the error is logged and a partially-filled Scrape
// is returned.
func (ps *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, af bittorrent.AddressFamily) (resp bittorrent.Scrape) {
	// Using a stopped store is a programmer error; fail loudly.
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped redis store")
	default:
	}

	resp.InfoHash = ih
	family := af.String()
	encoded := ih.String()

	conn := ps.rb.open()
	defer conn.Close()

	// hashLen returns the field count of the given hash; ok is false
	// (after logging) when the HLEN call failed.
	hashLen := func(key string) (n int64, ok bool) {
		n, err := redis.Int64(conn.Do("HLEN", key))
		if err != nil {
			log.Error("storage: Redis HLEN failure", log.Fields{
				"Hkey":  key,
				"error": err,
			})
			return 0, false
		}
		return n, true
	}

	leechers, ok := hashLen(ps.leecherInfohashKey(family, encoded))
	if !ok {
		return
	}
	seeders, ok := hashLen(ps.seederInfohashKey(family, encoded))
	if !ok {
		return
	}

	resp.Incomplete = uint32(leechers)
	resp.Complete = uint32(seeders)
	return
}
// collectGarbage deletes all Peers from the PeerStore which are older than the
// cutoff time.
//
// This function must be able to execute while other methods on this interface
// are being executed in parallel.
//
//   - The Delete(Seeder|Leecher) and GraduateLeecher methods never delete an
//     infohash key from an addressFamily hash. They also never decrement the
//     infohash counter.
//   - The Put(Seeder|Leecher) and GraduateLeecher methods only ever add infohash
//     keys to addressFamily hashes and increment the infohash counter.
//   - The only method that deletes from the addressFamily hashes is
//     collectGarbage, which also decrements the counters. That means that,
//     even if a Delete(Seeder|Leecher) call removes the last peer from a swarm,
//     the infohash counter is not changed and the infohash is left in the
//     addressFamily hash until it will be cleaned up by collectGarbage.
//   - collectGarbage must run regularly.
//   - A WATCH ... MULTI ... EXEC block fails, if between the WATCH and the 'EXEC'
//     any of the watched keys have changed. The location of the 'MULTI' doesn't
//     matter.
//
// We have to analyze four cases to prove our algorithm works. I'll characterize
// them by a tuple (number of peers in a swarm before WATCH, number of peers in
// the swarm during the transaction).
//
//  1. (0,0), the easy case: The swarm is empty, we watch the key, we execute
//     HLEN and find it empty. We remove it and decrement the counter. It stays
//     empty the entire time, the transaction goes through.
//  2. (1,n > 0): The swarm is not empty, we watch the key, we find it non-empty,
//     we unwatch the key. All good. No transaction is made, no transaction fails.
//  3. (0,1): We have to analyze this in two ways.
//     - If the change happens before the HLEN call, we will see that the swarm is
//     not empty and start no transaction.
//     - If the change happens after the HLEN, we will attempt a transaction and it
//     will fail. This is okay, the swarm is not empty, we will try cleaning it up
//     next time collectGarbage runs.
//  4. (1,0): Again, two ways:
//     - If the change happens before the HLEN, we will see an empty swarm. This
//     situation happens if a call to Delete(Seeder|Leecher) removed the last
//     peer asynchronously. We will attempt a transaction, but the transaction
//     will fail. This is okay, the infohash key will remain in the addressFamily
//     hash, we will attempt to clean it up the next time 'collectGarbage' runs.
//     - If the change happens after the HLEN, we will not even attempt to make the
//     transaction. The infohash key will remain in the addressFamily hash and
//     we'll attempt to clean it up the next time collectGarbage runs.
func (ps *peerStore) collectGarbage(cutoff time.Time) error {
	// A stopped store has nothing left to collect; this is not an error.
	select {
	case <-ps.closed:
		return nil
	default:
	}

	conn := ps.rb.open()
	defer conn.Close()

	cutoffUnix := cutoff.UnixNano()
	start := time.Now()

	for _, group := range ps.groups() {
		// list all infohashes in the group
		infohashesList, err := redis.Strings(conn.Do("HKEYS", group))
		if err != nil {
			return err
		}
		for _, ihStr := range infohashesList {
			// Keys look like "IPv4_S_<ih>"/"IPv6_L_<ih>": the byte at
			// index 5 is the seeder/leecher marker.
			isSeeder := len(ihStr) > 5 && ihStr[5:6] == "S"
			// list all (peer, timeout) pairs for the ih
			ihList, err := redis.Strings(conn.Do("HGETALL", ihStr))
			if err != nil {
				return err
			}

			var pk serializedPeer
			var removedPeerCount int64
			// HGETALL returns alternating field/value entries: even
			// indices are serialized peers, odd indices their
			// last-announce timestamps.
			for index, ihField := range ihList {
				if index%2 == 1 { // value
					mtime, err := strconv.ParseInt(ihField, 10, 64)
					if err != nil {
						return err
					}
					if mtime <= cutoffUnix {
						log.Debug("storage: deleting peer", log.Fields{
							"Peer": decodePeerKey(pk).String(),
						})
						ret, err := redis.Int64(conn.Do("HDEL", ihStr, pk))
						if err != nil {
							return err
						}

						removedPeerCount += ret
					}
				} else { // key
					pk = serializedPeer([]byte(ihField))
				}
			}
			// DECR seeder/leecher counter
			decrCounter := ps.leecherCountKey(group)
			if isSeeder {
				decrCounter = ps.seederCountKey(group)
			}
			if removedPeerCount > 0 {
				if _, err := conn.Do("DECRBY", decrCounter, removedPeerCount); err != nil {
					return err
				}
			}

			// use WATCH to avoid race condition
			// https://redis.io/topics/transactions
			_, err = conn.Do("WATCH", ihStr)
			if err != nil {
				return err
			}
			ihLen, err := redis.Int64(conn.Do("HLEN", ihStr))
			if err != nil {
				return err
			}
			if ihLen == 0 {
				// Empty hashes are not shown among existing keys,
				// in other words, it's removed automatically after `HDEL` the last field.
				//_, err := conn.Do("DEL", ihStr)

				_ = conn.Send("MULTI")
				_ = conn.Send("HDEL", group, ihStr)
				if isSeeder {
					_ = conn.Send("DECR", ps.infohashCountKey(group))
				}
				// redis.ErrNil here means the watched key changed and the
				// transaction was aborted — see the analysis above; that
				// case is deliberately tolerated.
				_, err = redis.Values(conn.Do("EXEC"))
				if err != nil && !errors.Is(err, redis.ErrNil) {
					log.Error("storage: Redis EXEC failure", log.Fields{
						"group":    group,
						"infohash": ihStr,
						"error":    err,
					})
				}
			} else {
				if _, err = conn.Do("UNWATCH"); err != nil && !errors.Is(err, redis.ErrNil) {
					log.Error("storage: Redis UNWATCH failure", log.Fields{"error": err})
				}
			}
		}
	}

	duration := float64(time.Since(start).Nanoseconds()) / float64(time.Millisecond)
	log.Debug("storage: recordGCDuration", log.Fields{"timeTaken(ms)": duration})
	storage.PromGCDurationMilliseconds.Observe(duration)

	return nil
}
// Stop shuts the store down: it signals the background goroutines via the
// closed channel, waits for them to exit, and resolves the returned
// stop.Result. Data already written to redis is intentionally left behind.
func (ps *peerStore) Stop() stop.Result {
	done := make(stop.Channel)
	go func() {
		defer done.Done()
		close(ps.closed)
		ps.wg.Wait()
		log.Info("storage: exiting. chihaya does not clear data in redis when exiting. chihaya keys have prefix 'IPv{4,6}_'.")
	}()
	return done.Result()
}
// LogFields implements log.Fielder by exposing the store's configuration.
func (ps *peerStore) LogFields() log.Fields {
	return ps.cfg.LogFields()
}

View file

@ -0,0 +1,62 @@
package redis
import (
"fmt"
"testing"
"time"
"github.com/alicebob/miniredis"
s "github.com/chihaya/chihaya/storage"
)
// createNew starts an in-process miniredis instance and returns a redis
// PeerStore configured against it, panicking on any setup failure.
func createNew() s.PeerStore {
	mr, err := miniredis.Run()
	if err != nil {
		panic(err)
	}

	cfg := Config{
		GarbageCollectionInterval:   10 * time.Minute,
		PrometheusReportingInterval: 10 * time.Minute,
		PeerLifetime:                30 * time.Minute,
		RedisBroker:                 fmt.Sprintf("redis://@%s/0", mr.Addr()),
		RedisReadTimeout:            10 * time.Second,
		RedisWriteTimeout:           10 * time.Second,
		RedisConnectTimeout:         10 * time.Second,
	}

	store, err := New(cfg)
	if err != nil {
		panic(err)
	}
	return store
}
// TestPeerStore runs the shared storage conformance suite against a
// redis-backed PeerStore.
func TestPeerStore(t *testing.T) { s.TestPeerStore(t, createNew()) }

// The benchmarks below delegate to the shared helpers in the storage
// package; each measures one PeerStore operation pattern.
// NOTE(review): every benchmark creates a fresh store (and miniredis)
// that is never stopped — confirm whether the leaked GC/Prometheus
// goroutines skew results.
func BenchmarkNop(b *testing.B) { s.Nop(b, createNew()) }

func BenchmarkPut(b *testing.B) { s.Put(b, createNew()) }

func BenchmarkPut1k(b *testing.B) { s.Put1k(b, createNew()) }

func BenchmarkPut1kInfohash(b *testing.B) { s.Put1kInfohash(b, createNew()) }

func BenchmarkPut1kInfohash1k(b *testing.B) { s.Put1kInfohash1k(b, createNew()) }

func BenchmarkPutDelete(b *testing.B) { s.PutDelete(b, createNew()) }

func BenchmarkPutDelete1k(b *testing.B) { s.PutDelete1k(b, createNew()) }

func BenchmarkPutDelete1kInfohash(b *testing.B) { s.PutDelete1kInfohash(b, createNew()) }

func BenchmarkPutDelete1kInfohash1k(b *testing.B) { s.PutDelete1kInfohash1k(b, createNew()) }

func BenchmarkDeleteNonexist(b *testing.B) { s.DeleteNonexist(b, createNew()) }

func BenchmarkDeleteNonexist1k(b *testing.B) { s.DeleteNonexist1k(b, createNew()) }

func BenchmarkDeleteNonexist1kInfohash(b *testing.B) { s.DeleteNonexist1kInfohash(b, createNew()) }

func BenchmarkDeleteNonexist1kInfohash1k(b *testing.B) { s.DeleteNonexist1kInfohash1k(b, createNew()) }

func BenchmarkPutGradDelete(b *testing.B) { s.PutGradDelete(b, createNew()) }

func BenchmarkPutGradDelete1k(b *testing.B) { s.PutGradDelete1k(b, createNew()) }

func BenchmarkPutGradDelete1kInfohash(b *testing.B) { s.PutGradDelete1kInfohash(b, createNew()) }

func BenchmarkPutGradDelete1kInfohash1k(b *testing.B) { s.PutGradDelete1kInfohash1k(b, createNew()) }

func BenchmarkGradNonexist(b *testing.B) { s.GradNonexist(b, createNew()) }

func BenchmarkGradNonexist1k(b *testing.B) { s.GradNonexist1k(b, createNew()) }

func BenchmarkGradNonexist1kInfohash(b *testing.B) { s.GradNonexist1kInfohash(b, createNew()) }

func BenchmarkGradNonexist1kInfohash1k(b *testing.B) { s.GradNonexist1kInfohash1k(b, createNew()) }

func BenchmarkAnnounceLeecher(b *testing.B) { s.AnnounceLeecher(b, createNew()) }

func BenchmarkAnnounceLeecher1kInfohash(b *testing.B) { s.AnnounceLeecher1kInfohash(b, createNew()) }

func BenchmarkAnnounceSeeder(b *testing.B) { s.AnnounceSeeder(b, createNew()) }

func BenchmarkAnnounceSeeder1kInfohash(b *testing.B) { s.AnnounceSeeder1kInfohash(b, createNew()) }

func BenchmarkScrapeSwarm(b *testing.B) { s.ScrapeSwarm(b, createNew()) }

func BenchmarkScrapeSwarm1kInfohash(b *testing.B) { s.ScrapeSwarm1kInfohash(b, createNew()) }

136
storage/redis/redis.go Normal file
View file

@ -0,0 +1,136 @@
package redis
import (
"errors"
"net/url"
"strconv"
"strings"
"time"
"github.com/go-redsync/redsync/v4"
"github.com/go-redsync/redsync/v4/redis/redigo"
redigolib "github.com/gomodule/redigo/redis"
)
// redisBackend represents a redis handler.
type redisBackend struct {
	pool    *redigolib.Pool
	redsync *redsync.Redsync
}

// newRedisBackend creates a redisBackend instance.
func newRedisBackend(cfg *Config, u *redisURL, socketPath string) *redisBackend {
	connector := &redisConnector{
		URL:            u,
		SocketPath:     socketPath,
		ReadTimeout:    cfg.RedisReadTimeout,
		WriteTimeout:   cfg.RedisWriteTimeout,
		ConnectTimeout: cfg.RedisConnectTimeout,
	}
	pool := connector.NewPool()

	// Name the local value rs so it does not shadow the redsync package.
	rs := redsync.New(redigo.NewPool(pool))

	return &redisBackend{
		pool:    pool,
		redsync: rs,
	}
}
// open returns a Redis connection from the backend's pool; the caller is
// responsible for closing it.
func (rb *redisBackend) open() redigolib.Conn {
	return rb.pool.Get()
}
// redisConnector holds everything needed to dial a redis server: the
// parsed URL (or a unix socket path, which takes precedence) and the
// per-operation timeouts.
type redisConnector struct {
	URL            *redisURL
	SocketPath     string // when non-empty, dial via unix socket instead of TCP
	ReadTimeout    time.Duration
	WriteTimeout   time.Duration
	ConnectTimeout time.Duration
}
// NewPool returns a new pool of Redis connections.
//
// Dialed connections are switched to the configured database (when it is
// not redis' default db 0) before being handed out.
func (rc *redisConnector) NewPool() *redigolib.Pool {
	return &redigolib.Pool{
		MaxIdle:     3,
		IdleTimeout: 240 * time.Second,
		Dial: func() (redigolib.Conn, error) {
			c, err := rc.open()
			if err != nil {
				return nil, err
			}

			if rc.URL.DB != 0 {
				if _, err := c.Do("SELECT", rc.URL.DB); err != nil {
					// BUG FIX: close the freshly dialed connection so it
					// does not leak when database selection fails.
					_ = c.Close()
					return nil, err
				}
			}

			return c, nil
		},

		// PINGs connections that have been idle more than 10 seconds
		TestOnBorrow: func(c redigolib.Conn, t time.Time) error {
			if time.Since(t) < 10*time.Second {
				return nil
			}
			_, err := c.Do("PING")
			return err
		},
	}
}
// open dials a new Redis connection, applying the connector's database
// and timeout options. A configured unix socket takes precedence over the
// URL's TCP host.
func (rc *redisConnector) open() (redigolib.Conn, error) {
	opts := []redigolib.DialOption{
		redigolib.DialDatabase(rc.URL.DB),
		redigolib.DialReadTimeout(rc.ReadTimeout),
		redigolib.DialWriteTimeout(rc.WriteTimeout),
		redigolib.DialConnectTimeout(rc.ConnectTimeout),
	}
	if rc.URL.Password != "" {
		opts = append(opts, redigolib.DialPassword(rc.URL.Password))
	}

	network, address := "tcp", rc.URL.Host
	if rc.SocketPath != "" {
		network, address = "unix", rc.SocketPath
	}
	return redigolib.Dial(network, address, opts...)
}
// A redisURL represents a parsed redisURL.
// The general form represented is:
//
//	redis://[password@]host[:port][/[db]]
type redisURL struct {
	Host     string
	Password string
	DB       int
}

// parseRedisURL parses target into a redisURL.
//
// The scheme must be "redis". The path may name an integer database; a
// missing path or an empty path segment selects the default database 0.
func parseRedisURL(target string) (*redisURL, error) {
	u, err := url.Parse(target)
	if err != nil {
		return nil, err
	}
	if u.Scheme != "redis" {
		return nil, errors.New("no redis scheme found")
	}

	db := 0 // default redis db
	parts := strings.Split(u.Path, "/")
	// BUG FIX: "redis://host/" (trailing slash, allowed by the documented
	// form) used to reach strconv.Atoi("") and fail; an empty db segment
	// now falls back to the default database instead.
	if len(parts) > 1 && parts[1] != "" {
		db, err = strconv.Atoi(parts[1])
		if err != nil {
			return nil, err
		}
	}

	return &redisURL{
		Host:     u.Host,
		Password: u.User.String(),
		DB:       db,
	}, nil
}

View file

@ -1,49 +1,88 @@
package storage
import (
"errors"
"sync"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/stopper"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop"
)
// ErrResourceDoesNotExist is the error returned by all delete methods in the
// store if the requested resource does not exist.
var (
driversM sync.RWMutex
drivers = make(map[string]Driver)
)
// Driver is the interface used to initialize a new type of PeerStore.
type Driver interface {
NewPeerStore(cfg interface{}) (PeerStore, error)
}
// ErrResourceDoesNotExist is the error returned by all delete methods and the
// AnnouncePeers method of the PeerStore interface if the requested resource
// does not exist.
var ErrResourceDoesNotExist = bittorrent.ClientError("resource does not exist")
// ErrDriverDoesNotExist is the error returned by NewPeerStore when a peer
// store driver with that name does not exist.
var ErrDriverDoesNotExist = errors.New("peer store driver with that name does not exist")
// PeerStore is an interface that abstracts the interactions of storing and
// manipulating Peers such that it can be implemented for various data stores.
//
// Implementations of the PeerStore interface must do the following in addition
// to implementing the methods of the interface in the way documented:
//
// - Implement a garbage-collection strategy that ensures stale data is removed.
// For example, a timestamp on each InfoHash/Peer combination can be used
// to track the last activity for that Peer. The entire database can then
// be scanned periodically and too old Peers removed. The intervals and
// durations involved should be configurable.
// - IPv4 and IPv6 swarms must be isolated from each other.
// A PeerStore must be able to transparently handle IPv4 and IPv6 Peers, but
// must separate them. AnnouncePeers and ScrapeSwarm must return information
// about the Swarm matching the given AddressFamily only.
//
// Implementations can be tested against this interface using the tests in
// storage_tests.go and the benchmarks in storage_bench.go.
type PeerStore interface {
// PutSeeder adds a Seeder to the Swarm identified by the provided
// infoHash.
// InfoHash.
PutSeeder(infoHash bittorrent.InfoHash, p bittorrent.Peer) error
// DeleteSeeder removes a Seeder from the Swarm identified by the
// provided infoHash.
// provided InfoHash.
//
// If the Swarm or Peer does not exist, this function should return
// If the Swarm or Peer does not exist, this function returns
// ErrResourceDoesNotExist.
DeleteSeeder(infoHash bittorrent.InfoHash, p bittorrent.Peer) error
// PutLeecher adds a Leecher to the Swarm identified by the provided
// infoHash.
// InfoHash.
// If the Swarm does not exist already, it is created.
PutLeecher(infoHash bittorrent.InfoHash, p bittorrent.Peer) error
// DeleteLeecher removes a Leecher from the Swarm identified by the
// provided infoHash.
// provided InfoHash.
//
// If the Swarm or Peer does not exist, this function should return
// If the Swarm or Peer does not exist, this function returns
// ErrResourceDoesNotExist.
DeleteLeecher(infoHash bittorrent.InfoHash, p bittorrent.Peer) error
// GraduateLeecher promotes a Leecher to a Seeder in the Swarm
// identified by the provided infoHash.
// identified by the provided InfoHash.
//
// If the given Peer is not present as a Leecher, add the Peer as a
// Seeder and return no error.
// If the given Peer is not present as a Leecher or the swarm does not exist
// already, the Peer is added as a Seeder and no error is returned.
GraduateLeecher(infoHash bittorrent.InfoHash, p bittorrent.Peer) error
// AnnouncePeers is a best effort attempt to return Peers from the Swarm
// identified by the provided infoHash. The returned Peers are required
// to be either all IPv4 or all IPv6.
// identified by the provided InfoHash.
// The numWant parameter indicates the number of peers requested by the
// announcing Peer p. The seeder flag determines whether the Peer announced
// as a Seeder.
// The returned Peers are required to be either all IPv4 or all IPv6.
//
// The returned Peers should strive be:
// - as close to length equal to numWant as possible without going over
@ -52,21 +91,64 @@ type PeerStore interface {
// - if seeder is false, should ideally return more seeders than
// leechers
//
// Returns ErrResourceDoesNotExist if the provided infoHash is not tracked.
// Returns ErrResourceDoesNotExist if the provided InfoHash is not tracked.
AnnouncePeers(infoHash bittorrent.InfoHash, seeder bool, numWant int, p bittorrent.Peer) (peers []bittorrent.Peer, err error)
// ScrapeSwarm returns information required to answer a scrape request
// about a swarm identified by the given infohash.
// The v6 flag indicates whether or not the IPv6 swarm should be
// ScrapeSwarm returns information required to answer a Scrape request
// about a Swarm identified by the given InfoHash.
// The AddressFamily indicates whether or not the IPv6 swarm should be
// scraped.
// The Complete and Incomplete fields of the Scrape must be filled,
// filling the Snatches field is optional.
// If the infohash is unknown to the PeerStore, an empty Scrape is
// returned.
ScrapeSwarm(infoHash bittorrent.InfoHash, v6 bool) bittorrent.Scrape
//
// If the Swarm does not exist, an empty Scrape and no error is returned.
ScrapeSwarm(infoHash bittorrent.InfoHash, addressFamily bittorrent.AddressFamily) bittorrent.Scrape
// Stopper is an interface that expects a Stop method to stop the
// stop.Stopper is an interface that expects a Stop method to stop the
// PeerStore.
// For more details see the documentation in the stopper package.
stopper.Stopper
// For more details see the documentation in the stop package.
stop.Stopper
// log.Fielder returns a loggable version of the data used to configure and
// operate a particular PeerStore.
log.Fielder
}
// RegisterDriver makes a Driver available by the provided name.
//
// If called twice with the same name, the name is blank, or if the provided
// Driver is nil, this function panics.
func RegisterDriver(name string, d Driver) {
	if name == "" {
		panic("storage: could not register a Driver with an empty name")
	}
	if d == nil {
		panic("storage: could not register a nil Driver")
	}

	driversM.Lock()
	defer driversM.Unlock()

	// Registering the same name twice is a programmer error; fail fast
	// rather than silently overwriting the earlier driver.
	if _, dup := drivers[name]; dup {
		panic("storage: RegisterDriver called twice for " + name)
	}
	drivers[name] = d
}
// NewPeerStore attempts to initialize a new PeerStore instance from
// the list of registered Drivers.
//
// If a driver does not exist, returns ErrDriverDoesNotExist.
func NewPeerStore(name string, cfg interface{}) (ps PeerStore, err error) {
	driversM.RLock()
	defer driversM.RUnlock()

	// The redundant `var d Driver` declaration that preceded this lookup
	// has been removed; := already declares d here.
	d, ok := drivers[name]
	if !ok {
		return nil, ErrDriverDoesNotExist
	}

	return d.NewPeerStore(cfg)
}

View file

@ -45,7 +45,7 @@ func generatePeers() (a [1000]bittorrent.Peer) {
port := uint16(r.Uint32())
a[i] = bittorrent.Peer{
ID: bittorrent.PeerID(id),
IP: net.IP(ip),
IP: bittorrent.IP{IP: net.IP(ip), AddressFamily: bittorrent.IPv4},
Port: port,
}
}
@ -53,8 +53,10 @@ func generatePeers() (a [1000]bittorrent.Peer) {
return
}
type executionFunc func(int, PeerStore, *benchData) error
type setupFunc func(PeerStore, *benchData) error
type (
executionFunc func(int, PeerStore, *benchData) error
setupFunc func(PeerStore, *benchData) error
)
func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef executionFunc) {
bd := &benchData{generateInfohashes(), generatePeers()}
@ -95,6 +97,19 @@ func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef ex
}
}
// Nop executes a no-op for each iteration.
// It should produce the same results for each PeerStore.
// This can be used to get an estimate of the impact of the benchmark harness
// on benchmark results and an estimate of the general performance of the system
// benchmarked on.
//
// Nop can run in parallel.
func Nop(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error {
return nil
})
}
// Put benchmarks the PutSeeder method of a PeerStore by repeatedly Putting the
// same Peer for the same InfoHash.
//
@ -172,6 +187,7 @@ func PutDelete1kInfohash(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0])
if err != nil {
return err
}
return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
})
@ -198,7 +214,7 @@ func PutDelete1kInfohash1k(b *testing.B, ps PeerStore) {
// DeleteNonexist can run in parallel.
func DeleteNonexist(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
_ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
return nil
})
}
@ -209,7 +225,7 @@ func DeleteNonexist(b *testing.B, ps PeerStore) {
// DeleteNonexist can run in parallel.
func DeleteNonexist1k(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
_ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
return nil
})
}
@ -220,7 +236,7 @@ func DeleteNonexist1k(b *testing.B, ps PeerStore) {
// DeleteNonexist1kInfohash can run in parallel.
func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
_ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
return nil
})
}
@ -231,7 +247,7 @@ func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
// DeleteNonexist1kInfohash1k can run in parallel.
func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
_ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
return nil
})
}
@ -242,7 +258,7 @@ func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
// GradNonexist can run in parallel.
func GradNonexist(b *testing.B, ps PeerStore) {
	// Graduating a leecher that was never inserted is expected to fail;
	// the error is intentionally ignored.
	runBenchmark(b, ps, true, nil, func(_ int, store PeerStore, data *benchData) error {
		_ = store.GraduateLeecher(data.infohashes[0], data.peers[0])
		return nil
	})
}
@ -253,7 +269,7 @@ func GradNonexist(b *testing.B, ps PeerStore) {
// GradNonexist1k can run in parallel.
func GradNonexist1k(b *testing.B, ps PeerStore) {
	// Cycle through 1000 nonexistent leechers; the expected graduation
	// error is intentionally ignored.
	runBenchmark(b, ps, true, nil, func(iter int, store PeerStore, data *benchData) error {
		_ = store.GraduateLeecher(data.infohashes[0], data.peers[iter%1000])
		return nil
	})
}
@ -264,7 +280,7 @@ func GradNonexist1k(b *testing.B, ps PeerStore) {
// GradNonexist1kInfohash can run in parallel.
func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
	// Cycle through 1000 infohashes, always graduating a Peer that does
	// not exist; the expected error is intentionally ignored.
	runBenchmark(b, ps, true, nil, func(iter int, store PeerStore, data *benchData) error {
		_ = store.GraduateLeecher(data.infohashes[iter%1000], data.peers[0])
		return nil
	})
}
@ -276,7 +292,7 @@ func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
// GradNonexist1kInfohash1k can run in parallel.
func GradNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
	// Vary both the infohash and the (never-inserted) Peer per iteration;
	// the expected graduation error is intentionally ignored.
	runBenchmark(b, ps, true, nil, func(iter int, store PeerStore, data *benchData) error {
		_ = store.GraduateLeecher(data.infohashes[iter%1000], data.peers[iter*3%1000])
		return nil
	})
}
@ -415,3 +431,24 @@ func AnnounceSeeder1kInfohash(b *testing.B, ps PeerStore) {
return err
})
}
// ScrapeSwarm benchmarks the ScrapeSwarm method of a PeerStore.
// The swarm scraped has 500 seeders and 500 leechers.
//
// ScrapeSwarm can run in parallel.
func ScrapeSwarm(b *testing.B, ps PeerStore) {
	// putPeers seeds the swarm before the timed loop; the scrape result
	// itself is not inspected here.
	runBenchmark(b, ps, true, putPeers, func(_ int, store PeerStore, data *benchData) error {
		store.ScrapeSwarm(data.infohashes[0], bittorrent.IPv4)
		return nil
	})
}
// ScrapeSwarm1kInfohash behaves like ScrapeSwarm with one of 1000 infohashes.
//
// ScrapeSwarm1kInfohash can run in parallel.
func ScrapeSwarm1kInfohash(b *testing.B, ps PeerStore) {
	// Rotate through the 1000 pre-populated infohashes; the scrape result
	// itself is not inspected here.
	runBenchmark(b, ps, true, putPeers, func(iter int, store PeerStore, data *benchData) error {
		store.ScrapeSwarm(data.infohashes[iter%1000], bittorrent.IPv4)
		return nil
	})
}

163
storage/storage_tests.go Normal file
View file

@ -0,0 +1,163 @@
package storage
import (
"net"
"testing"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/bittorrent"
)
// PeerEqualityFunc is the boolean function to use to check two Peers for
// equality.
// Depending on the implementation of the PeerStore, this can be changed to
// use (Peer).EqualEndpoint instead.
var PeerEqualityFunc = func(a, b bittorrent.Peer) bool {
	return a.Equal(b)
}
// TestPeerStore tests a PeerStore implementation against the interface.
//
// For each address family it walks the store through its full lifecycle:
// error returns on empty swarms, leecher insertion/announce/deletion,
// seeder insertion/announce/deletion, and leecher graduation, checking
// scrape counters along the way. It finishes by stopping the store.
// NOTE(review): the steps are strictly order-dependent — each assertion
// relies on the state left behind by the previous ones.
func TestPeerStore(t *testing.T, p PeerStore) {
	// One IPv4 and one IPv6 case, so both address families are exercised.
	testData := []struct {
		ih   bittorrent.InfoHash
		peer bittorrent.Peer
	}{
		{
			bittorrent.InfoHashFromString("00000000000000000001"),
			bittorrent.Peer{ID: bittorrent.PeerIDFromString("00000000000000000001"), Port: 1, IP: bittorrent.IP{IP: net.ParseIP("1.1.1.1").To4(), AddressFamily: bittorrent.IPv4}},
		},
		{
			bittorrent.InfoHashFromString("00000000000000000002"),
			bittorrent.Peer{ID: bittorrent.PeerIDFromString("00000000000000000002"), Port: 2, IP: bittorrent.IP{IP: net.ParseIP("abab::0001"), AddressFamily: bittorrent.IPv6}},
		},
	}

	// Auxiliary peers used as the announcing peer and to keep swarms alive.
	v4Peer := bittorrent.Peer{ID: bittorrent.PeerIDFromString("99999999999999999994"), IP: bittorrent.IP{IP: net.ParseIP("99.99.99.99").To4(), AddressFamily: bittorrent.IPv4}, Port: 9994}
	v6Peer := bittorrent.Peer{ID: bittorrent.PeerIDFromString("99999999999999999996"), IP: bittorrent.IP{IP: net.ParseIP("fc00::0001"), AddressFamily: bittorrent.IPv6}, Port: 9996}

	for _, c := range testData {
		// Pick the auxiliary peer matching the address family of the case.
		peer := v4Peer
		if c.peer.IP.AddressFamily == bittorrent.IPv6 {
			peer = v6Peer
		}

		// Test ErrDNE for non-existent swarms.
		err := p.DeleteLeecher(c.ih, c.peer)
		require.Equal(t, ErrResourceDoesNotExist, err)

		err = p.DeleteSeeder(c.ih, c.peer)
		require.Equal(t, ErrResourceDoesNotExist, err)

		_, err = p.AnnouncePeers(c.ih, false, 50, peer)
		require.Equal(t, ErrResourceDoesNotExist, err)

		// Test empty scrape response for non-existent swarms.
		scrape := p.ScrapeSwarm(c.ih, c.peer.IP.AddressFamily)
		require.Equal(t, uint32(0), scrape.Complete)
		require.Equal(t, uint32(0), scrape.Incomplete)
		require.Equal(t, uint32(0), scrape.Snatches)

		// Insert dummy Peer to keep swarm active
		// Has the same address family as c.peer
		err = p.PutLeecher(c.ih, peer)
		require.Nil(t, err)

		// Test ErrDNE for non-existent seeder.
		err = p.DeleteSeeder(c.ih, peer)
		require.Equal(t, ErrResourceDoesNotExist, err)

		// Test PutLeecher -> Announce -> DeleteLeecher -> Announce
		err = p.PutLeecher(c.ih, c.peer)
		require.Nil(t, err)

		// Seeder announce (seeder=true) should also see leechers.
		peers, err := p.AnnouncePeers(c.ih, true, 50, peer)
		require.Nil(t, err)
		require.True(t, containsPeer(peers, c.peer))

		// non-seeder announce should still return the leecher
		peers, err = p.AnnouncePeers(c.ih, false, 50, peer)
		require.Nil(t, err)
		require.True(t, containsPeer(peers, c.peer))

		// Two leechers in the swarm now: the dummy peer and c.peer.
		scrape = p.ScrapeSwarm(c.ih, c.peer.IP.AddressFamily)
		require.Equal(t, uint32(2), scrape.Incomplete)
		require.Equal(t, uint32(0), scrape.Complete)

		err = p.DeleteLeecher(c.ih, c.peer)
		require.Nil(t, err)

		peers, err = p.AnnouncePeers(c.ih, true, 50, peer)
		require.Nil(t, err)
		require.False(t, containsPeer(peers, c.peer))

		// Test PutSeeder -> Announce -> DeleteSeeder -> Announce
		err = p.PutSeeder(c.ih, c.peer)
		require.Nil(t, err)

		// Should be leecher to see the seeder
		peers, err = p.AnnouncePeers(c.ih, false, 50, peer)
		require.Nil(t, err)
		require.True(t, containsPeer(peers, c.peer))

		// One leecher (the dummy peer) and one seeder (c.peer) remain.
		scrape = p.ScrapeSwarm(c.ih, c.peer.IP.AddressFamily)
		require.Equal(t, uint32(1), scrape.Incomplete)
		require.Equal(t, uint32(1), scrape.Complete)

		err = p.DeleteSeeder(c.ih, c.peer)
		require.Nil(t, err)

		peers, err = p.AnnouncePeers(c.ih, false, 50, peer)
		require.Nil(t, err)
		require.False(t, containsPeer(peers, c.peer))

		// Test PutLeecher -> Graduate -> Announce -> DeleteLeecher -> Announce
		err = p.PutLeecher(c.ih, c.peer)
		require.Nil(t, err)

		err = p.GraduateLeecher(c.ih, c.peer)
		require.Nil(t, err)

		// Has to be leecher to see the graduated seeder
		peers, err = p.AnnouncePeers(c.ih, false, 50, peer)
		require.Nil(t, err)
		require.True(t, containsPeer(peers, c.peer))

		// Deleting the Peer as a Leecher should have no effect
		err = p.DeleteLeecher(c.ih, c.peer)
		require.Equal(t, ErrResourceDoesNotExist, err)

		// Verify it's still there
		peers, err = p.AnnouncePeers(c.ih, false, 50, peer)
		require.Nil(t, err)
		require.True(t, containsPeer(peers, c.peer))

		// Clean up
		err = p.DeleteLeecher(c.ih, peer)
		require.Nil(t, err)

		// Test ErrDNE for missing leecher
		err = p.DeleteLeecher(c.ih, peer)
		require.Equal(t, ErrResourceDoesNotExist, err)

		// c.peer is a seeder after graduation; first delete succeeds,
		// second must report ErrDNE.
		err = p.DeleteSeeder(c.ih, c.peer)
		require.Nil(t, err)

		err = p.DeleteSeeder(c.ih, c.peer)
		require.Equal(t, ErrResourceDoesNotExist, err)
	}

	// Stop returns a channel; a nil receive indicates a clean shutdown.
	e := p.Stop()
	require.Nil(t, <-e)
}
// containsPeer reports whether p occurs in peers, using PeerEqualityFunc
// to compare Peers.
func containsPeer(peers []bittorrent.Peer, p bittorrent.Peer) bool {
	for i := range peers {
		if PeerEqualityFunc(peers[i], p) {
			return true
		}
	}
	return false
}