diff --git a/http/routes.go b/http/routes.go index 1ea023d..bffc418 100644 --- a/http/routes.go +++ b/http/routes.go @@ -12,6 +12,7 @@ import ( "github.com/julienschmidt/httprouter" + "github.com/chihaya/chihaya/stats" "github.com/chihaya/chihaya/tracker" "github.com/chihaya/chihaya/tracker/models" ) @@ -42,6 +43,8 @@ func (s *Server) serveAnnounce(w http.ResponseWriter, r *http.Request, p httprou return http.StatusInternalServerError, err } + stats.RecordEvent(stats.Announce) + return http.StatusOK, nil } @@ -60,6 +63,8 @@ func (s *Server) serveScrape(w http.ResponseWriter, r *http.Request, p httproute return http.StatusInternalServerError, err } + stats.RecordEvent(stats.Scrape) + return http.StatusOK, nil } diff --git a/tracker/announce.go b/tracker/announce.go index 205dec4..75c6001 100644 --- a/tracker/announce.go +++ b/tracker/announce.go @@ -56,6 +56,7 @@ func (tkr *Tracker) HandleAnnounce(ann *models.Announce, w Writer) error { if err != nil { return err } + stats.RecordEvent(stats.NewTorrent) case tkr.cfg.Private && err == ErrTorrentDNE: w.WriteError(err) @@ -87,10 +88,9 @@ func (tkr *Tracker) HandleAnnounce(ann *models.Announce, w Writer) error { // Rather than deleting the torrent explicitly, let the tracker driver // ensure there are no race conditions. 
conn.PurgeInactiveTorrent(torrent.Infohash) + stats.RecordEvent(stats.ReapedTorrent) } - stats.RecordEvent(stats.Announce) - return w.WriteAnnounce(newAnnounceResponse(ann, peer, torrent)) } @@ -120,6 +120,11 @@ func updateSwarm(c Conn, ann *models.Announce, p *models.Peer, t *models.Torrent return } t.Seeders[p.ID] = *p + if p.IPv4() { + stats.RecordEvent(stats.NewSeedIPv4) + } else if p.IPv6() { + stats.RecordEvent(stats.NewSeedIPv6) + } } else { err = c.PutLeecher(t.Infohash, p) @@ -127,6 +132,11 @@ func updateSwarm(c Conn, ann *models.Announce, p *models.Peer, t *models.Torrent return } t.Leechers[p.ID] = *p + if p.IPv4() { + stats.RecordEvent(stats.NewLeechIPv4) + } else if p.IPv6() { + stats.RecordEvent(stats.NewLeechIPv6) + } } created = true } @@ -145,12 +155,23 @@ func handleEvent(c Conn, ann *models.Announce, p *models.Peer, u *models.User, t return } delete(t.Seeders, p.ID) + if p.IPv4() { + stats.RecordEvent(stats.DeletedSeedIPv4) + } else if p.IPv6() { + stats.RecordEvent(stats.DeletedSeedIPv6) + } + } else if t.InLeecherPool(p) { err = c.DeleteLeecher(t.Infohash, p.ID) if err != nil { return } delete(t.Leechers, p.ID) + if p.IPv4() { + stats.RecordEvent(stats.DeletedLeechIPv4) + } else if p.IPv6() { + stats.RecordEvent(stats.DeletedLeechIPv6) + } } case ann.Event == "completed": @@ -175,6 +196,11 @@ func handleEvent(c Conn, ann *models.Announce, p *models.Peer, u *models.User, t } } snatched = true + if p.IPv4() { + stats.RecordEvent(stats.CompletedIPv4) + } else if p.IPv6() { + stats.RecordEvent(stats.CompletedIPv6) + } case t.InLeecherPool(p) && ann.Left == 0: // A leecher completed but the event was never received. @@ -182,6 +208,7 @@ func handleEvent(c Conn, ann *models.Announce, p *models.Peer, u *models.User, t if err != nil { return } + // TODO Should this return snatched=true and stats for completed? 
} } return diff --git a/tracker/models/models.go index 5e88437..e2cd0ab 100644 --- a/tracker/models/models.go +++ b/tracker/models/models.go @@ -70,6 +70,14 @@ func NewPeer(a *Announce, u *User, t *Torrent) *Peer { } } +func (p *Peer) IPv4() bool { + return len(p.IP.To4()) == 4 +} + +func (p *Peer) IPv6() bool { + return len(p.IP.To16()) == 16 && p.IP.To4() == nil +} + // Torrent is a swarm for a given torrent file. type Torrent struct { ID uint64 `json:"id"` diff --git a/tracker/scrape.go b/tracker/scrape.go index e33ec3a..02f3f1a 100644 --- a/tracker/scrape.go +++ b/tracker/scrape.go @@ -4,10 +4,7 @@ package tracker -import ( - "github.com/chihaya/chihaya/stats" - "github.com/chihaya/chihaya/tracker/models" -) +import "github.com/chihaya/chihaya/tracker/models" // HandleScrape encapsulates all the logic of handling a BitTorrent client's // scrape without being coupled to any transport protocol. @@ -41,7 +38,5 @@ func (tkr *Tracker) HandleScrape(scrape *models.Scrape, w Writer) (err error) { torrents = append(torrents, torrent) } - stats.RecordEvent(stats.Scrape) - return w.WriteScrape(&models.ScrapeResponse{torrents}) }