Mirror of https://source.quilibrium.com/quilibrium/ceremonyclient.git (synced 2025-11-03 23:57:26 +00:00)

perf grinding at the extremes

- removed defer/recover/panic where warranted
- found and eliminated race conditions when extremely high connection pressure was hit
- resolved race condition in blossomsub around mesh maintenance
- incorporated resource manager scaling to work off low/hi watermark values
This commit is contained in:
parent da8fcccf0d
commit 6a7cbab864
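Most of the diff below follows one pattern: reads and writes of shared maps (bs.mesh, p.peers, the connection-manager segments) are wrapped in explicit mutex Lock/Unlock or RLock/RUnlock pairs, and deferred unlocks on hot paths are replaced with explicit unlocks before each return. The following is a minimal sketch of that pattern, not the repository's code; the type and field names (router, mesh, meshMx) are illustrative only.

// Sketch of the guarded-map pattern applied throughout this commit.
// Names are hypothetical; only the locking shape mirrors the diff.
package main

import (
	"fmt"
	"sync"
)

type router struct {
	meshMx sync.RWMutex
	mesh   map[string]map[string]struct{} // bitmask -> set of peer IDs
}

// peersFor takes a read lock only around the map lookup and unlocks
// explicitly instead of deferring, mirroring the RLock/RUnlock pairs
// added around bs.mesh reads in the diff.
func (r *router) peersFor(bitmask string) (map[string]struct{}, bool) {
	r.meshMx.RLock()
	peers, ok := r.mesh[bitmask]
	r.meshMx.RUnlock()
	return peers, ok
}

// removePeer holds the write lock while mutating every mesh entry,
// the same guard the commit adds around mesh maintenance.
func (r *router) removePeer(p string) {
	r.meshMx.Lock()
	for _, peers := range r.mesh {
		delete(peers, p)
	}
	r.meshMx.Unlock()
}

func main() {
	r := &router{mesh: map[string]map[string]struct{}{
		"bitmask-a": {"peer-1": {}, "peer-2": {}},
	}}
	r.removePeer("peer-1")
	if peers, ok := r.peersFor("bitmask-a"); ok {
		fmt.Println(len(peers)) // prints 1
	}
}

The same explicit-unlock style (rather than defer) appears further down in the connection gater, connection manager, NAT, and resource-manager trace code.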
					
@@ -23,9 +23,11 @@ func TestBackoff_Update(t *testing.T) {

	b := newBackoff(ctx, size, cleanupInterval, maxBackoffAttempts)

	b.mu.Lock()
	if len(b.info) > 0 {
		t.Fatal("non-empty info map for backoff")
	}
	b.mu.Unlock()

	if d, err := b.updateAndGet(id1); d != time.Duration(0) || err != nil {
		t.Fatalf("invalid initialization: %v, \t, %s", d, err)
@@ -64,9 +66,11 @@ func TestBackoff_Update(t *testing.T) {
		t.Fatalf("invalid backoff result, expected: %v, got: %v", MinBackoffDelay, got)
	}

	b.mu.Lock()
	// sets last tried of id2 to long ago that it resets back upon next try.
	// update attempts on id2 are below threshold, hence peer should never go beyond backoff attempt threshold.
	b.info[id2].lastTried = time.Now().Add(-TimeToLive)
	b.mu.Unlock()
	got, err = b.updateAndGet(id2)
	if err != nil {
		t.Fatalf("unexpected error post update: %s", err)
@@ -75,10 +79,11 @@ func TestBackoff_Update(t *testing.T) {
		t.Fatalf("invalid ttl expiration, expected: %v, got: %v", time.Duration(0), got)
	}

	b.mu.Lock()
	if len(b.info) != 2 {
		t.Fatalf("pre-invalidation attempt, info map size mismatch, expected: %d, got: %d", 2, len(b.info))
	}

	b.mu.Unlock()
}

func TestBackoff_Clean(t *testing.T) {
@@ -96,12 +101,16 @@ func TestBackoff_Clean(t *testing.T) {
		if err != nil {
			t.Fatalf("unexpected error post update: %s", err)
		}
		b.mu.Lock()
		b.info[id].lastTried = time.Now().Add(-TimeToLive) // enforces expiry
		b.mu.Unlock()
	}

	b.mu.Lock()
	if len(b.info) != size {
		t.Fatalf("info map size mismatch, expected: %d, got: %d", size, len(b.info))
	}
	b.mu.Unlock()

	// waits for a cleanup loop to kick-in
	time.Sleep(2 * cleanupInterval)
@@ -115,8 +124,10 @@ func TestBackoff_Clean(t *testing.T) {
		t.Fatalf("invalid backoff result, expected: %v, got: %v", time.Duration(0), got)
	}

	b.mu.Lock()
	// except "some-new-peer" every other records must be cleaned up
	if len(b.info) != 1 {
		t.Fatalf("info map size mismatch, expected: %d, got: %d", 1, len(b.info))
	}
	b.mu.Unlock()
}

@ -8,6 +8,7 @@ import (
 | 
			
		||||
	"math/rand"
 | 
			
		||||
	"slices"
 | 
			
		||||
	"sort"
 | 
			
		||||
	"sync"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
 | 
			
		||||
@ -451,6 +452,7 @@ type BlossomSubRouter struct {
 | 
			
		||||
	backoff  map[string]map[peer.ID]time.Time // prune backoff
 | 
			
		||||
	connect  chan connectInfo                 // px connection requests
 | 
			
		||||
	cab      peerstore.AddrBook
 | 
			
		||||
	meshMx   sync.RWMutex
 | 
			
		||||
 | 
			
		||||
	protos  []protocol.ID
 | 
			
		||||
	feature BlossomSubFeatureTest
 | 
			
		||||
@ -625,9 +627,11 @@ func (bs *BlossomSubRouter) RemovePeer(p peer.ID) {
 | 
			
		||||
	log.Debugf("PEERDOWN: Remove disconnected peer %s", p)
 | 
			
		||||
	bs.tracer.RemovePeer(p)
 | 
			
		||||
	delete(bs.peers, p)
 | 
			
		||||
	bs.meshMx.Lock()
 | 
			
		||||
	for _, peers := range bs.mesh {
 | 
			
		||||
		delete(peers, p)
 | 
			
		||||
	}
 | 
			
		||||
	bs.meshMx.Unlock()
 | 
			
		||||
	for _, peers := range bs.fanout {
 | 
			
		||||
		delete(peers, p)
 | 
			
		||||
	}
 | 
			
		||||
@ -651,8 +655,10 @@ func (bs *BlossomSubRouter) EnoughPeers(bitmask []byte, suggested int) bool {
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	bs.meshMx.RLock()
 | 
			
		||||
	// BlossomSub peers
 | 
			
		||||
	bsPeers = len(bs.mesh[string(bitmask)])
 | 
			
		||||
	bs.meshMx.RUnlock()
 | 
			
		||||
 | 
			
		||||
	if suggested == 0 {
 | 
			
		||||
		suggested = bs.params.Dlo
 | 
			
		||||
@ -724,7 +730,9 @@ func (bs *BlossomSubRouter) handleIHave(p peer.ID, ctl *pb.ControlMessage) []*pb
 | 
			
		||||
	iwant := make(map[string]struct{})
 | 
			
		||||
	for _, ihave := range ctl.GetIhave() {
 | 
			
		||||
		bitmask := ihave.GetBitmask()
 | 
			
		||||
		bs.meshMx.RLock()
 | 
			
		||||
		_, ok := bs.mesh[string(bitmask)]
 | 
			
		||||
		bs.meshMx.RUnlock()
 | 
			
		||||
		if !ok {
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
@ -830,7 +838,9 @@ func (bs *BlossomSubRouter) handleGraft(p peer.ID, ctl *pb.ControlMessage) []*pb
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		bs.meshMx.RLock()
 | 
			
		||||
		peers, ok := bs.mesh[string(bitmask)]
 | 
			
		||||
		bs.meshMx.RUnlock()
 | 
			
		||||
		if !ok {
 | 
			
		||||
			// don't do PX when there is an unknown bitmask to avoid leaking our peers
 | 
			
		||||
			doPX = false
 | 
			
		||||
@ -919,7 +929,9 @@ func (bs *BlossomSubRouter) handlePrune(p peer.ID, ctl *pb.ControlMessage) {
 | 
			
		||||
 | 
			
		||||
	for _, prune := range ctl.GetPrune() {
 | 
			
		||||
		bitmask := prune.GetBitmask()
 | 
			
		||||
		bs.meshMx.RLock()
 | 
			
		||||
		peers, ok := bs.mesh[string(bitmask)]
 | 
			
		||||
		bs.meshMx.RUnlock()
 | 
			
		||||
		if !ok {
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
@ -1101,7 +1113,9 @@ func (bs *BlossomSubRouter) Publish(msg *Message) {
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			// BlossomSub peers
 | 
			
		||||
			bs.meshMx.RLock()
 | 
			
		||||
			gmap, ok := bs.mesh[string(bitmask)]
 | 
			
		||||
			bs.meshMx.RUnlock()
 | 
			
		||||
			if !ok {
 | 
			
		||||
				// we are not in the mesh for bitmask, use fanout peers
 | 
			
		||||
				gmap, ok = bs.fanout[string(bitmask)]
 | 
			
		||||
@ -1137,7 +1151,9 @@ func (bs *BlossomSubRouter) Publish(msg *Message) {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (bs *BlossomSubRouter) Join(bitmask []byte) {
 | 
			
		||||
	bs.meshMx.RLock()
 | 
			
		||||
	gmap, ok := bs.mesh[string(bitmask)]
 | 
			
		||||
	bs.meshMx.RUnlock()
 | 
			
		||||
	if ok {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
@ -1172,7 +1188,9 @@ func (bs *BlossomSubRouter) Join(bitmask []byte) {
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		bs.meshMx.Lock()
 | 
			
		||||
		bs.mesh[string(bitmask)] = gmap
 | 
			
		||||
		bs.meshMx.Unlock()
 | 
			
		||||
		delete(bs.fanout, string(bitmask))
 | 
			
		||||
		delete(bs.lastpub, string(bitmask))
 | 
			
		||||
	} else {
 | 
			
		||||
@ -1184,7 +1202,9 @@ func (bs *BlossomSubRouter) Join(bitmask []byte) {
 | 
			
		||||
			return !direct && !doBackOff && bs.score.Score(p) >= 0
 | 
			
		||||
		})
 | 
			
		||||
		gmap = peerListToMap(peers)
 | 
			
		||||
		bs.meshMx.Lock()
 | 
			
		||||
		bs.mesh[string(bitmask)] = gmap
 | 
			
		||||
		bs.meshMx.Unlock()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for p := range gmap {
 | 
			
		||||
@ -1195,7 +1215,9 @@ func (bs *BlossomSubRouter) Join(bitmask []byte) {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (bs *BlossomSubRouter) Leave(bitmask []byte) {
 | 
			
		||||
	bs.meshMx.RLock()
 | 
			
		||||
	gmap, ok := bs.mesh[string(bitmask)]
 | 
			
		||||
	bs.meshMx.RUnlock()
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
@ -1203,7 +1225,9 @@ func (bs *BlossomSubRouter) Leave(bitmask []byte) {
 | 
			
		||||
	log.Debugf("LEAVE %s", bitmask)
 | 
			
		||||
	bs.tracer.Leave(bitmask)
 | 
			
		||||
 | 
			
		||||
	bs.meshMx.Lock()
 | 
			
		||||
	delete(bs.mesh, string(bitmask))
 | 
			
		||||
	bs.meshMx.Unlock()
 | 
			
		||||
 | 
			
		||||
	for p := range gmap {
 | 
			
		||||
		log.Debugf("LEAVE: Remove mesh link to %s in %s", p, bitmask)
 | 
			
		||||
@ -1252,7 +1276,9 @@ func (bs *BlossomSubRouter) sendRPC(p peer.ID, out *RPC) {
 | 
			
		||||
		delete(bs.gossip, p)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	bs.p.peersMx.RLock()
 | 
			
		||||
	mch, ok := bs.p.peers[p]
 | 
			
		||||
	bs.p.peersMx.RUnlock()
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
@ -1485,6 +1511,7 @@ func (bs *BlossomSubRouter) heartbeat() {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// maintain the mesh for bitmasks we have joined
 | 
			
		||||
	bs.meshMx.Lock()
 | 
			
		||||
	for bitmask, peers := range bs.mesh {
 | 
			
		||||
		bitmask := []byte(bitmask)
 | 
			
		||||
		prunePeer := func(p peer.ID) {
 | 
			
		||||
@ -1667,6 +1694,7 @@ func (bs *BlossomSubRouter) heartbeat() {
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	bs.meshMx.Unlock()
 | 
			
		||||
 | 
			
		||||
	// expire fanout for bitmasks we haven't published to in a while
 | 
			
		||||
	now := time.Now().UnixNano()
 | 
			
		||||
@ -1921,7 +1949,9 @@ func (bs *BlossomSubRouter) piggybackControl(p peer.ID, out *RPC, ctl *pb.Contro
 | 
			
		||||
 | 
			
		||||
	for _, graft := range ctl.GetGraft() {
 | 
			
		||||
		bitmask := graft.GetBitmask()
 | 
			
		||||
		bs.meshMx.RLock()
 | 
			
		||||
		peers, ok := bs.mesh[string(bitmask)]
 | 
			
		||||
		bs.meshMx.RUnlock()
 | 
			
		||||
		if !ok {
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
@ -1933,7 +1963,9 @@ func (bs *BlossomSubRouter) piggybackControl(p peer.ID, out *RPC, ctl *pb.Contro
 | 
			
		||||
 | 
			
		||||
	for _, prune := range ctl.GetPrune() {
 | 
			
		||||
		bitmask := prune.GetBitmask()
 | 
			
		||||
		bs.meshMx.RLock()
 | 
			
		||||
		peers, ok := bs.mesh[string(bitmask)]
 | 
			
		||||
		bs.meshMx.RUnlock()
 | 
			
		||||
		if !ok {
 | 
			
		||||
			toprune = append(toprune, prune)
 | 
			
		||||
			continue
 | 
			
		||||
 | 
			
		||||
@ -65,7 +65,7 @@ func TestBlossomSubAttackSpamIWANT(t *testing.T) {
 | 
			
		||||
		data := make([]byte, 16)
 | 
			
		||||
		rand.Read(data)
 | 
			
		||||
 | 
			
		||||
		if err = bitmasks[0].Publish(ctx, bitmasks[0].bitmask, data); err != nil {
 | 
			
		||||
		if err := bitmasks[0].Publish(ctx, bitmasks[0].bitmask, data); err != nil {
 | 
			
		||||
			t.Fatal(err)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
@ -676,6 +676,7 @@ func TestBlossomSubAttackInvalidMessageSpam(t *testing.T) {
 | 
			
		||||
	ps, err := NewBlossomSub(ctx, legit,
 | 
			
		||||
		WithEventTracer(tracer),
 | 
			
		||||
		WithPeerScore(params, thresholds),
 | 
			
		||||
		WithMessageSignaturePolicy(StrictSign),
 | 
			
		||||
	)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		t.Fatal(err)
 | 
			
		||||
 | 
			
		||||
@ -2257,7 +2257,9 @@ func TestBlossomSubJoinBitmask(t *testing.T) {
 | 
			
		||||
 | 
			
		||||
	time.Sleep(time.Second)
 | 
			
		||||
 | 
			
		||||
	router0.meshMx.RLock()
 | 
			
		||||
	meshMap := router0.mesh[string([]byte{0x00, 0x00, 0x80, 0x00})]
 | 
			
		||||
	router0.meshMx.RUnlock()
 | 
			
		||||
	if len(meshMap) != 1 {
 | 
			
		||||
		t.Fatalf("Unexpect peer included in the mesh")
 | 
			
		||||
	}
 | 
			
		||||
@ -2820,14 +2822,14 @@ func TestBloomRouting(t *testing.T) {
 | 
			
		||||
			// Normally the expectation is that any subscription will do when using a bloom bitmask
 | 
			
		||||
			// But we need to verify one gets it.
 | 
			
		||||
			g := sync.WaitGroup{}
 | 
			
		||||
			g.Add(len(sub))
 | 
			
		||||
			g.Add(len(sub) + 1)
 | 
			
		||||
			errch := make(chan error)
 | 
			
		||||
			var errs []error
 | 
			
		||||
			for _, s := range sub {
 | 
			
		||||
				s := s
 | 
			
		||||
				go func() {
 | 
			
		||||
					defer g.Done()
 | 
			
		||||
					nctx, _ := context.WithDeadline(ctx, time.Now().Add(10*time.Millisecond))
 | 
			
		||||
					nctx, _ := context.WithDeadline(ctx, time.Now().Add(100*time.Millisecond))
 | 
			
		||||
					got, err := s.Next(nctx)
 | 
			
		||||
					if err != nil {
 | 
			
		||||
						errch <- err
 | 
			
		||||
@ -2842,7 +2844,7 @@ func TestBloomRouting(t *testing.T) {
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			go func() {
 | 
			
		||||
				for i := 0; i < len(sub); i++ {
 | 
			
		||||
				for _ = range sub {
 | 
			
		||||
					select {
 | 
			
		||||
					case err := <-errch:
 | 
			
		||||
						if err != nil {
 | 
			
		||||
@ -2850,6 +2852,7 @@ func TestBloomRouting(t *testing.T) {
 | 
			
		||||
						}
 | 
			
		||||
					}
 | 
			
		||||
				}
 | 
			
		||||
				g.Done()
 | 
			
		||||
			}()
 | 
			
		||||
			g.Wait()
 | 
			
		||||
			if len(errs) == len(sub) {
 | 
			
		||||
@ -2919,6 +2922,7 @@ func TestBloomPropagationOverSubTreeTopology(t *testing.T) {
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		for _, subs := range chs {
 | 
			
		||||
			subs := subs
 | 
			
		||||
			g := sync.WaitGroup{}
 | 
			
		||||
			g.Add(len(subs))
 | 
			
		||||
			nctx, cancel := context.WithCancel(ctx)
 | 
			
		||||
@ -3141,6 +3145,7 @@ func containsBitmask(bitmask []byte, slice []byte) bool {
 | 
			
		||||
 | 
			
		||||
func assertReceivedBitmaskSubgroup(t *testing.T, ctx context.Context, subs [][]*Subscription, msg []byte) {
 | 
			
		||||
	for i, subs := range subs {
 | 
			
		||||
		subs := subs
 | 
			
		||||
		g := sync.WaitGroup{}
 | 
			
		||||
		g.Add(len(subs))
 | 
			
		||||
		nctx, cancel := context.WithCancel(ctx)
 | 
			
		||||
@ -3148,7 +3153,7 @@ func assertReceivedBitmaskSubgroup(t *testing.T, ctx context.Context, subs [][]*
 | 
			
		||||
		for _, s := range subs {
 | 
			
		||||
			s := s
 | 
			
		||||
			go func() {
 | 
			
		||||
				nctx, _ := context.WithDeadline(nctx, time.Now().Add(10*time.Millisecond))
 | 
			
		||||
				nctx, _ := context.WithDeadline(nctx, time.Now().Add(100*time.Millisecond))
 | 
			
		||||
				got, err := s.Next(nctx)
 | 
			
		||||
				if err != nil {
 | 
			
		||||
					g.Done()
 | 
			
		||||
 | 
			
		||||
@ -150,7 +150,8 @@ type PubSub struct {
 | 
			
		||||
	blacklist     Blacklist
 | 
			
		||||
	blacklistPeer chan peer.ID
 | 
			
		||||
 | 
			
		||||
	peers map[peer.ID]chan *RPC
 | 
			
		||||
	peers   map[peer.ID]chan *RPC
 | 
			
		||||
	peersMx sync.RWMutex
 | 
			
		||||
 | 
			
		||||
	inboundStreamsMx sync.Mutex
 | 
			
		||||
	inboundStreams   map[peer.ID]network.Stream
 | 
			
		||||
@ -565,11 +566,13 @@ func WithAppSpecificRpcInspector(inspector func(peer.ID, *RPC) error) Option {
 | 
			
		||||
// processLoop handles all inputs arriving on the channels
 | 
			
		||||
func (p *PubSub) processLoop(ctx context.Context) {
 | 
			
		||||
	defer func() {
 | 
			
		||||
		p.peersMx.Lock()
 | 
			
		||||
		// Clean up go routines.
 | 
			
		||||
		for _, ch := range p.peers {
 | 
			
		||||
			close(ch)
 | 
			
		||||
		}
 | 
			
		||||
		p.peers = nil
 | 
			
		||||
		p.peersMx.Unlock()
 | 
			
		||||
		p.bitmasks = nil
 | 
			
		||||
		p.seenMessages.Done()
 | 
			
		||||
	}()
 | 
			
		||||
@ -582,7 +585,9 @@ func (p *PubSub) processLoop(ctx context.Context) {
 | 
			
		||||
		case s := <-p.newPeerStream:
 | 
			
		||||
			pid := s.Conn().RemotePeer()
 | 
			
		||||
 | 
			
		||||
			p.peersMx.RLock()
 | 
			
		||||
			ch, ok := p.peers[pid]
 | 
			
		||||
			p.peersMx.RUnlock()
 | 
			
		||||
			if !ok {
 | 
			
		||||
				log.Warn("new stream for unknown peer: ", pid)
 | 
			
		||||
				s.Reset()
 | 
			
		||||
@ -592,7 +597,9 @@ func (p *PubSub) processLoop(ctx context.Context) {
 | 
			
		||||
			if p.blacklist.Contains(pid) {
 | 
			
		||||
				log.Warn("closing stream for blacklisted peer: ", pid)
 | 
			
		||||
				close(ch)
 | 
			
		||||
				p.peersMx.Lock()
 | 
			
		||||
				delete(p.peers, pid)
 | 
			
		||||
				p.peersMx.Unlock()
 | 
			
		||||
				s.Reset()
 | 
			
		||||
				continue
 | 
			
		||||
			}
 | 
			
		||||
@ -600,7 +607,9 @@ func (p *PubSub) processLoop(ctx context.Context) {
 | 
			
		||||
			p.rt.AddPeer(pid, s.Protocol())
 | 
			
		||||
 | 
			
		||||
		case pid := <-p.newPeerError:
 | 
			
		||||
			p.peersMx.Lock()
 | 
			
		||||
			delete(p.peers, pid)
 | 
			
		||||
			p.peersMx.Unlock()
 | 
			
		||||
 | 
			
		||||
		case <-p.peerDead:
 | 
			
		||||
			p.handleDeadPeers()
 | 
			
		||||
@ -650,10 +659,14 @@ func (p *PubSub) processLoop(ctx context.Context) {
 | 
			
		||||
			log.Infof("Blacklisting peer %s", pid)
 | 
			
		||||
			p.blacklist.Add(pid)
 | 
			
		||||
 | 
			
		||||
			p.peersMx.RLock()
 | 
			
		||||
			ch, ok := p.peers[pid]
 | 
			
		||||
			p.peersMx.RUnlock()
 | 
			
		||||
			if ok {
 | 
			
		||||
				close(ch)
 | 
			
		||||
				p.peersMx.Lock()
 | 
			
		||||
				delete(p.peers, pid)
 | 
			
		||||
				p.peersMx.Unlock()
 | 
			
		||||
				for t, tmap := range p.bitmasks {
 | 
			
		||||
					if _, ok := tmap[pid]; ok {
 | 
			
		||||
						delete(tmap, pid)
 | 
			
		||||
@ -682,6 +695,7 @@ peerloop:
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		var peerset []peer.ID
 | 
			
		||||
		p.peersMx.RLock()
 | 
			
		||||
		for p := range p.peers {
 | 
			
		||||
			_, ok := tmap[p]
 | 
			
		||||
			if !ok {
 | 
			
		||||
@ -689,6 +703,7 @@ peerloop:
 | 
			
		||||
			}
 | 
			
		||||
			peerset = append(peerset, p)
 | 
			
		||||
		}
 | 
			
		||||
		p.peersMx.RUnlock()
 | 
			
		||||
 | 
			
		||||
		if len(peers) == 0 {
 | 
			
		||||
			peers = peerset
 | 
			
		||||
@ -728,10 +743,13 @@ func (p *PubSub) handlePendingPeers() {
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		p.peersMx.RLock()
 | 
			
		||||
		if _, ok := p.peers[pid]; ok {
 | 
			
		||||
			p.peersMx.RUnlock()
 | 
			
		||||
			log.Debug("already have connection to peer: ", pid)
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
		p.peersMx.RUnlock()
 | 
			
		||||
 | 
			
		||||
		if p.blacklist.Contains(pid) {
 | 
			
		||||
			log.Warn("ignoring connection from blacklisted peer: ", pid)
 | 
			
		||||
@ -741,7 +759,9 @@ func (p *PubSub) handlePendingPeers() {
 | 
			
		||||
		messages := make(chan *RPC, p.peerOutboundQueueSize)
 | 
			
		||||
		messages <- p.getHelloPacket()
 | 
			
		||||
		go p.handleNewPeer(p.ctx, pid, messages)
 | 
			
		||||
		p.peersMx.Lock()
 | 
			
		||||
		p.peers[pid] = messages
 | 
			
		||||
		p.peersMx.Unlock()
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -758,13 +778,17 @@ func (p *PubSub) handleDeadPeers() {
 | 
			
		||||
	p.peerDeadPrioLk.Unlock()
 | 
			
		||||
 | 
			
		||||
	for pid := range deadPeers {
 | 
			
		||||
		p.peersMx.RLock()
 | 
			
		||||
		ch, ok := p.peers[pid]
 | 
			
		||||
		p.peersMx.RUnlock()
 | 
			
		||||
		if !ok {
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		close(ch)
 | 
			
		||||
		p.peersMx.Lock()
 | 
			
		||||
		delete(p.peers, pid)
 | 
			
		||||
		p.peersMx.Unlock()
 | 
			
		||||
 | 
			
		||||
		for t, tmap := range p.bitmasks {
 | 
			
		||||
			if _, ok := tmap[pid]; ok {
 | 
			
		||||
@ -787,7 +811,9 @@ func (p *PubSub) handleDeadPeers() {
 | 
			
		||||
			log.Debugf("peer declared dead but still connected; respawning writer: %s", pid)
 | 
			
		||||
			messages := make(chan *RPC, p.peerOutboundQueueSize)
 | 
			
		||||
			messages <- p.getHelloPacket()
 | 
			
		||||
			p.peersMx.Lock()
 | 
			
		||||
			p.peers[pid] = messages
 | 
			
		||||
			p.peersMx.Unlock()
 | 
			
		||||
			go p.handleNewPeerWithBackoff(p.ctx, pid, backoffDelay, messages)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
@ -951,6 +977,7 @@ func (p *PubSub) announce(bitmask []byte, sub bool) {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	out := rpcWithSubs(subopt)
 | 
			
		||||
	p.peersMx.RLock()
 | 
			
		||||
	for pid, peer := range p.peers {
 | 
			
		||||
		select {
 | 
			
		||||
		case peer <- out:
 | 
			
		||||
@ -961,6 +988,7 @@ func (p *PubSub) announce(bitmask []byte, sub bool) {
 | 
			
		||||
			go p.announceRetry(pid, bitmask, sub)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	p.peersMx.RUnlock()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *PubSub) announceRetry(pid peer.ID, bitmask []byte, sub bool) {
 | 
			
		||||
@ -984,7 +1012,9 @@ func (p *PubSub) announceRetry(pid peer.ID, bitmask []byte, sub bool) {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *PubSub) doAnnounceRetry(pid peer.ID, bitmask []byte, sub bool) {
 | 
			
		||||
	p.peersMx.RLock()
 | 
			
		||||
	peer, ok := p.peers[pid]
 | 
			
		||||
	p.peersMx.RUnlock()
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -198,10 +198,10 @@ func (as *AmbientAutoNAT) background() {
 | 
			
		||||
		// probe finished.
 | 
			
		||||
		case err, ok := <-as.dialResponses:
 | 
			
		||||
			if !ok {
 | 
			
		||||
				close(as.backgroundRunning)
 | 
			
		||||
				as.subscriber.Close()
 | 
			
		||||
				as.emitReachabilityChanged.Close()
 | 
			
		||||
				timer.Stop()
 | 
			
		||||
				as.emitReachabilityChanged.Close()
 | 
			
		||||
				as.subscriber.Close()
 | 
			
		||||
				close(as.backgroundRunning)
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
			if IsDialRefused(err) {
 | 
			
		||||
@ -215,10 +215,10 @@ func (as *AmbientAutoNAT) background() {
 | 
			
		||||
			timerRunning = false
 | 
			
		||||
			retryProbe = false
 | 
			
		||||
		case <-as.ctx.Done():
 | 
			
		||||
			close(as.backgroundRunning)
 | 
			
		||||
			as.subscriber.Close()
 | 
			
		||||
			as.emitReachabilityChanged.Close()
 | 
			
		||||
			timer.Stop()
 | 
			
		||||
			as.emitReachabilityChanged.Close()
 | 
			
		||||
			as.subscriber.Close()
 | 
			
		||||
			close(as.backgroundRunning)
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -563,8 +563,8 @@ func (h *BasicHost) background() {
 | 
			
		||||
		case <-ticker.C:
 | 
			
		||||
		case <-h.addrChangeChan:
 | 
			
		||||
		case <-h.ctx.Done():
 | 
			
		||||
			h.refCount.Done()
 | 
			
		||||
			ticker.Stop()
 | 
			
		||||
			h.refCount.Done()
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -177,9 +177,9 @@ func NewStatsTraceReporter() (StatsTraceReporter, error) {
 | 
			
		||||
 | 
			
		||||
func (r StatsTraceReporter) ConsumeEvent(evt TraceEvt) {
 | 
			
		||||
	tags := metricshelper.GetStringSlice()
 | 
			
		||||
	defer metricshelper.PutStringSlice(tags)
 | 
			
		||||
 | 
			
		||||
	r.consumeEventWithLabelSlice(evt, tags)
 | 
			
		||||
	metricshelper.PutStringSlice(tags)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Separate func so that we can test that this function does not allocate. The syncPool may allocate.
 | 
			
		||||
 | 
			
		||||
@ -212,9 +212,9 @@ type TraceEvt struct {
 | 
			
		||||
 | 
			
		||||
func (t *trace) push(evt TraceEvt) {
 | 
			
		||||
	t.mx.Lock()
 | 
			
		||||
	defer t.mx.Unlock()
 | 
			
		||||
 | 
			
		||||
	if t.done {
 | 
			
		||||
		t.mx.Unlock()
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	evt.Time = time.Now().Format(time.RFC3339Nano)
 | 
			
		||||
@ -229,19 +229,13 @@ func (t *trace) push(evt TraceEvt) {
 | 
			
		||||
	if t.path != "" {
 | 
			
		||||
		t.pendingWrites = append(t.pendingWrites, evt)
 | 
			
		||||
	}
 | 
			
		||||
	t.mx.Unlock()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *trace) backgroundWriter(out io.WriteCloser) {
 | 
			
		||||
	defer t.wg.Done()
 | 
			
		||||
	defer out.Close()
 | 
			
		||||
 | 
			
		||||
	gzOut := gzip.NewWriter(out)
 | 
			
		||||
	defer gzOut.Close()
 | 
			
		||||
 | 
			
		||||
	jsonOut := json.NewEncoder(gzOut)
 | 
			
		||||
 | 
			
		||||
	ticker := time.NewTicker(time.Second)
 | 
			
		||||
	defer ticker.Stop()
 | 
			
		||||
 | 
			
		||||
	var pend []interface{}
 | 
			
		||||
 | 
			
		||||
@ -267,6 +261,11 @@ func (t *trace) backgroundWriter(out io.WriteCloser) {
 | 
			
		||||
				t.mx.Lock()
 | 
			
		||||
				t.done = true
 | 
			
		||||
				t.mx.Unlock()
 | 
			
		||||
 | 
			
		||||
				ticker.Stop()
 | 
			
		||||
				gzOut.Close()
 | 
			
		||||
				out.Close()
 | 
			
		||||
				t.wg.Done()
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
@ -275,6 +274,11 @@ func (t *trace) backgroundWriter(out io.WriteCloser) {
 | 
			
		||||
				t.mx.Lock()
 | 
			
		||||
				t.done = true
 | 
			
		||||
				t.mx.Unlock()
 | 
			
		||||
 | 
			
		||||
				ticker.Stop()
 | 
			
		||||
				gzOut.Close()
 | 
			
		||||
				out.Close()
 | 
			
		||||
				t.wg.Done()
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
@ -282,11 +286,19 @@ func (t *trace) backgroundWriter(out io.WriteCloser) {
 | 
			
		||||
			getEvents()
 | 
			
		||||
 | 
			
		||||
			if len(pend) == 0 {
 | 
			
		||||
				ticker.Stop()
 | 
			
		||||
				gzOut.Close()
 | 
			
		||||
				out.Close()
 | 
			
		||||
				t.wg.Done()
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			if err := t.writeEvents(pend, jsonOut); err != nil {
 | 
			
		||||
				log.Warnf("error writing rcmgr trace: %s", err)
 | 
			
		||||
				ticker.Stop()
 | 
			
		||||
				gzOut.Close()
 | 
			
		||||
				out.Close()
 | 
			
		||||
				t.wg.Done()
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
@ -294,6 +306,10 @@ func (t *trace) backgroundWriter(out io.WriteCloser) {
 | 
			
		||||
				log.Warnf("error flushing rcmgr trace: %s", err)
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			ticker.Stop()
 | 
			
		||||
			gzOut.Close()
 | 
			
		||||
			out.Close()
 | 
			
		||||
			t.wg.Done()
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -306,13 +306,11 @@ func (h *Host) Serve() error {
 | 
			
		||||
	h.httpTransportInit()
 | 
			
		||||
 | 
			
		||||
	closedWaitingForListeners := false
 | 
			
		||||
	defer func() {
 | 
			
		||||
 | 
			
		||||
	if len(h.ListenAddrs) == 0 && h.StreamHost == nil {
 | 
			
		||||
		if !closedWaitingForListeners {
 | 
			
		||||
			close(h.httpTransport.waitingForListeners)
 | 
			
		||||
		}
 | 
			
		||||
	}()
 | 
			
		||||
 | 
			
		||||
	if len(h.ListenAddrs) == 0 && h.StreamHost == nil {
 | 
			
		||||
		return ErrNoListeners
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -329,6 +327,9 @@ func (h *Host) Serve() error {
 | 
			
		||||
	if h.StreamHost != nil {
 | 
			
		||||
		listener, err := streamHostListen(h.StreamHost)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			if !closedWaitingForListeners {
 | 
			
		||||
				close(h.httpTransport.waitingForListeners)
 | 
			
		||||
			}
 | 
			
		||||
			return err
 | 
			
		||||
		}
 | 
			
		||||
		h.httpTransport.listeners = append(h.httpTransport.listeners, listener)
 | 
			
		||||
@ -348,6 +349,9 @@ func (h *Host) Serve() error {
 | 
			
		||||
	err := h.setupListeners(errCh)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		closeAllListeners()
 | 
			
		||||
		if !closedWaitingForListeners {
 | 
			
		||||
			close(h.httpTransport.waitingForListeners)
 | 
			
		||||
		}
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -356,6 +360,9 @@ func (h *Host) Serve() error {
 | 
			
		||||
 | 
			
		||||
	if len(h.httpTransport.listeners) == 0 || len(h.httpTransport.listenAddrs) == 0 {
 | 
			
		||||
		closeAllListeners()
 | 
			
		||||
		if !closedWaitingForListeners {
 | 
			
		||||
			close(h.httpTransport.waitingForListeners)
 | 
			
		||||
		}
 | 
			
		||||
		return ErrNoListeners
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -372,7 +379,9 @@ func (h *Host) Serve() error {
 | 
			
		||||
		<-errCh
 | 
			
		||||
	}
 | 
			
		||||
	close(errCh)
 | 
			
		||||
 | 
			
		||||
	if !closedWaitingForListeners {
 | 
			
		||||
		close(h.httpTransport.waitingForListeners)
 | 
			
		||||
	}
 | 
			
		||||
	return err
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -437,8 +446,9 @@ func (s *streamReadCloser) Close() error {
 | 
			
		||||
func (rt *streamRoundTripper) GetPeerMetadata() (PeerMeta, error) {
 | 
			
		||||
	ctx := context.Background()
 | 
			
		||||
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(WellKnownRequestTimeout))
 | 
			
		||||
	defer cancel()
 | 
			
		||||
	return rt.httpHost.getAndStorePeerMetadata(ctx, rt, rt.server)
 | 
			
		||||
	peerMeta, err := rt.httpHost.getAndStorePeerMetadata(ctx, rt, rt.server)
 | 
			
		||||
	cancel()
 | 
			
		||||
	return peerMeta, err
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// RoundTrip implements http.RoundTripper.
 | 
			
		||||
@ -460,11 +470,11 @@ func (rt *streamRoundTripper) RoundTrip(r *http.Request) (*http.Response, error)
 | 
			
		||||
	r.Header.Add("connection", "close")
 | 
			
		||||
 | 
			
		||||
	go func() {
 | 
			
		||||
		defer s.CloseWrite()
 | 
			
		||||
		r.Write(s)
 | 
			
		||||
		if r.Body != nil {
 | 
			
		||||
			r.Body.Close()
 | 
			
		||||
		}
 | 
			
		||||
		s.CloseWrite()
 | 
			
		||||
	}()
 | 
			
		||||
 | 
			
		||||
	if deadline, ok := r.Context().Deadline(); ok {
 | 
			
		||||
@ -511,12 +521,13 @@ func (rt *roundTripperForSpecificServer) GetPeerMetadata() (PeerMeta, error) {
 | 
			
		||||
 | 
			
		||||
	ctx := context.Background()
 | 
			
		||||
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(WellKnownRequestTimeout))
 | 
			
		||||
	defer cancel()
 | 
			
		||||
	wk, err := rt.httpHost.getAndStorePeerMetadata(ctx, rt, rt.server)
 | 
			
		||||
	if err == nil {
 | 
			
		||||
		rt.cachedProtos = wk
 | 
			
		||||
		cancel()
 | 
			
		||||
		return wk, nil
 | 
			
		||||
	}
 | 
			
		||||
	cancel()
 | 
			
		||||
	return wk, err
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -579,14 +590,15 @@ func (rt *namespacedRoundTripper) RoundTrip(r *http.Request) (*http.Response, er
 | 
			
		||||
func (h *Host) NamespaceRoundTripper(roundtripper http.RoundTripper, p protocol.ID, server peer.ID) (*namespacedRoundTripper, error) {
 | 
			
		||||
	ctx := context.Background()
 | 
			
		||||
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(WellKnownRequestTimeout))
 | 
			
		||||
	defer cancel()
 | 
			
		||||
	protos, err := h.getAndStorePeerMetadata(ctx, roundtripper, server)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		cancel()
 | 
			
		||||
		return &namespacedRoundTripper{}, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	v, ok := protos[p]
 | 
			
		||||
	if !ok {
 | 
			
		||||
		cancel()
 | 
			
		||||
		return &namespacedRoundTripper{}, fmt.Errorf("no protocol %s for server %s", p, server)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -598,9 +610,11 @@ func (h *Host) NamespaceRoundTripper(roundtripper http.RoundTripper, p protocol.
 | 
			
		||||
 | 
			
		||||
	u, err := url.Parse(path)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		cancel()
 | 
			
		||||
		return &namespacedRoundTripper{}, fmt.Errorf("invalid path %s for protocol %s for server %s", v.Path, p, server)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cancel()
 | 
			
		||||
	return &namespacedRoundTripper{
 | 
			
		||||
		RoundTripper:      roundtripper,
 | 
			
		||||
		protocolPrefix:    u.Path,
 | 
			
		||||
@ -866,9 +880,9 @@ func requestPeerMeta(ctx context.Context, roundtripper http.RoundTripper, wellKn
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	defer resp.Body.Close()
 | 
			
		||||
 | 
			
		||||
	if resp.StatusCode != http.StatusOK {
 | 
			
		||||
		resp.Body.Close()
 | 
			
		||||
		return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -878,9 +892,11 @@ func requestPeerMeta(ctx context.Context, roundtripper http.RoundTripper, wellKn
 | 
			
		||||
		N: peerMetadataLimit,
 | 
			
		||||
	}).Decode(&meta)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		resp.Body.Close()
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	resp.Body.Close()
 | 
			
		||||
	return meta, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -48,20 +48,23 @@ func SendPing(client http.Client) error {
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
	defer resp.Body.Close()
 | 
			
		||||
 | 
			
		||||
	if resp.StatusCode != http.StatusOK {
 | 
			
		||||
		resp.Body.Close()
 | 
			
		||||
		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	rBody := [pingSize]byte{}
 | 
			
		||||
	_, err = io.ReadFull(resp.Body, rBody[:])
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		resp.Body.Close()
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if !bytes.Equal(body[:], rBody[:]) {
 | 
			
		||||
		resp.Body.Close()
 | 
			
		||||
		return errors.New("ping body mismatch")
 | 
			
		||||
	}
 | 
			
		||||
	resp.Body.Close()
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -133,9 +133,8 @@ func (cg *BasicConnectionGater) BlockPeer(p peer.ID) error {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cg.Lock()
 | 
			
		||||
	defer cg.Unlock()
 | 
			
		||||
	cg.blockedPeers[p] = struct{}{}
 | 
			
		||||
 | 
			
		||||
	cg.Unlock()
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -150,23 +149,21 @@ func (cg *BasicConnectionGater) UnblockPeer(p peer.ID) error {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cg.Lock()
 | 
			
		||||
	defer cg.Unlock()
 | 
			
		||||
 | 
			
		||||
	delete(cg.blockedPeers, p)
 | 
			
		||||
 | 
			
		||||
	cg.Unlock()
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ListBlockedPeers return a list of blocked peers
 | 
			
		||||
func (cg *BasicConnectionGater) ListBlockedPeers() []peer.ID {
 | 
			
		||||
	cg.RLock()
 | 
			
		||||
	defer cg.RUnlock()
 | 
			
		||||
 | 
			
		||||
	result := make([]peer.ID, 0, len(cg.blockedPeers))
 | 
			
		||||
	for p := range cg.blockedPeers {
 | 
			
		||||
		result = append(result, p)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cg.RUnlock()
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -182,10 +179,9 @@ func (cg *BasicConnectionGater) BlockAddr(ip net.IP) error {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cg.Lock()
 | 
			
		||||
	defer cg.Unlock()
 | 
			
		||||
 | 
			
		||||
	cg.blockedAddrs[ip.String()] = struct{}{}
 | 
			
		||||
 | 
			
		||||
	cg.Unlock()
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -200,24 +196,22 @@ func (cg *BasicConnectionGater) UnblockAddr(ip net.IP) error {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cg.Lock()
 | 
			
		||||
	defer cg.Unlock()
 | 
			
		||||
 | 
			
		||||
	delete(cg.blockedAddrs, ip.String())
 | 
			
		||||
 | 
			
		||||
	cg.Unlock()
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ListBlockedAddrs return a list of blocked IP addresses
 | 
			
		||||
func (cg *BasicConnectionGater) ListBlockedAddrs() []net.IP {
 | 
			
		||||
	cg.RLock()
 | 
			
		||||
	defer cg.RUnlock()
 | 
			
		||||
 | 
			
		||||
	result := make([]net.IP, 0, len(cg.blockedAddrs))
 | 
			
		||||
	for ipStr := range cg.blockedAddrs {
 | 
			
		||||
		ip := net.ParseIP(ipStr)
 | 
			
		||||
		result = append(result, ip)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cg.RUnlock()
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -233,10 +227,9 @@ func (cg *BasicConnectionGater) BlockSubnet(ipnet *net.IPNet) error {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cg.Lock()
 | 
			
		||||
	defer cg.Unlock()
 | 
			
		||||
 | 
			
		||||
	cg.blockedSubnets[ipnet.String()] = ipnet
 | 
			
		||||
 | 
			
		||||
	cg.Unlock()
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -251,23 +244,21 @@ func (cg *BasicConnectionGater) UnblockSubnet(ipnet *net.IPNet) error {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cg.Lock()
 | 
			
		||||
	defer cg.Unlock()
 | 
			
		||||
 | 
			
		||||
	delete(cg.blockedSubnets, ipnet.String())
 | 
			
		||||
 | 
			
		||||
	cg.Unlock()
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ListBlockedSubnets return a list of blocked IP subnets
 | 
			
		||||
func (cg *BasicConnectionGater) ListBlockedSubnets() []*net.IPNet {
 | 
			
		||||
	cg.RLock()
 | 
			
		||||
	defer cg.RUnlock()
 | 
			
		||||
 | 
			
		||||
	result := make([]*net.IPNet, 0, len(cg.blockedSubnets))
 | 
			
		||||
	for _, ipnet := range cg.blockedSubnets {
 | 
			
		||||
		result = append(result, ipnet)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cg.RUnlock()
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -276,60 +267,66 @@ var _ connmgr.ConnectionGater = (*BasicConnectionGater)(nil)
 | 
			
		||||
 | 
			
		||||
func (cg *BasicConnectionGater) InterceptPeerDial(p peer.ID) (allow bool) {
 | 
			
		||||
	cg.RLock()
 | 
			
		||||
	defer cg.RUnlock()
 | 
			
		||||
 | 
			
		||||
	_, block := cg.blockedPeers[p]
 | 
			
		||||
	cg.RUnlock()
 | 
			
		||||
	return !block
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (cg *BasicConnectionGater) InterceptAddrDial(p peer.ID, a ma.Multiaddr) (allow bool) {
 | 
			
		||||
	// we have already filtered blocked peers in InterceptPeerDial, so we just check the IP
 | 
			
		||||
	cg.RLock()
 | 
			
		||||
	defer cg.RUnlock()
 | 
			
		||||
 | 
			
		||||
	ip, err := manet.ToIP(a)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		cg.RUnlock()
 | 
			
		||||
		log.Warnf("error converting multiaddr to IP addr: %s", err)
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	_, block := cg.blockedAddrs[ip.String()]
 | 
			
		||||
	if block {
 | 
			
		||||
		cg.RUnlock()
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, ipnet := range cg.blockedSubnets {
 | 
			
		||||
		if ipnet.Contains(ip) {
 | 
			
		||||
			cg.RUnlock()
 | 
			
		||||
			return false
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cg.RUnlock()
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (cg *BasicConnectionGater) InterceptAccept(cma network.ConnMultiaddrs) (allow bool) {
 | 
			
		||||
	cg.RLock()
 | 
			
		||||
	defer cg.RUnlock()
 | 
			
		||||
 | 
			
		||||
	a := cma.RemoteMultiaddr()
 | 
			
		||||
 | 
			
		||||
	ip, err := manet.ToIP(a)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		cg.RUnlock()
 | 
			
		||||
		log.Warnf("error converting multiaddr to IP addr: %s", err)
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	_, block := cg.blockedAddrs[ip.String()]
 | 
			
		||||
	if block {
 | 
			
		||||
		cg.RUnlock()
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, ipnet := range cg.blockedSubnets {
 | 
			
		||||
		if ipnet.Contains(ip) {
 | 
			
		||||
			cg.RUnlock()
 | 
			
		||||
			return false
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cg.RUnlock()
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -341,9 +338,9 @@ func (cg *BasicConnectionGater) InterceptSecured(dir network.Direction, p peer.I
 | 
			
		||||
 | 
			
		||||
	// we have already filtered addrs in InterceptAccept, so we just check the peer ID
 | 
			
		||||
	cg.RLock()
 | 
			
		||||
	defer cg.RUnlock()
 | 
			
		||||
 | 
			
		||||
	_, block := cg.blockedPeers[p]
 | 
			
		||||
	cg.RUnlock()
 | 
			
		||||
	return !block
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -169,8 +169,6 @@ func (cm *BasicConnMgr) memoryEmergency() {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cm.trimMutex.Lock()
 | 
			
		||||
	defer atomic.AddUint64(&cm.trimCount, 1)
 | 
			
		||||
	defer cm.trimMutex.Unlock()
 | 
			
		||||
 | 
			
		||||
	// Trim connections without paying attention to the silence period.
 | 
			
		||||
	for _, c := range cm.getConnsToCloseEmergency(target) {
 | 
			
		||||
@ -182,6 +180,8 @@ func (cm *BasicConnMgr) memoryEmergency() {
 | 
			
		||||
	cm.lastTrimMu.Lock()
 | 
			
		||||
	cm.lastTrim = cm.clock.Now()
 | 
			
		||||
	cm.lastTrimMu.Unlock()
 | 
			
		||||
	atomic.AddUint64(&cm.trimCount, 1)
 | 
			
		||||
	cm.trimMutex.Unlock()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (cm *BasicConnMgr) Close() error {
 | 
			
		||||
@ -198,7 +198,6 @@ func (cm *BasicConnMgr) Close() error {
 | 
			
		||||
 | 
			
		||||
func (cm *BasicConnMgr) Protect(id peer.ID, tag string) {
 | 
			
		||||
	cm.plk.Lock()
 | 
			
		||||
	defer cm.plk.Unlock()
 | 
			
		||||
 | 
			
		||||
	tags, ok := cm.protected[id]
 | 
			
		||||
	if !ok {
 | 
			
		||||
@ -206,37 +205,42 @@ func (cm *BasicConnMgr) Protect(id peer.ID, tag string) {
 | 
			
		||||
		cm.protected[id] = tags
 | 
			
		||||
	}
 | 
			
		||||
	tags[tag] = struct{}{}
 | 
			
		||||
	cm.plk.Unlock()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (cm *BasicConnMgr) Unprotect(id peer.ID, tag string) (protected bool) {
 | 
			
		||||
	cm.plk.Lock()
 | 
			
		||||
	defer cm.plk.Unlock()
 | 
			
		||||
 | 
			
		||||
	tags, ok := cm.protected[id]
 | 
			
		||||
	if !ok {
 | 
			
		||||
		cm.plk.Unlock()
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
	if delete(tags, tag); len(tags) == 0 {
 | 
			
		||||
		delete(cm.protected, id)
 | 
			
		||||
		cm.plk.Unlock()
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
	cm.plk.Unlock()
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (cm *BasicConnMgr) IsProtected(id peer.ID, tag string) (protected bool) {
 | 
			
		||||
	cm.plk.Lock()
 | 
			
		||||
	defer cm.plk.Unlock()
 | 
			
		||||
 | 
			
		||||
	tags, ok := cm.protected[id]
 | 
			
		||||
	if !ok {
 | 
			
		||||
		cm.plk.Unlock()
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if tag == "" {
 | 
			
		||||
		cm.plk.Unlock()
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	_, protected = tags[tag]
 | 
			
		||||
	cm.plk.Unlock()
 | 
			
		||||
	return protected
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -281,22 +285,30 @@ func (p peerInfos) SortByValueAndStreams(segments *segments, sortByMoreStreams b
 | 
			
		||||
		// lock this to protect from concurrent modifications from connect/disconnect events
 | 
			
		||||
		leftSegment := segments.get(left.id)
 | 
			
		||||
		leftSegment.Lock()
 | 
			
		||||
		defer leftSegment.Unlock()
 | 
			
		||||
 | 
			
		||||
		rightSegment := segments.get(right.id)
 | 
			
		||||
		rsLocked := false
 | 
			
		||||
		if leftSegment != rightSegment {
 | 
			
		||||
			// These two peers are not in the same segment, lets get the lock
 | 
			
		||||
			rightSegment.Lock()
 | 
			
		||||
			defer rightSegment.Unlock()
 | 
			
		||||
			rsLocked = true
 | 
			
		||||
		}
 | 
			
		||||
		segments.bucketsMu.Unlock()
 | 
			
		||||
 | 
			
		||||
		// temporary peers are preferred for pruning.
 | 
			
		||||
		if left.temp != right.temp {
 | 
			
		||||
			leftSegment.Unlock()
 | 
			
		||||
			if rsLocked {
 | 
			
		||||
				rightSegment.Unlock()
 | 
			
		||||
			}
 | 
			
		||||
			return left.temp
 | 
			
		||||
		}
 | 
			
		||||
		// otherwise, compare by value.
 | 
			
		||||
		if left.value != right.value {
 | 
			
		||||
			leftSegment.Unlock()
 | 
			
		||||
			if rsLocked {
 | 
			
		||||
				rightSegment.Unlock()
 | 
			
		||||
			}
 | 
			
		||||
			return left.value < right.value
 | 
			
		||||
		}
 | 
			
		||||
		incomingAndStreams := func(m map[network.Conn]time.Time) (incoming bool, numStreams int) {
 | 
			
		||||
@ -313,13 +325,25 @@ func (p peerInfos) SortByValueAndStreams(segments *segments, sortByMoreStreams b
 | 
			
		||||
		rightIncoming, rightStreams := incomingAndStreams(right.conns)
 | 
			
		||||
		// prefer closing inactive connections (no streams open)
 | 
			
		||||
		if rightStreams != leftStreams && (leftStreams == 0 || rightStreams == 0) {
 | 
			
		||||
			leftSegment.Unlock()
 | 
			
		||||
			if rsLocked {
 | 
			
		||||
				rightSegment.Unlock()
 | 
			
		||||
			}
 | 
			
		||||
			return leftStreams < rightStreams
 | 
			
		||||
		}
 | 
			
		||||
		// incoming connections are preferred for pruning
 | 
			
		||||
		if leftIncoming != rightIncoming {
 | 
			
		||||
			leftSegment.Unlock()
 | 
			
		||||
			if rsLocked {
 | 
			
		||||
				rightSegment.Unlock()
 | 
			
		||||
			}
 | 
			
		||||
			return leftIncoming
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		leftSegment.Unlock()
 | 
			
		||||
		if rsLocked {
 | 
			
		||||
			rightSegment.Unlock()
 | 
			
		||||
		}
 | 
			
		||||
		if sortByMoreStreams {
 | 
			
		||||
			// prune connections with a higher number of streams first
 | 
			
		||||
			return rightStreams < leftStreams
 | 
			
		||||
@ -345,15 +369,12 @@ func (cm *BasicConnMgr) TrimOpenConns(_ context.Context) {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (cm *BasicConnMgr) background() {
 | 
			
		||||
	defer cm.refCount.Done()
 | 
			
		||||
 | 
			
		||||
	interval := cm.cfg.gracePeriod / 2
 | 
			
		||||
	if cm.cfg.silencePeriod != 0 {
 | 
			
		||||
		interval = cm.cfg.silencePeriod
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ticker := cm.clock.Ticker(interval)
 | 
			
		||||
	defer ticker.Stop()
 | 
			
		||||
 | 
			
		||||
	for {
 | 
			
		||||
		select {
 | 
			
		||||
@ -363,6 +384,8 @@ func (cm *BasicConnMgr) background() {
 | 
			
		||||
				continue
 | 
			
		||||
			}
 | 
			
		||||
		case <-cm.ctx.Done():
 | 
			
		||||
			cm.refCount.Done()
 | 
			
		||||
			ticker.Stop()
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
		cm.trim()
 | 
			
		||||
@ -373,7 +396,6 @@ func (cm *BasicConnMgr) doTrim() {
 | 
			
		||||
	// This logic is mimicking the implementation of sync.Once in the standard library.
 | 
			
		||||
	count := atomic.LoadUint64(&cm.trimCount)
 | 
			
		||||
	cm.trimMutex.Lock()
 | 
			
		||||
	defer cm.trimMutex.Unlock()
 | 
			
		||||
	if count == atomic.LoadUint64(&cm.trimCount) {
 | 
			
		||||
		cm.trim()
 | 
			
		||||
		cm.lastTrimMu.Lock()
 | 
			
		||||
@ -381,6 +403,7 @@ func (cm *BasicConnMgr) doTrim() {
 | 
			
		||||
		cm.lastTrimMu.Unlock()
 | 
			
		||||
		atomic.AddUint64(&cm.trimCount, 1)
 | 
			
		||||
	}
 | 
			
		||||
	cm.trimMutex.Unlock()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// trim starts the trim, if the last trim happened before the configured silence period.
 | 
			
		||||
@ -544,10 +567,10 @@ func (cm *BasicConnMgr) getConnsToClose() []network.Conn {
 | 
			
		||||
func (cm *BasicConnMgr) GetTagInfo(p peer.ID) *connmgr.TagInfo {
 | 
			
		||||
	s := cm.segments.get(p)
 | 
			
		||||
	s.Lock()
 | 
			
		||||
	defer s.Unlock()
 | 
			
		||||
 | 
			
		||||
	pi, ok := s.peers[p]
 | 
			
		||||
	if !ok {
 | 
			
		||||
		s.Unlock()
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -567,7 +590,7 @@ func (cm *BasicConnMgr) GetTagInfo(p peer.ID) *connmgr.TagInfo {
 | 
			
		||||
	for c, t := range pi.conns {
 | 
			
		||||
		out.Conns[c.RemoteMultiaddr().String()] = t
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	s.Unlock()
 | 
			
		||||
	return out
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -575,37 +598,37 @@ func (cm *BasicConnMgr) GetTagInfo(p peer.ID) *connmgr.TagInfo {
 | 
			
		||||
func (cm *BasicConnMgr) TagPeer(p peer.ID, tag string, val int) {
 | 
			
		||||
	s := cm.segments.get(p)
 | 
			
		||||
	s.Lock()
 | 
			
		||||
	defer s.Unlock()
 | 
			
		||||
 | 
			
		||||
	pi := s.tagInfoFor(p, cm.clock.Now())
 | 
			
		||||
 | 
			
		||||
	// Update the total value of the peer.
 | 
			
		||||
	pi.value += val - pi.tags[tag]
 | 
			
		||||
	pi.tags[tag] = val
 | 
			
		||||
	s.Unlock()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// UntagPeer is called to disassociate a string and integer from a given peer.
 | 
			
		||||
func (cm *BasicConnMgr) UntagPeer(p peer.ID, tag string) {
 | 
			
		||||
	s := cm.segments.get(p)
 | 
			
		||||
	s.Lock()
 | 
			
		||||
	defer s.Unlock()
 | 
			
		||||
 | 
			
		||||
	pi, ok := s.peers[p]
 | 
			
		||||
	if !ok {
 | 
			
		||||
		log.Info("tried to remove tag from untracked peer: ", p)
 | 
			
		||||
		s.Unlock()
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Update the total value of the peer.
 | 
			
		||||
	pi.value -= pi.tags[tag]
 | 
			
		||||
	delete(pi.tags, tag)
 | 
			
		||||
	s.Unlock()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// UpsertTag is called to insert/update a peer tag
 | 
			
		||||
func (cm *BasicConnMgr) UpsertTag(p peer.ID, tag string, upsert func(int) int) {
 | 
			
		||||
	s := cm.segments.get(p)
 | 
			
		||||
	s.Lock()
 | 
			
		||||
	defer s.Unlock()
 | 
			
		||||
 | 
			
		||||
	pi := s.tagInfoFor(p, cm.clock.Now())
 | 
			
		||||
 | 
			
		||||
@ -613,6 +636,7 @@ func (cm *BasicConnMgr) UpsertTag(p peer.ID, tag string, upsert func(int) int) {
 | 
			
		||||
	newval := upsert(oldval)
 | 
			
		||||
	pi.value += newval - oldval
 | 
			
		||||
	pi.tags[tag] = newval
 | 
			
		||||
	s.Unlock()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CMInfo holds the configuration for BasicConnMgr, as well as status data.
 | 
			
		||||
@ -670,7 +694,6 @@ func (nn *cmNotifee) Connected(n network.Network, c network.Conn) {
 | 
			
		||||
	p := c.RemotePeer()
 | 
			
		||||
	s := cm.segments.get(p)
 | 
			
		||||
	s.Lock()
 | 
			
		||||
	defer s.Unlock()
 | 
			
		||||
 | 
			
		||||
	id := c.RemotePeer()
 | 
			
		||||
	pinfo, ok := s.peers[id]
 | 
			
		||||
@ -694,11 +717,13 @@ func (nn *cmNotifee) Connected(n network.Network, c network.Conn) {
 | 
			
		||||
	_, ok = pinfo.conns[c]
 | 
			
		||||
	if ok {
 | 
			
		||||
		log.Error("received connected notification for conn we are already tracking: ", p)
 | 
			
		||||
		s.Unlock()
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	pinfo.conns[c] = cm.clock.Now()
 | 
			
		||||
	cm.connCount.Add(1)
 | 
			
		||||
	s.Unlock()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Disconnected is called by notifiers to inform that an existing connection has been closed or terminated.
 | 
			
		||||
@ -709,17 +734,18 @@ func (nn *cmNotifee) Disconnected(n network.Network, c network.Conn) {
 | 
			
		||||
	p := c.RemotePeer()
 | 
			
		||||
	s := cm.segments.get(p)
 | 
			
		||||
	s.Lock()
 | 
			
		||||
	defer s.Unlock()
 | 
			
		||||
 | 
			
		||||
	cinf, ok := s.peers[p]
 | 
			
		||||
	if !ok {
 | 
			
		||||
		log.Error("received disconnected notification for peer we are not tracking: ", p)
 | 
			
		||||
		s.Unlock()
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	_, ok = cinf.conns[c]
 | 
			
		||||
	if !ok {
 | 
			
		||||
		log.Error("received disconnected notification for conn we are not tracking: ", p)
 | 
			
		||||
		s.Unlock()
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -728,6 +754,7 @@ func (nn *cmNotifee) Disconnected(n network.Network, c network.Conn) {
 | 
			
		||||
		delete(s.peers, p)
 | 
			
		||||
	}
 | 
			
		||||
	cm.connCount.Add(-1)
 | 
			
		||||
	s.Unlock()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Listen is no-op in this implementation.
 | 
			
		||||
 | 
			
		||||
@ -100,9 +100,9 @@ func NewDecayer(cfg *DecayerCfg, mgr *BasicConnMgr) (*decayer, error) {
 | 
			
		||||
 | 
			
		||||
func (d *decayer) RegisterDecayingTag(name string, interval time.Duration, decayFn connmgr.DecayFn, bumpFn connmgr.BumpFn) (connmgr.DecayingTag, error) {
 | 
			
		||||
	d.tagsMu.Lock()
 | 
			
		||||
	defer d.tagsMu.Unlock()
 | 
			
		||||
 | 
			
		||||
	if _, ok := d.knownTags[name]; ok {
 | 
			
		||||
		d.tagsMu.Unlock()
 | 
			
		||||
		return nil, fmt.Errorf("decaying tag with name %s already exists", name)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -128,6 +128,7 @@ func (d *decayer) RegisterDecayingTag(name string, interval time.Duration, decay
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	d.knownTags[name] = tag
 | 
			
		||||
	d.tagsMu.Unlock()
 | 
			
		||||
	return tag, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -150,10 +151,7 @@ func (d *decayer) Close() error {
 | 
			
		||||
//  2. Applies score bumps.
 | 
			
		||||
//  3. Yields when closed.
 | 
			
		||||
func (d *decayer) process() {
 | 
			
		||||
	defer close(d.doneCh)
 | 
			
		||||
 | 
			
		||||
	ticker := d.clock.Ticker(d.cfg.Resolution)
 | 
			
		||||
	defer ticker.Stop()
 | 
			
		||||
 | 
			
		||||
	var (
 | 
			
		||||
		bmp   bumpCmd
 | 
			
		||||
@ -276,6 +274,8 @@ func (d *decayer) process() {
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
		case <-d.closeCh:
 | 
			
		||||
			ticker.Stop()
 | 
			
		||||
			close(d.doneCh)
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@@ -63,8 +63,8 @@ func DiscoverNAT(ctx context.Context) (*NAT, error) {
	}
	nat.refCount.Add(1)
	go func() {
		defer nat.refCount.Done()
		nat.background()
		nat.refCount.Done()
	}()
	return nat, nil
}
@@ -101,15 +101,17 @@ func (nat *NAT) Close() error {

func (nat *NAT) GetMapping(protocol string, port int) (addr netip.AddrPort, found bool) {
	nat.mappingmu.Lock()
	defer nat.mappingmu.Unlock()

	if !nat.extAddr.IsValid() {
		nat.mappingmu.Unlock()
		return netip.AddrPort{}, false
	}
	extPort, found := nat.mappings[entry{protocol: protocol, port: port}]
	if !found {
		nat.mappingmu.Unlock()
		return netip.AddrPort{}, false
	}
	nat.mappingmu.Unlock()
	return netip.AddrPortFrom(nat.extAddr, uint16(extPort)), true
}

@@ -126,9 +128,9 @@ func (nat *NAT) AddMapping(ctx context.Context, protocol string, port int) error
	}

	nat.mappingmu.Lock()
	defer nat.mappingmu.Unlock()

	if nat.closed {
		nat.mappingmu.Unlock()
		return errors.New("closed")
	}

@@ -136,6 +138,7 @@ func (nat *NAT) AddMapping(ctx context.Context, protocol string, port int) error
	// allowing users -- in the optimistic case -- to use results right after.
	extPort := nat.establishMapping(ctx, protocol, port)
	nat.mappings[entry{protocol: protocol, port: port}] = extPort
	nat.mappingmu.Unlock()
	return nil
}

@@ -143,17 +146,19 @@ func (nat *NAT) AddMapping(ctx context.Context, protocol string, port int) error
// It blocks until the NAT has removed the mapping.
func (nat *NAT) RemoveMapping(ctx context.Context, protocol string, port int) error {
	nat.mappingmu.Lock()
	defer nat.mappingmu.Unlock()

	switch protocol {
	case "tcp", "udp":
		e := entry{protocol: protocol, port: port}
		if _, ok := nat.mappings[e]; ok {
			delete(nat.mappings, e)
			nat.mappingmu.Unlock()
			return nat.nat.DeletePortMapping(ctx, protocol, port)
		}
		nat.mappingmu.Unlock()
		return errors.New("unknown mapping")
	default:
		nat.mappingmu.Unlock()
		return fmt.Errorf("invalid protocol: %s", protocol)
	}
}
@@ -166,7 +171,6 @@ func (nat *NAT) background() {
	nextAddrUpdate := now.Add(CacheTime)

	t := time.NewTimer(minTime(nextMappingUpdate, nextAddrUpdate).Sub(now)) // don't use a ticker here. We don't know how long establishing the mappings takes.
	defer t.Stop()

	var in []entry
	var out []int // port numbers
@@ -209,12 +213,13 @@ func (nat *NAT) background() {
		case <-nat.ctx.Done():
			nat.mappingmu.Lock()
			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			defer cancel()
			for e := range nat.mappings {
				delete(nat.mappings, e)
				nat.nat.DeletePortMapping(ctx, e.protocol, e.port)
			}
			nat.mappingmu.Unlock()
			t.Stop()
			cancel()
			return
		}
	}

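The RemoveMapping and background changes above show the shape this commit applies to the NAT code: release the mutex explicitly before any slow external call instead of holding it through a defer. A minimal sketch of that pattern with a hypothetical mapper type (the names and the del callback are illustrative, not from this codebase):

package natsketch

import (
	"context"
	"errors"
	"sync"
)

// mapper is a stand-in for the real NAT type; only the locking shape matters.
type mapper struct {
	mu       sync.Mutex
	mappings map[string]int
}

// remove deletes the entry under the lock, then performs the potentially slow
// external call only after the lock is released, so other callers are not
// blocked behind the network round trip.
func (m *mapper) remove(ctx context.Context, key string, del func(context.Context, string) error) error {
	m.mu.Lock()
	if _, ok := m.mappings[key]; !ok {
		m.mu.Unlock()
		return errors.New("unknown mapping")
	}
	delete(m.mappings, key)
	m.mu.Unlock() // release before the blocking call
	return del(ctx, key)
}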
@@ -60,11 +60,12 @@ func (c *pskConn) Write(in []byte) (int, error) {
		c.writeS20 = salsa20.New(c.psk, nonce)
	}
	out := pool.Get(len(in))
	defer pool.Put(out)

	c.writeS20.XORKeyStream(out, in) // encrypt

	return c.Conn.Write(out) // send
	n, err := c.Conn.Write(out)
	pool.Put(out)
	return n, err // send
}

var _ net.Conn = (*pskConn)(nil)

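In the Write path above, the deferred pool.Put becomes an explicit Put on the single exit, so the pooled buffer is returned without paying defer overhead on a hot write path. A small sketch of the same idea using sync.Pool and hypothetical names (not the real pool package used here):

package psksketch

import "sync"

var bufPool = sync.Pool{New: func() any { return make([]byte, 0, 4096) }}

// writeThrough copies data through a pooled scratch buffer and returns the
// buffer explicitly on the one exit path instead of deferring the Put.
func writeThrough(dst func([]byte) (int, error), in []byte) (int, error) {
	buf := bufPool.Get().([]byte)
	buf = append(buf[:0], in...) // stand-in for the real keystream transform
	n, err := dst(buf)
	bufPool.Put(buf[:0]) // explicit return of the buffer; no defer on the hot path
	return n, err
}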
@@ -25,7 +25,6 @@ func setupPSKConns(ctx context.Context, t *testing.T) (net.Conn, net.Conn) {

func TestPSKSimpelMessges(t *testing.T) {
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	psk1, psk2 := setupPSKConns(ctx, t)
	msg1 := []byte("hello world")
@@ -53,11 +52,11 @@ func TestPSKSimpelMessges(t *testing.T) {
	if !bytes.Equal(msg1, out1) {
		t.Fatalf("input and output are not the same")
	}
	cancel()
}

func TestPSKFragmentation(t *testing.T) {
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	psk1, psk2 := setupPSKConns(ctx, t)

@@ -87,4 +86,5 @@ func TestPSKFragmentation(t *testing.T) {
	if err := <-wch; err != nil {
		t.Fatal(err)
	}
	cancel()
}

@@ -51,12 +51,12 @@ func (n *network) getDialer(network string) *dialer {
	n.mu.RUnlock()
	if d == nil {
		n.mu.Lock()
		defer n.mu.Unlock()

		if n.dialer == nil {
			n.dialer = newDialer(n.listeners)
		}
		d = n.dialer
		n.mu.Unlock()
	}
	return d
}

@@ -68,7 +68,6 @@ func (t *Transport) Listen(laddr ma.Multiaddr) (manet.Listener, error) {
	}

	n.mu.Lock()
	defer n.mu.Unlock()

	if n.listeners == nil {
		n.listeners = make(map[*listener]struct{})
@@ -76,5 +75,6 @@ func (t *Transport) Listen(laddr ma.Multiaddr) (manet.Listener, error) {
	n.listeners[list] = struct{}{}
	n.dialer = nil

	n.mu.Unlock()
	return list, nil
}

@@ -77,13 +77,13 @@ type blackHoleFilter struct {
// fraction over the last n outcomes is less than the minSuccessFraction of the filter.
func (b *blackHoleFilter) RecordResult(success bool) {
	b.mu.Lock()
	defer b.mu.Unlock()

	if b.state == blackHoleStateBlocked && success {
		// If the call succeeds in a blocked state we reset to allowed.
		// This is better than slowly accumulating values till we cross the minSuccessFraction
		// threshold since a blackhole is a binary property.
		b.reset()
		b.mu.Unlock()
		return
	}

@@ -101,22 +101,25 @@ func (b *blackHoleFilter) RecordResult(success bool) {

	b.updateState()
	b.trackMetrics()
	b.mu.Unlock()
}

// HandleRequest returns the result of applying the black hole filter for the request.
func (b *blackHoleFilter) HandleRequest() blackHoleResult {
	b.mu.Lock()
	defer b.mu.Unlock()

	b.requests++

	b.trackMetrics()

	if b.state == blackHoleStateAllowed {
		b.mu.Unlock()
		return blackHoleResultAllowed
	} else if b.state == blackHoleStateProbing || b.requests%b.n == 0 {
		b.mu.Unlock()
		return blackHoleResultProbing
	} else {
		b.mu.Unlock()
		return blackHoleResultBlocked
	}
}

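HandleRequest above is the manual-unlock variant of a guarded state machine: every return branch must release the mutex it took, which is the discipline that replaces the old defer. A compact sketch with a hypothetical gate type (illustrative only):

package gatesketch

import "sync"

type gate struct {
	mu       sync.Mutex
	requests int
	open     bool
}

// decide must release the lock on every branch; missing one branch would
// deadlock later callers, which is the cost of dropping defer.
func (g *gate) decide() string {
	g.mu.Lock()
	g.requests++
	if g.open {
		g.mu.Unlock()
		return "allowed"
	}
	if g.requests%10 == 0 {
		g.mu.Unlock()
		return "probing"
	}
	g.mu.Unlock()
	return "blocked"
}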
@@ -50,18 +50,19 @@ func newConnectednessEventEmitter(connectedness func(peer.ID) network.Connectedn

func (c *connectednessEventEmitter) AddConn(p peer.ID) {
	c.mx.RLock()
	defer c.mx.RUnlock()
	if c.ctx.Err() != nil {
		c.mx.RUnlock()
		return
	}

	c.newConns <- p
	c.mx.RUnlock()
}

func (c *connectednessEventEmitter) RemoveConn(p peer.ID) {
	c.mx.RLock()
	defer c.mx.RUnlock()
	if c.ctx.Err() != nil {
		c.mx.RUnlock()
		return
	}

@@ -80,6 +81,7 @@ func (c *connectednessEventEmitter) RemoveConn(p peer.ID) {
	case c.removeConnNotif <- struct{}{}:
	default:
	}
	c.mx.RUnlock()
}

func (c *connectednessEventEmitter) Close() {
@@ -88,7 +90,6 @@ func (c *connectednessEventEmitter) Close() {
}

func (c *connectednessEventEmitter) runEmitter() {
	defer c.wg.Done()
	for {
		select {
		case p := <-c.newConns:
@@ -97,7 +98,6 @@ func (c *connectednessEventEmitter) runEmitter() {
			c.sendConnRemovedNotifications()
		case <-c.ctx.Done():
			c.mx.Lock() // Wait for all pending AddConn & RemoveConn operations to complete
			defer c.mx.Unlock()
			for {
				select {
				case p := <-c.newConns:
@@ -105,6 +105,8 @@ func (c *connectednessEventEmitter) runEmitter() {
				case <-c.removeConnNotif:
					c.sendConnRemovedNotifications()
				default:
					c.mx.Unlock()
					c.wg.Done()
					return
				}
			}

@@ -67,7 +67,6 @@ func (ad *activeDial) dial(ctx context.Context) (*Conn, error) {

func (ds *dialSync) getActiveDial(p peer.ID) (*activeDial, error) {
	ds.mutex.Lock()
	defer ds.mutex.Unlock()

	actd, ok := ds.dials[p]
	if !ok {
@@ -84,6 +83,7 @@ func (ds *dialSync) getActiveDial(p peer.ID) (*activeDial, error) {
	}
	// increase ref count before dropping mutex
	actd.refCnt++
	ds.mutex.Unlock()
	return actd, nil
}

@@ -98,7 +98,6 @@ func (ds *dialSync) Dial(ctx context.Context, p peer.ID) (*Conn, error) {
	conn, err := ad.dial(ctx)

	ds.mutex.Lock()
	defer ds.mutex.Unlock()

	ad.refCnt--
	if ad.refCnt == 0 {
@@ -111,5 +110,6 @@ func (ds *dialSync) Dial(ctx context.Context, p peer.ID) (*Conn, error) {
		delete(ds.dials, p)
	}

	ds.mutex.Unlock()
	return conn, err
}

@@ -109,8 +109,6 @@ func newDialWorker(s *Swarm, p peer.ID, reqch <-chan dialRequest, cl Clock) *dia
// The loop exits when w.reqch is closed.
func (w *dialWorker) loop() {
	w.wg.Add(1)
	defer w.wg.Done()
	defer w.s.limiter.clearAllPeerDials(w.peer)

	// dq is used to pace dials to different addresses of the peer
	dq := newDialQueue()
@@ -120,7 +118,6 @@ func (w *dialWorker) loop() {
	startTime := w.cl.Now()
	// dialTimer is the dialTimer used to trigger dials
	dialTimer := w.cl.InstantTimer(startTime.Add(math.MaxInt64))
	defer dialTimer.Stop()

	timerRunning := true
	// scheduleNextDial updates timer for triggering the next dial
@@ -164,6 +161,9 @@ loop:
				if w.s.metricsTracer != nil {
					w.s.metricsTracer.DialCompleted(w.connected, totalDials)
				}
				w.wg.Done()
				w.s.limiter.clearAllPeerDials(w.peer)
				dialTimer.Stop()
				return
			}
			// We have received a new request. If we do not have a suitable connection,

@@ -124,12 +124,12 @@ func (dl *dialLimiter) freePeerToken(dj *dialJob) {

func (dl *dialLimiter) finishedDial(dj *dialJob) {
	dl.lk.Lock()
	defer dl.lk.Unlock()
	if dl.shouldConsumeFd(dj.addr) {
		dl.freeFDToken()
	}

	dl.freePeerToken(dj)
	dl.lk.Unlock()
}

func (dl *dialLimiter) shouldConsumeFd(addr ma.Multiaddr) bool {
@@ -182,33 +182,32 @@ func (dl *dialLimiter) addCheckPeerLimit(dj *dialJob) {
// it will put it on the waitlist for the requested token.
func (dl *dialLimiter) AddDialJob(dj *dialJob) {
	dl.lk.Lock()
	defer dl.lk.Unlock()

	log.Debugf("[limiter] adding a dial job through limiter: %v", dj.addr)
	dl.addCheckPeerLimit(dj)
	dl.lk.Unlock()
}

func (dl *dialLimiter) clearAllPeerDials(p peer.ID) {
	dl.lk.Lock()
	defer dl.lk.Unlock()
	delete(dl.waitingOnPeerLimit, p)
	log.Debugf("[limiter] clearing all peer dials: %v", p)
	// NB: the waitingOnFd list doesn't need to be cleaned out here, we will
	// remove them as we encounter them because they are 'cancelled' at this
	// point
	dl.lk.Unlock()
}

// executeDial calls the dialFunc, and reports the result through the response
// channel when finished. Once the response is sent it also releases all tokens
// it held during the dial.
func (dl *dialLimiter) executeDial(j *dialJob) {
	defer dl.finishedDial(j)
	if j.cancelled() {
		dl.finishedDial(j)
		return
	}

	dctx, cancel := context.WithTimeout(j.ctx, j.timeout)
	defer cancel()

	con, err := dl.dialFunc(dctx, j.peer, j.addr, j.resp)
	kind := transport.UpdateKindDialSuccessful
@@ -222,4 +221,6 @@ func (dl *dialLimiter) executeDial(j *dialJob) {
			con.Close()
		}
	}
	dl.finishedDial(j)
	cancel()
}

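executeDial above swaps two defers for explicit cleanup at each exit: the context is cancelled and the limiter token returned on both the early-cancelled path and the normal path. A minimal sketch of that structure with hypothetical callbacks:

package dialsketch

import (
	"context"
	"time"
)

// runWithTimeout performs one attempt and releases its resources explicitly
// on both exit paths instead of deferring cancel and the token release.
func runWithTimeout(ctx context.Context, timeout time.Duration, attempt func(context.Context) error, release func()) error {
	if ctx.Err() != nil {
		release() // the job was cancelled before it started; still return the token
		return ctx.Err()
	}
	dctx, cancel := context.WithTimeout(ctx, timeout)
	err := attempt(dctx)
	cancel()  // stop the timeout timer promptly
	release() // return the limiter token on this path as well
	return err
}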
@@ -328,10 +328,10 @@ func (s *Swarm) close() {
		if closer, ok := t.(io.Closer); ok {
			wg.Add(1)
			go func(c io.Closer) {
				defer wg.Done()
				if err := closer.Close(); err != nil {
					log.Errorf("error when closing down transport %T: %s", c, err)
				}
				wg.Done()
			}(closer)
		}
	}
@@ -526,14 +526,12 @@ func (s *Swarm) waitForDirectConn(ctx context.Context, p peer.ID) (*Conn, error)

	// apply the DialPeer timeout
	ctx, cancel := context.WithTimeout(ctx, network.GetDialPeerTimeout(ctx))
	defer cancel()

	// Wait for notification.
	select {
	case <-ctx.Done():
		// Remove ourselves from the notification list
		s.directConnNotifs.Lock()
		defer s.directConnNotifs.Unlock()

		s.directConnNotifs.m[p] = slices.DeleteFunc(
			s.directConnNotifs.m[p],
@@ -542,17 +540,22 @@ func (s *Swarm) waitForDirectConn(ctx context.Context, p peer.ID) (*Conn, error)
		if len(s.directConnNotifs.m[p]) == 0 {
			delete(s.directConnNotifs.m, p)
		}
		s.directConnNotifs.Unlock()
		cancel()
		return nil, ctx.Err()
	case <-ch:
		// We do not need to remove ourselves from the list here as the notifier
		// clears the map entry
		c := s.bestConnToPeer(p)
		if c == nil {
			cancel()
			return nil, network.ErrNoConn
		}
		if c.Stat().Limited {
			cancel()
			return nil, network.ErrLimitedConn
		}
		cancel()
		return c, nil
	}
}
@@ -562,12 +565,12 @@ func (s *Swarm) ConnsToPeer(p peer.ID) []network.Conn {
	// TODO: Consider sorting the connection list best to worst. Currently,
	// it's sorted oldest to newest.
	s.conns.RLock()
	defer s.conns.RUnlock()
	conns := s.conns.m[p]
	output := make([]network.Conn, len(conns))
	for i, c := range conns {
		output[i] = c
	}
	s.conns.RUnlock()
	return output
}

@@ -610,7 +613,6 @@ func (s *Swarm) bestConnToPeer(p peer.ID) *Conn {
	// For now, prefers direct connections over Relayed connections.
	// For tie-breaking, select the newest non-closed connection with the most streams.
	s.conns.RLock()
	defer s.conns.RUnlock()

	var best *Conn
	for _, c := range s.conns.m[p] {
@@ -622,6 +624,7 @@ func (s *Swarm) bestConnToPeer(p peer.ID) *Conn {
			best = c
		}
	}
	s.conns.RUnlock()
	return best
}

@@ -648,9 +651,9 @@ func isDirectConn(c *Conn) bool {
// network.Connected`.
func (s *Swarm) Connectedness(p peer.ID) network.Connectedness {
	s.conns.RLock()
	defer s.conns.RUnlock()

	return s.connectednessUnlocked(p)
	connectedness := s.connectednessUnlocked(p)
	s.conns.RUnlock()
	return connectedness
}

// connectednessUnlocked returns the connectedness of a peer.
@@ -676,7 +679,6 @@ func (s *Swarm) connectednessUnlocked(p peer.ID) network.Connectedness {
// Conns returns a slice of all connections.
func (s *Swarm) Conns() []network.Conn {
	s.conns.RLock()
	defer s.conns.RUnlock()

	conns := make([]network.Conn, 0, len(s.conns.m))
	for _, cs := range s.conns.m {
@@ -684,6 +686,7 @@ func (s *Swarm) Conns() []network.Conn {
			conns = append(conns, c)
		}
	}
	s.conns.RUnlock()
	return conns
}

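The swarm accessors above share one shape: take the read lock, copy what is needed into a local, release the lock explicitly, then return the copy. A short sketch with a hypothetical registry type (illustrative only):

package swarmsketch

import "sync"

type registry struct {
	mu    sync.RWMutex
	items map[string][]int
}

// snapshot copies the slice for key while holding the read lock and returns
// the copy only after the explicit RUnlock, so callers never see internal state.
func (r *registry) snapshot(key string) []int {
	r.mu.RLock()
	src := r.items[key]
	out := make([]int, len(src))
	copy(out, src)
	r.mu.RUnlock()
	return out
}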
@@ -720,12 +723,13 @@ func (s *Swarm) ClosePeer(p peer.ID) error {
// Peers returns a copy of the set of peers swarm is connected to.
func (s *Swarm) Peers() []peer.ID {
	s.conns.RLock()
	defer s.conns.RUnlock()

	peers := make([]peer.ID, 0, len(s.conns.m))
	for p := range s.conns.m {
		peers = append(peers, p)
	}

	s.conns.RUnlock()
	return peers
}


@@ -11,8 +11,9 @@ import (
// ListenAddresses returns a list of addresses at which this swarm listens.
func (s *Swarm) ListenAddresses() []ma.Multiaddr {
	s.listeners.RLock()
	defer s.listeners.RUnlock()
	return s.listenAddressesNoLock()
	mas := s.listenAddressesNoLock()
	s.listeners.RUnlock()
	return mas
}

func (s *Swarm) listenAddressesNoLock() []ma.Multiaddr {

@@ -88,13 +88,13 @@ func (c *Conn) doClose() {
	go func() {
		// prevents us from issuing close notifications before finishing the open notifications
		c.notifyLk.Lock()
		defer c.notifyLk.Unlock()

		// Only notify for disconnection if we notified for connection
		c.swarm.notifyAll(func(f network.Notifiee) {
			f.Disconnected(c.swarm, c)
		})
		c.swarm.refs.Done()
		c.notifyLk.Unlock()
	}()
}

@@ -112,11 +112,11 @@ func (c *Conn) removeStream(s *Stream) {
// swarm ref count.
func (c *Conn) start() {
	go func() {
		defer c.swarm.refs.Done()
		defer c.Close()
		for {
			ts, err := c.conn.AcceptStream()
			if err != nil {
				c.swarm.refs.Done()
				c.Close()
				return
			}
			scope, err := c.swarm.ResourceManager().OpenStream(c.RemotePeer(), network.DirInbound)
@@ -192,8 +192,9 @@ func (c *Conn) ConnState() network.ConnectionState {
// Stat returns metadata pertaining to this connection
func (c *Conn) Stat() network.ConnStats {
	c.streams.Lock()
	defer c.streams.Unlock()
	return c.stat
	stats := c.stat
	c.streams.Unlock()
	return stats
}

// NewStream returns a new Stream from this connection
@@ -260,11 +261,11 @@ func (c *Conn) addStream(ts network.MuxedStream, dir network.Direction, scope ne
// GetStreams returns the streams associated with this connection.
func (c *Conn) GetStreams() []network.Stream {
	c.streams.Lock()
	defer c.streams.Unlock()
	streams := make([]network.Stream, 0, len(c.streams.m))
	for s := range c.streams.m {
		streams = append(streams, s)
	}
	c.streams.Unlock()
	return streams
}


@@ -122,10 +122,10 @@ func (db *DialBackoff) init(ctx context.Context) {

func (db *DialBackoff) background(ctx context.Context) {
	ticker := time.NewTicker(BackoffMax)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			ticker.Stop()
			return
		case <-ticker.C:
			db.cleanup()
@@ -137,9 +137,9 @@ func (db *DialBackoff) background(ctx context.Context) {
// peer p at address addr
func (db *DialBackoff) Backoff(p peer.ID, addr ma.Multiaddr) (backoff bool) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	ap, found := db.entries[p][string(addr.Bytes())]
	db.lock.RUnlock()
	return found && time.Now().Before(ap.until)
}

@@ -163,7 +163,6 @@ var BackoffMax = time.Minute * 5
func (db *DialBackoff) AddBackoff(p peer.ID, addr ma.Multiaddr) {
	saddr := string(addr.Bytes())
	db.lock.Lock()
	defer db.lock.Unlock()
	bp, ok := db.entries[p]
	if !ok {
		bp = make(map[string]*backoffAddr, 1)
@@ -175,6 +174,7 @@ func (db *DialBackoff) AddBackoff(p peer.ID, addr ma.Multiaddr) {
			tries: 1,
			until: time.Now().Add(BackoffBase),
		}
		db.lock.Unlock()
		return
	}

@@ -184,19 +184,19 @@ func (db *DialBackoff) AddBackoff(p peer.ID, addr ma.Multiaddr) {
	}
	ba.until = time.Now().Add(backoffTime)
	ba.tries++
	db.lock.Unlock()
}

// Clear removes a backoff record. Clients should call this after a
// successful Dial.
func (db *DialBackoff) Clear(p peer.ID) {
	db.lock.Lock()
	defer db.lock.Unlock()
	delete(db.entries, p)
	db.lock.Unlock()
}

func (db *DialBackoff) cleanup() {
	db.lock.Lock()
	defer db.lock.Unlock()
	now := time.Now()
	for p, e := range db.entries {
		good := false
@@ -214,6 +214,7 @@ func (db *DialBackoff) cleanup() {
			delete(db.entries, p)
		}
	}
	db.lock.Unlock()
}

// DialPeer connects to a peer. Use network.WithForceDirectDial to force a
@@ -260,7 +261,6 @@ func (s *Swarm) dialPeer(ctx context.Context, p peer.ID) (*Conn, error) {

	// apply the DialPeer timeout
	ctx, cancel := context.WithTimeout(ctx, network.GetDialPeerTimeout(ctx))
	defer cancel()

	conn, err = s.dsync.Dial(ctx, p)
	if err == nil {
@@ -269,8 +269,10 @@ func (s *Swarm) dialPeer(ctx context.Context, p peer.ID) (*Conn, error) {
		if conn.RemotePeer() != p {
			conn.Close()
			log.Errorw("Handshake failed to properly authenticate peer", "authenticated", conn.RemotePeer(), "expected", p)
			cancel()
			return nil, fmt.Errorf("unexpected peer")
		}
		cancel()
		return conn, nil
	}

@@ -278,14 +280,17 @@ func (s *Swarm) dialPeer(ctx context.Context, p peer.ID) (*Conn, error) {

	if ctx.Err() != nil {
		// Context error trumps any dial errors as it was likely the ultimate cause.
		cancel()
		return nil, ctx.Err()
	}

	if s.ctx.Err() != nil {
		// Ok, so the swarm is shutting down.
		cancel()
		return nil, ErrSwarmClosed
	}

	cancel()
	return nil, err
}


@@ -105,7 +105,7 @@ func (s *Swarm) AddListenAddr(a ma.Multiaddr) error {
	})

	go func() {
		defer func() {
		cleanup := func() {
			s.listeners.Lock()
			_, ok := s.listeners.m[list]
			if ok {
@@ -124,13 +124,14 @@ func (s *Swarm) AddListenAddr(a ma.Multiaddr) error {
				n.ListenClose(s, maddr)
			})
			s.refs.Done()
		}()
		}
		for {
			c, err := list.Accept()
			if err != nil {
				if !errors.Is(err, transport.ErrListenerClosed) {
					log.Errorf("swarm listener for %s accept error: %s", a, err)
				}
				cleanup()
				return
			}
			canonicallog.LogPeerStatus(100, c.RemotePeer(), c.RemoteMultiaddr(), "connection_status", "established", "dir", "inbound")
@@ -141,17 +142,19 @@ func (s *Swarm) AddListenAddr(a ma.Multiaddr) error {
			log.Debugf("swarm listener accepted connection: %s <-> %s", c.LocalMultiaddr(), c.RemoteMultiaddr())
			s.refs.Add(1)
			go func() {
				defer s.refs.Done()
				_, err := s.addConn(c, network.DirInbound)
				switch err {
				case nil:
				case ErrSwarmClosed:
					// ignore.
					s.refs.Done()
					return
				default:
					log.Warnw("adding connection failed", "to", a, "error", err)
					s.refs.Done()
					return
				}
				s.refs.Done()
			}()
		}
	}()

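The accept loop above replaces a deferred anonymous cleanup with a named closure called on the exit path, and the per-connection goroutine now releases its reference in every branch. A compact sketch of that shape with hypothetical names:

package listensketch

import "sync"

// acceptLoop calls a named cleanup on the exit path and releases the ref
// count in each branch of the handler goroutine instead of deferring either.
func acceptLoop(accept func() (int, error), handle func(int) error, refs *sync.WaitGroup) {
	cleanup := func() {
		// listener teardown would go here
	}
	for {
		c, err := accept()
		if err != nil {
			cleanup()
			return
		}
		refs.Add(1)
		go func(c int) {
			if err := handle(c); err != nil {
				refs.Done() // release on the failure branch
				return
			}
			refs.Done() // and on the success branch
		}(c)
	}
}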
@@ -183,7 +183,6 @@ func appendConnectionState(tags []string, cs network.ConnectionState) []string {

func (m *metricsTracer) OpenedConnection(dir network.Direction, p crypto.PubKey, cs network.ConnectionState, laddr ma.Multiaddr) {
	tags := metricshelper.GetStringSlice()
	defer metricshelper.PutStringSlice(tags)

	*tags = append(*tags, metricshelper.GetDirection(dir))
	*tags = appendConnectionState(*tags, cs)
@@ -195,11 +194,11 @@ func (m *metricsTracer) OpenedConnection(dir network.Direction, p crypto.PubKey,
	*tags = append(*tags, metricshelper.GetDirection(dir))
	*tags = append(*tags, p.Type().String())
	keyTypes.WithLabelValues(*tags...).Inc()
	metricshelper.PutStringSlice(tags)
}

func (m *metricsTracer) ClosedConnection(dir network.Direction, duration time.Duration, cs network.ConnectionState, laddr ma.Multiaddr) {
	tags := metricshelper.GetStringSlice()
	defer metricshelper.PutStringSlice(tags)

	*tags = append(*tags, metricshelper.GetDirection(dir))
	*tags = appendConnectionState(*tags, cs)
@@ -207,16 +206,17 @@ func (m *metricsTracer) ClosedConnection(dir network.Direction, duration time.Du
	*tags = append(*tags, ipv)
	connsClosed.WithLabelValues(*tags...).Inc()
	connDuration.WithLabelValues(*tags...).Observe(duration.Seconds())
	metricshelper.PutStringSlice(tags)
}

func (m *metricsTracer) CompletedHandshake(t time.Duration, cs network.ConnectionState, laddr ma.Multiaddr) {
	tags := metricshelper.GetStringSlice()
	defer metricshelper.PutStringSlice(tags)

	*tags = appendConnectionState(*tags, cs)
	ipv, _ := metricshelper.GetIPVersion(laddr)
	*tags = append(*tags, ipv)
	connHandshakeLatency.WithLabelValues(*tags...).Observe(t.Seconds())
	metricshelper.PutStringSlice(tags)
}

func (m *metricsTracer) FailedDialing(addr ma.Multiaddr, dialErr error, cause error) {
@@ -246,17 +246,16 @@ func (m *metricsTracer) FailedDialing(addr ma.Multiaddr, dialErr error, cause er
	}

	tags := metricshelper.GetStringSlice()
	defer metricshelper.PutStringSlice(tags)

	*tags = append(*tags, transport, e)
	ipv, _ := metricshelper.GetIPVersion(addr)
	*tags = append(*tags, ipv)
	dialError.WithLabelValues(*tags...).Inc()
	metricshelper.PutStringSlice(tags)
}

func (m *metricsTracer) DialCompleted(success bool, totalDials int) {
	tags := metricshelper.GetStringSlice()
	defer metricshelper.PutStringSlice(tags)
	if success {
		*tags = append(*tags, "success")
	} else {
@@ -272,6 +271,7 @@ func (m *metricsTracer) DialCompleted(success bool, totalDials int) {
	}
	*tags = append(*tags, numDials)
	dialsPerPeer.WithLabelValues(*tags...).Inc()
	metricshelper.PutStringSlice(tags)
}

func (m *metricsTracer) DialRankingDelay(d time.Duration) {
@@ -281,11 +281,11 @@ func (m *metricsTracer) DialRankingDelay(d time.Duration) {
func (m *metricsTracer) UpdatedBlackHoleFilterState(name string, state blackHoleState,
	nextProbeAfter int, successFraction float64) {
	tags := metricshelper.GetStringSlice()
	defer metricshelper.PutStringSlice(tags)

	*tags = append(*tags, name)

	blackHoleFilterState.WithLabelValues(*tags...).Set(float64(state))
	blackHoleFilterSuccessFraction.WithLabelValues(*tags...).Set(successFraction)
	blackHoleFilterNextRequestAllowedAfter.WithLabelValues(*tags...).Set(float64(nextProbeAfter))
	metricshelper.PutStringSlice(tags)
}

@@ -93,8 +93,8 @@ func (s *Stream) Reset() error {

func (s *Stream) closeAndRemoveStream() {
	s.closeMx.Lock()
	defer s.closeMx.Unlock()
	if s.isClosed {
		s.closeMx.Unlock()
		return
	}
	s.isClosed = true
@@ -104,6 +104,7 @@ func (s *Stream) closeAndRemoveStream() {
	if s.acceptStreamGoroutineCompleted {
		s.conn.removeStream(s)
	}
	s.closeMx.Unlock()
}

// CloseWrite closes the stream for writing, flushing all data and sending an EOF.
@@ -121,14 +122,15 @@ func (s *Stream) CloseRead() error {

func (s *Stream) completeAcceptStreamGoroutine() {
	s.closeMx.Lock()
	defer s.closeMx.Unlock()
	if s.acceptStreamGoroutineCompleted {
		s.closeMx.Unlock()
		return
	}
	s.acceptStreamGoroutineCompleted = true
	if s.isClosed {
		s.conn.removeStream(s)
	}
	s.closeMx.Unlock()
}

// Protocol returns the protocol negotiated on this stream (if set).

@@ -51,8 +51,9 @@ func (t *transportConn) Scope() network.ConnScope {
}

func (t *transportConn) Close() error {
	defer t.scope.Done()
	return t.MuxedConn.Close()
	err := t.MuxedConn.Close()
	t.scope.Done()
	return err
}

func (t *transportConn) ConnState() network.ConnectionState {

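Close above is the mechanical rewrite for a deferred release wrapped around a single return: capture the result in a local, release, then return it. A minimal sketch with a hypothetical scoped closer (illustrative only):

package connsketch

import "io"

type scopedCloser struct {
	inner   io.Closer
	release func()
}

// Close captures the inner error first, releases the scope explicitly, and
// only then returns, replacing `defer release(); return inner.Close()`.
func (s *scopedCloser) Close() error {
	err := s.inner.Close()
	s.release()
	return err
}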
@@ -59,7 +59,7 @@ func (l *listener) Close() error {
//     mechanism while still allowing us to negotiate connections in parallel.
func (l *listener) handleIncoming() {
	var wg sync.WaitGroup
	defer func() {
	cleanup := func() {
		// make sure we're closed
		l.Listener.Close()
		if l.err == nil {
@@ -68,7 +68,7 @@ func (l *listener) handleIncoming() {

		wg.Wait()
		close(l.incoming)
	}()
	}

	var catcher tec.TempErrCatcher
	for l.ctx.Err() == nil {
@@ -80,6 +80,7 @@ func (l *listener) handleIncoming() {
				continue
			}
			l.err = err
			cleanup()
			return
		}
		catcher.Reset()
@@ -114,10 +115,7 @@ func (l *listener) handleIncoming() {

		wg.Add(1)
		go func() {
			defer wg.Done()

			ctx, cancel := context.WithTimeout(l.ctx, l.upgrader.acceptTimeout)
			defer cancel()

			conn, err := l.upgrader.Upgrade(ctx, l.transport, maconn, network.DirInbound, "", connScope)
			if err != nil {
@@ -128,6 +126,8 @@ func (l *listener) handleIncoming() {
					maconn.LocalMultiaddr(),
					maconn.RemoteMultiaddr())
				connScope.Done()
				wg.Done()
				cancel()
				return
			}

@@ -139,7 +139,6 @@ func (l *listener) handleIncoming() {
			// simply ensures that calls to Wait block while we're
			// over the threshold.
			l.threshold.Acquire()
			defer l.threshold.Release()

			select {
			case l.incoming <- conn:
@@ -154,8 +153,12 @@ func (l *listener) handleIncoming() {
				// instead of hanging onto them.
				conn.Close()
			}
			wg.Done()
			cancel()
			l.threshold.Release()
		}()
	}
	cleanup()
}

// Accept accepts a connection.

@@ -122,20 +122,19 @@ func (c *Conn) Stat() network.ConnStats {
// implicitly because the connection manager closed the underlying relay connection.
func (c *Conn) tagHop() {
	c.client.mx.Lock()
	defer c.client.mx.Unlock()

	p := c.stream.Conn().RemotePeer()
	c.client.hopCount[p]++
	if c.client.hopCount[p] == 1 {
		c.client.host.ConnManager().TagPeer(p, "relay-hop-stream", HopTagWeight)
	}
	c.client.mx.Unlock()
}

// untagHop removes the relay-hop-stream tag if necessary; it is invoked when a relayed connection
// is closed.
func (c *Conn) untagHop() {
	c.client.mx.Lock()
	defer c.client.mx.Unlock()

	p := c.stream.Conn().RemotePeer()
	c.client.hopCount[p]--
@@ -143,6 +142,7 @@ func (c *Conn) untagHop() {
		c.client.host.ConnManager().UntagPeer(p, "relay-hop-stream")
		delete(c.client.hopCount, p)
	}
	c.client.mx.Unlock()
}

type capableConnWithStat interface {

@@ -130,12 +130,15 @@ func (c *Client) dialPeer(ctx context.Context, relay, dest peer.AddrInfo) (*Conn
	}

	dialCtx, cancel := context.WithTimeout(ctx, DialRelayTimeout)
	defer cancel()

	s, err := c.host.NewStream(dialCtx, relay.ID, proto.ProtoIDv2Hop)
	if err != nil {
		cancel()
		return nil, fmt.Errorf("error opening hop stream to relay: %w", err)
	}
	return c.connect(s, dest)
	conn, err := c.connect(s, dest)
	cancel()
	return conn, err
}

func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
@@ -143,11 +146,9 @@ func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
		s.Reset()
		return nil, err
	}
	defer s.Scope().ReleaseMemory(maxMessageSize)

	rd := util.NewDelimitedReader(s, maxMessageSize)
	wr := util.NewDelimitedWriter(s)
	defer rd.Close()

	var msg pbv2.HopMessage

@@ -159,6 +160,8 @@ func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
	err := wr.WriteMsg(&msg)
	if err != nil {
		s.Reset()
		s.Scope().ReleaseMemory(maxMessageSize)
		rd.Close()
		return nil, err
	}

@@ -167,6 +170,8 @@ func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
	err = rd.ReadMsg(&msg)
	if err != nil {
		s.Reset()
		s.Scope().ReleaseMemory(maxMessageSize)
		rd.Close()
		return nil, err
	}

@@ -174,12 +179,16 @@ func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {

	if msg.GetType() != pbv2.HopMessage_STATUS {
		s.Reset()
		s.Scope().ReleaseMemory(maxMessageSize)
		rd.Close()
		return nil, newRelayError("unexpected relay response; not a status message (%d)", msg.GetType())
	}

	status := msg.GetStatus()
	if status != pbv2.Status_OK {
		s.Reset()
		s.Scope().ReleaseMemory(maxMessageSize)
		rd.Close()
		return nil, newRelayError("error opening relay circuit: %s (%d)", pbv2.Status_name[int32(status)], status)
	}

@@ -193,5 +202,7 @@ func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
		stat.Extra[StatLimitData] = limit.GetData()
	}

	s.Scope().ReleaseMemory(maxMessageSize)
	rd.Close()
	return &Conn{stream: s, remote: dest, stat: stat, client: c}, nil
}

@@ -19,18 +19,18 @@ func (c *Client) handleStreamV2(s network.Stream) {
	s.SetReadDeadline(time.Now().Add(StreamTimeout))

	rd := util.NewDelimitedReader(s, maxMessageSize)
	defer rd.Close()

	writeResponse := func(status pbv2.Status) error {
		s.SetWriteDeadline(time.Now().Add(StreamTimeout))
		defer s.SetWriteDeadline(time.Time{})
		wr := util.NewDelimitedWriter(s)

		var msg pbv2.StopMessage
		msg.Type = pbv2.StopMessage_STATUS.Enum()
		msg.Status = status.Enum()

		return wr.WriteMsg(&msg)
		err := wr.WriteMsg(&msg)
		s.SetWriteDeadline(time.Time{})
		return err
	}

	handleError := func(status pbv2.Status) {
@@ -49,6 +49,7 @@ func (c *Client) handleStreamV2(s network.Stream) {
	err := rd.ReadMsg(&msg)
	if err != nil {
		handleError(pbv2.Status_MALFORMED_MESSAGE)
		rd.Close()
		return
	}
	// reset stream deadline as message has been read
@@ -56,12 +57,14 @@ func (c *Client) handleStreamV2(s network.Stream) {

	if msg.GetType() != pbv2.StopMessage_CONNECT {
		handleError(pbv2.Status_UNEXPECTED_MESSAGE)
		rd.Close()
		return
	}

	src, err := util.PeerToPeerInfoV2(msg.GetPeer())
	if err != nil {
		handleError(pbv2.Status_MALFORMED_MESSAGE)
		rd.Close()
		return
	}

@@ -87,4 +90,5 @@ func (c *Client) handleStreamV2(s network.Stream) {
	case <-time.After(AcceptTimeout):
		handleError(pbv2.Status_CONNECTION_FAILED)
	}
	rd.Close()
}

@@ -69,11 +69,9 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,
	if err != nil {
		return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "failed to open stream", err: err}
	}
	defer s.Close()

	rd := util.NewDelimitedReader(s, maxMessageSize)
	wr := util.NewDelimitedWriter(s)
	defer rd.Close()

	var msg pbv2.HopMessage
	msg.Type = pbv2.HopMessage_RESERVE.Enum()
@@ -82,6 +80,8 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,

	if err := wr.WriteMsg(&msg); err != nil {
		s.Reset()
		s.Close()
		rd.Close()
		return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "error writing reservation message", err: err}
	}

@@ -89,10 +89,14 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,

	if err := rd.ReadMsg(&msg); err != nil {
		s.Reset()
		s.Close()
		rd.Close()
		return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "error reading reservation response message: %w", err: err}
	}

	if msg.GetType() != pbv2.HopMessage_STATUS {
		s.Close()
		rd.Close()
		return nil, ReservationError{
			Status: pbv2.Status_MALFORMED_MESSAGE,
			Reason: fmt.Sprintf("unexpected relay response: not a status message (%d)", msg.GetType()),
@@ -100,17 +104,23 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,
	}

	if status := msg.GetStatus(); status != pbv2.Status_OK {
		s.Close()
		rd.Close()
		return nil, ReservationError{Status: msg.GetStatus(), Reason: "reservation failed"}
	}

	rsvp := msg.GetReservation()
	if rsvp == nil {
		s.Close()
		rd.Close()
		return nil, ReservationError{Status: pbv2.Status_MALFORMED_MESSAGE, Reason: "missing reservation info"}
	}

	result := &Reservation{}
	result.Expiration = time.Unix(int64(rsvp.GetExpire()), 0)
	if result.Expiration.Before(time.Now()) {
		s.Close()
		rd.Close()
		return nil, ReservationError{
			Status: pbv2.Status_MALFORMED_MESSAGE,
			Reason: fmt.Sprintf("received reservation with expiration date in the past: %s", result.Expiration),
@@ -132,6 +142,8 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,
	if voucherBytes != nil {
		_, rec, err := record.ConsumeEnvelope(voucherBytes, proto.RecordDomain)
		if err != nil {
			s.Close()
			rd.Close()
			return nil, ReservationError{
				Status: pbv2.Status_MALFORMED_MESSAGE,
				Reason: fmt.Sprintf("error consuming voucher envelope: %s", err),
@@ -141,6 +153,8 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,

		voucher, ok := rec.(*proto.ReservationVoucher)
		if !ok {
			s.Close()
			rd.Close()
			return nil, ReservationError{
				Status: pbv2.Status_MALFORMED_MESSAGE,
				Reason: fmt.Sprintf("unexpected voucher record type: %+T", rec),
@@ -155,5 +169,7 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,
		result.LimitData = limit.GetData()
	}

	s.Close()
	rd.Close()
	return result, nil
}

@@ -48,27 +48,30 @@ func newConstraints(rc *Resources) *constraints {
// If adding this reservation violates IP constraints, an error is returned.
func (c *constraints) AddReservation(p peer.ID, a ma.Multiaddr) error {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	now := time.Now()
	c.cleanup(now)

	if len(c.total) >= c.rc.MaxReservations {
		c.mutex.Unlock()
		return errTooManyReservations
	}

	ip, err := manet.ToIP(a)
	if err != nil {
		c.mutex.Unlock()
		return errors.New("no IP address associated with peer")
	}

	peerReservations := c.peers[p]
	if len(peerReservations) >= c.rc.MaxReservationsPerPeer {
		c.mutex.Unlock()
		return errTooManyReservationsForPeer
	}

	ipReservations := c.ips[ip.String()]
	if len(ipReservations) >= c.rc.MaxReservationsPerIP {
		c.mutex.Unlock()
		return errTooManyReservationsForIP
	}

@@ -79,6 +82,7 @@ func (c *constraints) AddReservation(p peer.ID, a ma.Multiaddr) error {
		if asn != 0 {
			asnReservations = c.asns[asn]
			if len(asnReservations) >= c.rc.MaxReservationsPerASN {
				c.mutex.Unlock()
				return errTooManyReservationsForASN
			}
		}
@@ -97,6 +101,7 @@ func (c *constraints) AddReservation(p peer.ID, a ma.Multiaddr) error {
		asnReservations = append(asnReservations, expiry)
		c.asns[asn] = asnReservations
	}
	c.mutex.Unlock()
	return nil
}


@@ -163,24 +163,23 @@ func (mt *metricsTracer) RelayStatus(enabled bool) {

func (mt *metricsTracer) ConnectionOpened() {
	tags := metricshelper.GetStringSlice()
	defer metricshelper.PutStringSlice(tags)
	*tags = append(*tags, "opened")

	connectionsTotal.WithLabelValues(*tags...).Add(1)
	metricshelper.PutStringSlice(tags)
}

func (mt *metricsTracer) ConnectionClosed(d time.Duration) {
	tags := metricshelper.GetStringSlice()
	defer metricshelper.PutStringSlice(tags)
	*tags = append(*tags, "closed")

	connectionsTotal.WithLabelValues(*tags...).Add(1)
	connectionDurationSeconds.Observe(d.Seconds())
	metricshelper.PutStringSlice(tags)
}

func (mt *metricsTracer) ConnectionRequestHandled(status pbv2.Status) {
	tags := metricshelper.GetStringSlice()
	defer metricshelper.PutStringSlice(tags)

	respStatus := getResponseStatus(status)

@@ -191,11 +190,11 @@ func (mt *metricsTracer) ConnectionRequestHandled(status pbv2.Status) {
		*tags = append(*tags, getRejectionReason(status))
		connectionRejectionsTotal.WithLabelValues(*tags...).Add(1)
	}
	metricshelper.PutStringSlice(tags)
}

func (mt *metricsTracer) ReservationAllowed(isRenewal bool) {
	tags := metricshelper.GetStringSlice()
	defer metricshelper.PutStringSlice(tags)
	if isRenewal {
		*tags = append(*tags, "renewed")
	} else {
@@ -203,19 +202,19 @@ func (mt *metricsTracer) ReservationAllowed(isRenewal bool) {
	}

	reservationsTotal.WithLabelValues(*tags...).Add(1)
	metricshelper.PutStringSlice(tags)
}

func (mt *metricsTracer) ReservationClosed(cnt int) {
	tags := metricshelper.GetStringSlice()
	defer metricshelper.PutStringSlice(tags)
	*tags = append(*tags, "closed")

	reservationsTotal.WithLabelValues(*tags...).Add(float64(cnt))
	metricshelper.PutStringSlice(tags)
}

func (mt *metricsTracer) ReservationRequestHandled(status pbv2.Status) {
	tags := metricshelper.GetStringSlice()
	defer metricshelper.PutStringSlice(tags)

	respStatus := getResponseStatus(status)

@@ -226,6 +225,7 @@ func (mt *metricsTracer) ReservationRequestHandled(status pbv2.Status) {
		*tags = append(*tags, getRejectionReason(status))
		reservationRejectionsTotal.WithLabelValues(*tags...).Add(1)
	}
	metricshelper.PutStringSlice(tags)
}

func (mt *metricsTracer) BytesTransferred(cnt int) {

@@ -146,10 +146,8 @@ func (r *Relay) handleStream(s network.Stream) {
		s.Reset()
		return
	}
	defer s.Scope().ReleaseMemory(maxMessageSize)

	rd := util.NewDelimitedReader(s, maxMessageSize)
	defer rd.Close()

	s.SetReadDeadline(time.Now().Add(StreamTimeout))

@@ -158,6 +156,8 @@ func (r *Relay) handleStream(s network.Stream) {
	err := rd.ReadMsg(&msg)
	if err != nil {
		r.handleError(s, pbv2.Status_MALFORMED_MESSAGE)
		s.Scope().ReleaseMemory(maxMessageSize)
		rd.Close()
		return
	}
	// reset stream deadline as message has been read
@@ -176,22 +176,25 @@ func (r *Relay) handleStream(s network.Stream) {
	default:
		r.handleError(s, pbv2.Status_MALFORMED_MESSAGE)
	}
	s.Scope().ReleaseMemory(maxMessageSize)
	rd.Close()
}

func (r *Relay) handleReserve(s network.Stream) pbv2.Status {
	defer s.Close()
	p := s.Conn().RemotePeer()
	a := s.Conn().RemoteMultiaddr()

	if isRelayAddr(a) {
		log.Debugf("refusing relay reservation for %s; reservation attempt over relay connection")
		r.handleError(s, pbv2.Status_PERMISSION_DENIED)
		s.Close()
		return pbv2.Status_PERMISSION_DENIED
	}

	if r.acl != nil && !r.acl.AllowReserve(p, a) {
		log.Debugf("refusing relay reservation for %s; permission denied", p)
		r.handleError(s, pbv2.Status_PERMISSION_DENIED)
		s.Close()
		return pbv2.Status_PERMISSION_DENIED
	}

@@ -202,6 +205,7 @@ func (r *Relay) handleReserve(s network.Stream) pbv2.Status {
		r.mx.Unlock()
		log.Debugf("refusing relay reservation for %s; relay closed", p)
		r.handleError(s, pbv2.Status_PERMISSION_DENIED)
		s.Close()
		return pbv2.Status_PERMISSION_DENIED
	}
	now := time.Now()
@@ -212,6 +216,7 @@ func (r *Relay) handleReserve(s network.Stream) pbv2.Status {
			r.mx.Unlock()
			log.Debugf("refusing relay reservation for %s; IP constraint violation: %s", p, err)
			r.handleError(s, pbv2.Status_RESERVATION_REFUSED)
			s.Close()
			return pbv2.Status_RESERVATION_REFUSED
		}
	}
@@ -232,8 +237,10 @@ func (r *Relay) handleReserve(s network.Stream) pbv2.Status {
	if err := r.writeResponse(s, pbv2.Status_OK, r.makeReservationMsg(p, expire), r.makeLimitMsg(p)); err != nil {
		log.Debugf("error writing reservation response; retracting reservation for %s", p)
		s.Reset()
		s.Close()
		return pbv2.Status_CONNECTION_FAILED
	}
	s.Close()
	return pbv2.Status_OK
}

@@ -324,7 +331,6 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
	}

	ctx, cancel := context.WithTimeout(r.ctx, ConnectTimeout)
	defer cancel()

	ctx = network.WithNoDial(ctx, "relay connect")

@@ -333,6 +339,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
		log.Debugf("error opening relay stream to %s: %s", dest.ID, err)
		cleanup()
		r.handleError(s, pbv2.Status_CONNECTION_FAILED)
		cancel()
		return pbv2.Status_CONNECTION_FAILED
	}

@@ -345,6 +352,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
	if err := bs.Scope().SetService(ServiceName); err != nil {
		log.Debugf("error attaching stream to relay service: %s", err)
		fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
		cancel()
		return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
	}

@@ -352,13 +360,12 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
	if err := bs.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil {
		log.Debugf("error reserving memory for stream: %s", err)
		fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
		cancel()
		return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
	}
	defer bs.Scope().ReleaseMemory(maxMessageSize)

	rd := util.NewDelimitedReader(bs, maxMessageSize)
	wr := util.NewDelimitedWriter(bs)
	defer rd.Close()

	var stopmsg pbv2.StopMessage
	stopmsg.Type = pbv2.StopMessage_CONNECT.Enum()
@@ -371,6 +378,10 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
	if err != nil {
		log.Debugf("error writing stop handshake")
		fail(pbv2.Status_CONNECTION_FAILED)

		cancel()
		bs.Scope().ReleaseMemory(maxMessageSize)
		rd.Close()
		return pbv2.Status_CONNECTION_FAILED
	}

@@ -380,18 +391,30 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
	if err != nil {
		log.Debugf("error reading stop response: %s", err.Error())
		fail(pbv2.Status_CONNECTION_FAILED)

		cancel()
		bs.Scope().ReleaseMemory(maxMessageSize)
		rd.Close()
		return pbv2.Status_CONNECTION_FAILED
	}

	if t := stopmsg.GetType(); t != pbv2.StopMessage_STATUS {
		log.Debugf("unexpected stop response; not a status message (%d)", t)
		fail(pbv2.Status_CONNECTION_FAILED)

		cancel()
		bs.Scope().ReleaseMemory(maxMessageSize)
		rd.Close()
		return pbv2.Status_CONNECTION_FAILED
	}

	if status := stopmsg.GetStatus(); status != pbv2.Status_OK {
		log.Debugf("relay stop failure: %d", status)
		fail(pbv2.Status_CONNECTION_FAILED)

		cancel()
		bs.Scope().ReleaseMemory(maxMessageSize)
		rd.Close()
		return pbv2.Status_CONNECTION_FAILED
	}

@@ -407,6 +430,10 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
		bs.Reset()
		s.Reset()
		cleanup()

		cancel()
		bs.Scope().ReleaseMemory(maxMessageSize)
		rd.Close()
		return pbv2.Status_CONNECTION_FAILED
	}

@@ -437,6 +464,9 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
		go r.relayUnlimited(bs, s, dest.ID, src, done)
	}

	cancel()
	bs.Scope().ReleaseMemory(maxMessageSize)
	rd.Close()
	return pbv2.Status_OK
}

@ -461,10 +491,7 @@ func (r *Relay) rmConn(p peer.ID) {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (r *Relay) relayLimited(src, dest network.Stream, srcID, destID peer.ID, limit int64, done func()) {
 | 
			
		||||
	defer done()
 | 
			
		||||
 | 
			
		||||
	buf := pool.Get(r.rc.BufferSize)
 | 
			
		||||
	defer pool.Put(buf)
 | 
			
		||||
 | 
			
		||||
	limitedSrc := io.LimitReader(src, limit)
 | 
			
		||||
 | 
			
		||||
@ -484,13 +511,13 @@ func (r *Relay) relayLimited(src, dest network.Stream, srcID, destID peer.ID, li
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	log.Debugf("relayed %d bytes from %s to %s", count, srcID, destID)
 | 
			
		||||
 | 
			
		||||
	done()
 | 
			
		||||
	pool.Put(buf)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (r *Relay) relayUnlimited(src, dest network.Stream, srcID, destID peer.ID, done func()) {
 | 
			
		||||
	defer done()
 | 
			
		||||
 | 
			
		||||
	buf := pool.Get(r.rc.BufferSize)
 | 
			
		||||
	defer pool.Put(buf)
 | 
			
		||||
 | 
			
		||||
	count, err := r.copyWithBuffer(dest, src, buf)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
@ -504,6 +531,9 @@ func (r *Relay) relayUnlimited(src, dest network.Stream, srcID, destID peer.ID,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	log.Debugf("relayed %d bytes from %s to %s", count, srcID, destID)
 | 
			
		||||
 | 
			
		||||
	done()
 | 
			
		||||
	pool.Put(buf)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// errInvalidWrite means that a write returned an impossible count.
 | 
			
		||||
@ -560,7 +590,6 @@ func (r *Relay) handleError(s network.Stream, status pbv2.Status) {
 | 
			
		||||
 | 
			
		||||
func (r *Relay) writeResponse(s network.Stream, status pbv2.Status, rsvp *pbv2.Reservation, limit *pbv2.Limit) error {
 | 
			
		||||
	s.SetWriteDeadline(time.Now().Add(StreamTimeout))
 | 
			
		||||
	defer s.SetWriteDeadline(time.Time{})
 | 
			
		||||
	wr := util.NewDelimitedWriter(s)
 | 
			
		||||
 | 
			
		||||
	var msg pbv2.HopMessage
 | 
			
		||||
@ -569,7 +598,9 @@ func (r *Relay) writeResponse(s network.Stream, status pbv2.Status, rsvp *pbv2.R
 | 
			
		||||
	msg.Reservation = rsvp
 | 
			
		||||
	msg.Limit = limit
 | 
			
		||||
 | 
			
		||||
	return wr.WriteMsg(&msg)
 | 
			
		||||
	err := wr.WriteMsg(&msg)
 | 
			
		||||
	s.SetWriteDeadline(time.Time{})
 | 
			
		||||
	return err
 | 
			
		||||
}
 | 
			
		||||
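
writeResponse now clears the write deadline explicitly after the write rather than deferring the reset. A small sketch of the same idea against a plain net.Conn (writeWithDeadline is a hypothetical helper, not the relay API):

package sketch

import (
	"net"
	"time"
)

// writeWithDeadline sets a deadline, performs the write, and clears the
// deadline before returning, on the error and success path alike.
func writeWithDeadline(conn net.Conn, payload []byte, timeout time.Duration) error {
	conn.SetWriteDeadline(time.Now().Add(timeout))
	_, err := conn.Write(payload)
	conn.SetWriteDeadline(time.Time{})
	return err
}
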
 | 
			
		||||
func (r *Relay) makeReservationMsg(p peer.ID, expire time.Time) *pbv2.Reservation {
 | 
			
		||||
@ -629,13 +660,13 @@ func (r *Relay) makeLimitMsg(p peer.ID) *pbv2.Limit {
 | 
			
		||||
 | 
			
		||||
func (r *Relay) background() {
 | 
			
		||||
	ticker := time.NewTicker(time.Minute)
 | 
			
		||||
	defer ticker.Stop()
 | 
			
		||||
 | 
			
		||||
	for {
 | 
			
		||||
		select {
 | 
			
		||||
		case <-ticker.C:
 | 
			
		||||
			r.gc()
 | 
			
		||||
		case <-r.ctx.Done():
 | 
			
		||||
			ticker.Stop()
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
@ -643,7 +674,6 @@ func (r *Relay) background() {
 | 
			
		||||
 | 
			
		||||
func (r *Relay) gc() {
 | 
			
		||||
	r.mx.Lock()
 | 
			
		||||
	defer r.mx.Unlock()
 | 
			
		||||
 | 
			
		||||
	now := time.Now()
 | 
			
		||||
	cnt := 0
 | 
			
		||||
@ -663,6 +693,7 @@ func (r *Relay) gc() {
 | 
			
		||||
			delete(r.conns, p)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	r.mx.Unlock()
 | 
			
		||||
}
 | 
			
		||||
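
background and gc above stop the ticker and release the relay mutex explicitly on the shutdown path instead of via defer. A sketch of the ticker half, with gcOnce standing in for the real sweep:

package sketch

import (
	"context"
	"time"
)

// runGC runs a periodic sweep until the context is cancelled; the ticker is
// stopped explicitly in the shutdown branch rather than with defer.
func runGC(ctx context.Context, gcOnce func()) {
	ticker := time.NewTicker(time.Minute)
	for {
		select {
		case <-ticker.C:
			gcOnce()
		case <-ctx.Done():
			ticker.Stop()
			return
		}
	}
}
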
 | 
			
		||||
func (r *Relay) disconnected(n network.Network, c network.Conn) {
 | 
			
		||||
 | 
			
		||||
@ -67,18 +67,21 @@ func newHolePuncher(h host.Host, ids identify.IDService, tracer *tracer, filter
 | 
			
		||||
 | 
			
		||||
func (hp *holePuncher) beginDirectConnect(p peer.ID) error {
 | 
			
		||||
	hp.closeMx.RLock()
 | 
			
		||||
	defer hp.closeMx.RUnlock()
 | 
			
		||||
	if hp.closed {
 | 
			
		||||
		hp.closeMx.RUnlock()
 | 
			
		||||
		return ErrClosed
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	hp.activeMx.Lock()
 | 
			
		||||
	defer hp.activeMx.Unlock()
 | 
			
		||||
	if _, ok := hp.active[p]; ok {
 | 
			
		||||
		hp.activeMx.Unlock()
 | 
			
		||||
		hp.closeMx.RUnlock()
 | 
			
		||||
		return ErrHolePunchActive
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	hp.active[p] = struct{}{}
 | 
			
		||||
	hp.activeMx.Unlock()
 | 
			
		||||
	hp.closeMx.RUnlock()
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
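
beginDirectConnect keeps its two locks but now releases them explicitly on every branch instead of deferring the unlocks. A self-contained sketch of that ordering, assuming illustrative names (tracker, begin):

package sketch

import (
	"errors"
	"sync"
)

var (
	errClosed = errors.New("tracker closed")
	errActive = errors.New("already active")
)

// tracker guards a closed flag with a read lock and an active set with a
// second mutex; every branch releases both locks in reverse acquisition order.
type tracker struct {
	closeMx  sync.RWMutex
	closed   bool
	activeMx sync.Mutex
	active   map[string]struct{}
}

func newTracker() *tracker {
	return &tracker{active: make(map[string]struct{})}
}

func (t *tracker) begin(id string) error {
	t.closeMx.RLock()
	if t.closed {
		t.closeMx.RUnlock()
		return errClosed
	}

	t.activeMx.Lock()
	if _, ok := t.active[id]; ok {
		t.activeMx.Unlock()
		t.closeMx.RUnlock()
		return errActive
	}
	t.active[id] = struct{}{}
	t.activeMx.Unlock()
	t.closeMx.RUnlock()
	return nil
}
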
 | 
			
		||||
@ -90,13 +93,11 @@ func (hp *holePuncher) DirectConnect(p peer.ID) error {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	defer func() {
 | 
			
		||||
		hp.activeMx.Lock()
 | 
			
		||||
		delete(hp.active, p)
 | 
			
		||||
		hp.activeMx.Unlock()
 | 
			
		||||
	}()
 | 
			
		||||
 | 
			
		||||
	return hp.directConnect(p)
 | 
			
		||||
	err := hp.directConnect(p)
 | 
			
		||||
	hp.activeMx.Lock()
 | 
			
		||||
	delete(hp.active, p)
 | 
			
		||||
	hp.activeMx.Unlock()
 | 
			
		||||
	return err
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (hp *holePuncher) directConnect(rp peer.ID) error {
 | 
			
		||||
@ -181,14 +182,15 @@ func (hp *holePuncher) initiateHolePunch(rp peer.ID) ([]ma.Multiaddr, []ma.Multi
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, nil, 0, fmt.Errorf("failed to open hole-punching stream: %w", err)
 | 
			
		||||
	}
 | 
			
		||||
	defer str.Close()
 | 
			
		||||
 | 
			
		||||
	addr, obsAddr, rtt, err := hp.initiateHolePunchImpl(str)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		log.Debugf("%s", err)
 | 
			
		||||
		str.Reset()
 | 
			
		||||
		str.Close()
 | 
			
		||||
		return addr, obsAddr, rtt, err
 | 
			
		||||
	}
 | 
			
		||||
	str.Close()
 | 
			
		||||
	return addr, obsAddr, rtt, err
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -200,7 +202,6 @@ func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr
 | 
			
		||||
	if err := str.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
 | 
			
		||||
		return nil, nil, 0, fmt.Errorf("error reserving memory for stream: %s", err)
 | 
			
		||||
	}
 | 
			
		||||
	defer str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
 | 
			
		||||
	w := pbio.NewDelimitedWriter(str)
 | 
			
		||||
	rd := pbio.NewDelimitedReader(str, maxMsgSize)
 | 
			
		||||
@ -213,6 +214,7 @@ func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr
 | 
			
		||||
		obsAddrs = hp.filter.FilterLocal(str.Conn().RemotePeer(), obsAddrs)
 | 
			
		||||
	}
 | 
			
		||||
	if len(obsAddrs) == 0 {
 | 
			
		||||
		str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
		return nil, nil, 0, errors.New("aborting hole punch initiation as we have no public address")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -222,16 +224,19 @@ func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr
 | 
			
		||||
		ObsAddrs: addrsToBytes(obsAddrs),
 | 
			
		||||
	}); err != nil {
 | 
			
		||||
		str.Reset()
 | 
			
		||||
		str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
		return nil, nil, 0, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// wait for a CONNECT message from the remote peer
 | 
			
		||||
	var msg pb.HolePunch
 | 
			
		||||
	if err := rd.ReadMsg(&msg); err != nil {
 | 
			
		||||
		str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
		return nil, nil, 0, fmt.Errorf("failed to read CONNECT message from remote peer: %w", err)
 | 
			
		||||
	}
 | 
			
		||||
	rtt := time.Since(start)
 | 
			
		||||
	if t := msg.GetType(); t != pb.HolePunch_CONNECT {
 | 
			
		||||
		str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
		return nil, nil, 0, fmt.Errorf("expect CONNECT message, got %s", t)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -241,12 +246,15 @@ func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(addrs) == 0 {
 | 
			
		||||
		str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
		return nil, nil, 0, errors.New("didn't receive any public addresses in CONNECT")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if err := w.WriteMsg(&pb.HolePunch{Type: pb.HolePunch_SYNC.Enum()}); err != nil {
 | 
			
		||||
		str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
		return nil, nil, 0, fmt.Errorf("failed to send SYNC message for hole punching: %w", err)
 | 
			
		||||
	}
 | 
			
		||||
	str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
	return addrs, obsAddrs, rtt, nil
 | 
			
		||||
}
 | 
			
		||||
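
initiateHolePunchImpl now pairs each early return with an explicit release of the reserved stream memory. The sketch below imitates that reserve/release discipline with a toy limiter; the libp2p resource-scope API is not reproduced here, and writeMsg/readMsg are hypothetical callbacks:

package sketch

import "fmt"

// limiter stands in for a resource scope: Reserve must be matched by exactly
// one Release on every exit path once the defer is gone.
type limiter struct{ inUse int }

func (l *limiter) Reserve(n int) error { l.inUse += n; return nil }
func (l *limiter) Release(n int)       { l.inUse -= n }

func exchange(l *limiter, size int, writeMsg, readMsg func() error) error {
	if err := l.Reserve(size); err != nil {
		return fmt.Errorf("reserving memory: %w", err)
	}
	if err := writeMsg(); err != nil {
		l.Release(size)
		return fmt.Errorf("write: %w", err)
	}
	if err := readMsg(); err != nil {
		l.Release(size)
		return fmt.Errorf("read: %w", err)
	}
	l.Release(size)
	return nil
}
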
 | 
			
		||||
@ -269,17 +277,17 @@ func (nn *netNotifiee) Connected(_ network.Network, conn network.Conn) {
 | 
			
		||||
	if conn.Stat().Direction == network.DirInbound && isRelayAddress(conn.RemoteMultiaddr()) {
 | 
			
		||||
		hs.refCount.Add(1)
 | 
			
		||||
		go func() {
 | 
			
		||||
			defer hs.refCount.Done()
 | 
			
		||||
 | 
			
		||||
			select {
 | 
			
		||||
			// waiting for Identify here will allow us to access the peer's public and observed addresses
 | 
			
		||||
			// that we can dial to for a hole punch.
 | 
			
		||||
			case <-hs.ids.IdentifyWait(conn):
 | 
			
		||||
			case <-hs.ctx.Done():
 | 
			
		||||
				hs.refCount.Done()
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			_ = hs.DirectConnect(conn.RemotePeer())
 | 
			
		||||
			hs.refCount.Done()
 | 
			
		||||
		}()
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
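
The Connected notifiee above calls refCount.Done explicitly on both exits of its goroutine. A generic version of that pattern over a plain sync.WaitGroup (spawn, ready and work are stand-ins for the identify wait and the direct-connect attempt):

package sketch

import (
	"context"
	"sync"
)

// spawn increments the WaitGroup before starting the goroutine and decrements
// it explicitly on each exit path instead of deferring Done.
func spawn(ctx context.Context, wg *sync.WaitGroup, ready <-chan struct{}, work func()) {
	wg.Add(1)
	go func() {
		select {
		case <-ready:
		case <-ctx.Done():
			wg.Done()
			return
		}
		work()
		wg.Done()
	}()
}
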
 | 
			
		||||
@ -108,7 +108,6 @@ func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
 | 
			
		||||
func (mt *metricsTracer) HolePunchFinished(side string, numAttempts int,
 | 
			
		||||
	remoteAddrs []ma.Multiaddr, localAddrs []ma.Multiaddr, directConn network.ConnMultiaddrs) {
 | 
			
		||||
	tags := metricshelper.GetStringSlice()
 | 
			
		||||
	defer metricshelper.PutStringSlice(tags)
 | 
			
		||||
 | 
			
		||||
	*tags = append(*tags, side, getNumAttemptString(numAttempts))
 | 
			
		||||
	var dipv, dtransport string
 | 
			
		||||
@ -165,6 +164,7 @@ func (mt *metricsTracer) HolePunchFinished(side string, numAttempts int,
 | 
			
		||||
 | 
			
		||||
	*tags = append(*tags, outcome)
 | 
			
		||||
	hpOutcomesTotal.WithLabelValues(*tags...).Inc()
 | 
			
		||||
	metricshelper.PutStringSlice(tags)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getNumAttemptString(numAttempt int) string {
 | 
			
		||||
@ -177,11 +177,11 @@ func getNumAttemptString(numAttempt int) string {
 | 
			
		||||
 | 
			
		||||
func (mt *metricsTracer) DirectDialFinished(success bool) {
 | 
			
		||||
	tags := metricshelper.GetStringSlice()
 | 
			
		||||
	defer metricshelper.PutStringSlice(tags)
 | 
			
		||||
	if success {
 | 
			
		||||
		*tags = append(*tags, "success")
 | 
			
		||||
	} else {
 | 
			
		||||
		*tags = append(*tags, "failed")
 | 
			
		||||
	}
 | 
			
		||||
	directDialsTotal.WithLabelValues(*tags...).Inc()
 | 
			
		||||
	metricshelper.PutStringSlice(tags)
 | 
			
		||||
}
 | 
			
		||||
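
The metrics tracers return their pooled tag slice with an explicit put at the end of each method. A minimal pool-backed sketch; tagPool, getTags and recordOutcome are assumptions, not the metricshelper API:

package sketch

import "sync"

// tagPool recycles label slices so tracer methods allocate nothing per call.
var tagPool = sync.Pool{New: func() any { s := make([]string, 0, 8); return &s }}

func getTags() *[]string  { return tagPool.Get().(*[]string) }
func putTags(t *[]string) { *t = (*t)[:0]; tagPool.Put(t) }

// recordOutcome builds the label set, records it, and returns the slice to the
// pool explicitly instead of via defer.
func recordOutcome(record func(labels ...string), success bool) {
	tags := getTags()
	if success {
		*tags = append(*tags, "success")
	} else {
		*tags = append(*tags, "failed")
	}
	record(*tags...)
	putTags(tags)
}
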
 | 
			
		||||
@ -93,8 +93,6 @@ func NewService(h host.Host, ids identify.IDService, opts ...Option) (*Service,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *Service) watchForPublicAddr() {
 | 
			
		||||
	defer s.refCount.Done()
 | 
			
		||||
 | 
			
		||||
	log.Debug("waiting until we have at least one public address", "peer", s.host.ID())
 | 
			
		||||
 | 
			
		||||
	// TODO: We should have an event here that fires when identify discovers a new
 | 
			
		||||
@ -104,7 +102,6 @@ func (s *Service) watchForPublicAddr() {
 | 
			
		||||
	duration := 250 * time.Millisecond
 | 
			
		||||
	const maxDuration = 5 * time.Second
 | 
			
		||||
	t := time.NewTimer(duration)
 | 
			
		||||
	defer t.Stop()
 | 
			
		||||
	for {
 | 
			
		||||
		if containsPublicAddr(s.ids.OwnObservedAddrs()) {
 | 
			
		||||
			log.Debug("Host now has a public address. Starting holepunch protocol.")
 | 
			
		||||
@ -114,6 +111,8 @@ func (s *Service) watchForPublicAddr() {
 | 
			
		||||
 | 
			
		||||
		select {
 | 
			
		||||
		case <-s.ctx.Done():
 | 
			
		||||
			s.refCount.Done()
 | 
			
		||||
			t.Stop()
 | 
			
		||||
			return
 | 
			
		||||
		case <-t.C:
 | 
			
		||||
			duration *= 2
 | 
			
		||||
@ -128,15 +127,22 @@ func (s *Service) watchForPublicAddr() {
 | 
			
		||||
	sub, err := s.host.EventBus().Subscribe(&event.EvtLocalReachabilityChanged{}, eventbus.Name("holepunch"))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		log.Debugf("failed to subscripe to Reachability event: %s", err)
 | 
			
		||||
		s.refCount.Done()
 | 
			
		||||
		t.Stop()
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	defer sub.Close()
 | 
			
		||||
	for {
 | 
			
		||||
		select {
 | 
			
		||||
		case <-s.ctx.Done():
 | 
			
		||||
			s.refCount.Done()
 | 
			
		||||
			t.Stop()
 | 
			
		||||
			sub.Close()
 | 
			
		||||
			return
 | 
			
		||||
		case e, ok := <-sub.Out():
 | 
			
		||||
			if !ok {
 | 
			
		||||
				s.refCount.Done()
 | 
			
		||||
				t.Stop()
 | 
			
		||||
				sub.Close()
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
			if e.(event.EvtLocalReachabilityChanged).Reachability != network.ReachabilityPrivate {
 | 
			
		||||
@ -146,6 +152,10 @@ func (s *Service) watchForPublicAddr() {
 | 
			
		||||
			s.holePuncher = newHolePuncher(s.host, s.ids, s.tracer, s.filter)
 | 
			
		||||
			s.holePuncherMx.Unlock()
 | 
			
		||||
			close(s.hasPublicAddrsChan)
 | 
			
		||||
 | 
			
		||||
			s.refCount.Done()
 | 
			
		||||
			t.Stop()
 | 
			
		||||
			sub.Close()
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
@ -185,7 +195,6 @@ func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, remo
 | 
			
		||||
		log.Debugf("error reserving memory for stream: %s", err)
 | 
			
		||||
		return 0, nil, nil, err
 | 
			
		||||
	}
 | 
			
		||||
	defer str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
 | 
			
		||||
	wr := pbio.NewDelimitedWriter(str)
 | 
			
		||||
	rd := pbio.NewDelimitedReader(str, maxMsgSize)
 | 
			
		||||
@ -196,9 +205,11 @@ func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, remo
 | 
			
		||||
	str.SetDeadline(time.Now().Add(StreamTimeout))
 | 
			
		||||
 | 
			
		||||
	if err := rd.ReadMsg(msg); err != nil {
 | 
			
		||||
		str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
		return 0, nil, nil, fmt.Errorf("failed to read message from initiator: %w", err)
 | 
			
		||||
	}
 | 
			
		||||
	if t := msg.GetType(); t != pb.HolePunch_CONNECT {
 | 
			
		||||
		str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
		return 0, nil, nil, fmt.Errorf("expected CONNECT message from initiator but got %d", t)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -209,6 +220,7 @@ func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, remo
 | 
			
		||||
 | 
			
		||||
	log.Debugw("received hole punch request", "peer", str.Conn().RemotePeer(), "addrs", obsDial)
 | 
			
		||||
	if len(obsDial) == 0 {
 | 
			
		||||
		str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
		return 0, nil, nil, errors.New("expected CONNECT message to contain at least one address")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -218,17 +230,21 @@ func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, remo
 | 
			
		||||
	msg.ObsAddrs = addrsToBytes(ownAddrs)
 | 
			
		||||
	tstart := time.Now()
 | 
			
		||||
	if err := wr.WriteMsg(msg); err != nil {
 | 
			
		||||
		str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
		return 0, nil, nil, fmt.Errorf("failed to write CONNECT message to initiator: %w", err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Read SYNC message
 | 
			
		||||
	msg.Reset()
 | 
			
		||||
	if err := rd.ReadMsg(msg); err != nil {
 | 
			
		||||
		str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
		return 0, nil, nil, fmt.Errorf("failed to read message from initiator: %w", err)
 | 
			
		||||
	}
 | 
			
		||||
	if t := msg.GetType(); t != pb.HolePunch_SYNC {
 | 
			
		||||
		str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
		return 0, nil, nil, fmt.Errorf("expected SYNC message from initiator but got %d", t)
 | 
			
		||||
	}
 | 
			
		||||
	str.Scope().ReleaseMemory(maxMsgSize)
 | 
			
		||||
	return time.Since(tstart), obsDial, ownAddrs, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -259,9 +259,7 @@ func (t *tracer) HolePunchAttempt(p peer.ID) {
 | 
			
		||||
// gc cleans up the peers map. This is only run when the tracer is initialised with a non-nil
 | 
			
		||||
// EventTracer
 | 
			
		||||
func (t *tracer) gc() {
 | 
			
		||||
	defer t.refCount.Done()
 | 
			
		||||
	timer := time.NewTicker(tracerGCInterval)
 | 
			
		||||
	defer timer.Stop()
 | 
			
		||||
 | 
			
		||||
	for {
 | 
			
		||||
		select {
 | 
			
		||||
@ -275,6 +273,8 @@ func (t *tracer) gc() {
 | 
			
		||||
			}
 | 
			
		||||
			t.mutex.Unlock()
 | 
			
		||||
		case <-t.ctx.Done():
 | 
			
		||||
			t.refCount.Done()
 | 
			
		||||
			timer.Stop()
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -68,12 +68,13 @@ func holePunchConnect(ctx context.Context, host host.Host, pi peer.AddrInfo, isC
 | 
			
		||||
	holePunchCtx := network.WithSimultaneousConnect(ctx, isClient, "hole-punching")
 | 
			
		||||
	forceDirectConnCtx := network.WithForceDirectDial(holePunchCtx, "hole-punching")
 | 
			
		||||
	dialCtx, cancel := context.WithTimeout(forceDirectConnCtx, dialTimeout)
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	if err := host.Connect(dialCtx, pi); err != nil {
 | 
			
		||||
		log.Debugw("hole punch attempt with peer failed", "peer ID", pi.ID, "error", err)
 | 
			
		||||
		cancel()
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
	log.Debugw("hole punch successful", "peer", pi.ID)
 | 
			
		||||
	cancel()
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
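
holePunchConnect cancels its dial context explicitly on both the error and success paths. A compact sketch, with dial standing in for host.Connect:

package sketch

import (
	"context"
	"time"
)

// dialWithTimeout derives a bounded context and cancels it on every return
// instead of deferring the cancel.
func dialWithTimeout(ctx context.Context, timeout time.Duration, dial func(context.Context) error) error {
	dialCtx, cancel := context.WithTimeout(ctx, timeout)
	if err := dial(dialCtx); err != nil {
		cancel()
		return err
	}
	cancel()
	return nil
}
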
 | 
			
		||||
@ -258,8 +258,6 @@ func (ids *idService) Start() {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (ids *idService) loop(ctx context.Context) {
 | 
			
		||||
	defer ids.refCount.Done()
 | 
			
		||||
 | 
			
		||||
	sub, err := ids.Host.EventBus().Subscribe(
 | 
			
		||||
		[]any{&event.EvtLocalProtocolsUpdated{}, &event.EvtLocalAddressesUpdated{}},
 | 
			
		||||
		eventbus.BufSize(256),
 | 
			
		||||
@ -267,9 +265,9 @@ func (ids *idService) loop(ctx context.Context) {
 | 
			
		||||
	)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		log.Errorf("failed to subscribe to events on the bus, err=%s", err)
 | 
			
		||||
		ids.refCount.Done()
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	defer sub.Close()
 | 
			
		||||
 | 
			
		||||
	// Send pushes from a separate Go routine.
 | 
			
		||||
	// That way, we can end up with
 | 
			
		||||
@ -278,11 +276,10 @@ func (ids *idService) loop(ctx context.Context) {
 | 
			
		||||
	triggerPush := make(chan struct{}, 1)
 | 
			
		||||
	ids.refCount.Add(1)
 | 
			
		||||
	go func() {
 | 
			
		||||
		defer ids.refCount.Done()
 | 
			
		||||
 | 
			
		||||
		for {
 | 
			
		||||
			select {
 | 
			
		||||
			case <-ctx.Done():
 | 
			
		||||
				ids.refCount.Done()
 | 
			
		||||
				return
 | 
			
		||||
			case <-triggerPush:
 | 
			
		||||
				ids.sendPushes(ctx)
 | 
			
		||||
@ -294,6 +291,8 @@ func (ids *idService) loop(ctx context.Context) {
 | 
			
		||||
		select {
 | 
			
		||||
		case e, ok := <-sub.Out():
 | 
			
		||||
			if !ok {
 | 
			
		||||
				sub.Close()
 | 
			
		||||
				ids.refCount.Done()
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
			if updated := ids.updateSnapshot(); !updated {
 | 
			
		||||
@ -307,6 +306,8 @@ func (ids *idService) loop(ctx context.Context) {
 | 
			
		||||
			default: // we already have one more push queued, no need to queue another one
 | 
			
		||||
			}
 | 
			
		||||
		case <-ctx.Done():
 | 
			
		||||
			sub.Close()
 | 
			
		||||
			ids.refCount.Done()
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
@ -346,19 +347,22 @@ func (ids *idService) sendPushes(ctx context.Context) {
 | 
			
		||||
		sem <- struct{}{}
 | 
			
		||||
		wg.Add(1)
 | 
			
		||||
		go func(c network.Conn) {
 | 
			
		||||
			defer wg.Done()
 | 
			
		||||
			defer func() { <-sem }()
 | 
			
		||||
			ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
 | 
			
		||||
			defer cancel()
 | 
			
		||||
			str, err := ids.Host.NewStream(ctx, c.RemotePeer(), IDPush)
 | 
			
		||||
			if err != nil { // connection might have been closed recently
 | 
			
		||||
				cancel()
 | 
			
		||||
				func() { <-sem }()
 | 
			
		||||
				wg.Done()
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
			// TODO: find out if the peer supports push if we didn't have any information about push support
 | 
			
		||||
			if err := ids.sendIdentifyResp(str, true); err != nil {
 | 
			
		||||
				log.Debugw("failed to send identify push", "peer", c.RemotePeer(), "error", err)
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			cancel()
 | 
			
		||||
			func() { <-sem }()
 | 
			
		||||
			wg.Done()
 | 
			
		||||
		}(c)
 | 
			
		||||
	}
 | 
			
		||||
	wg.Wait()
 | 
			
		||||
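
sendPushes releases its semaphore slot and the WaitGroup explicitly on each exit of the worker goroutine. A generic bounded-concurrency sketch under those assumptions (send is a hypothetical push):

package sketch

import "sync"

// pushAll bounds concurrency with a buffered-channel semaphore; the slot and
// the WaitGroup are both released on every exit of the worker.
func pushAll(targets []string, limit int, send func(string) error) {
	sem := make(chan struct{}, limit)
	var wg sync.WaitGroup
	for _, t := range targets {
		sem <- struct{}{}
		wg.Add(1)
		go func(t string) {
			if err := send(t); err != nil {
				<-sem
				wg.Done()
				return
			}
			<-sem
			wg.Done()
		}(t)
	}
	wg.Wait()
}
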
@ -402,7 +406,6 @@ func (ids *idService) IdentifyConn(c network.Conn) {
 | 
			
		||||
// If successful, the peer store will contain the peer's addresses and supported protocols.
 | 
			
		||||
func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
 | 
			
		||||
	ids.connsMu.Lock()
 | 
			
		||||
	defer ids.connsMu.Unlock()
 | 
			
		||||
 | 
			
		||||
	e, found := ids.conns[c]
 | 
			
		||||
	if !found {
 | 
			
		||||
@ -412,6 +415,7 @@ func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
 | 
			
		||||
			log.Debugw("connection not found in identify service", "peer", c.RemotePeer())
 | 
			
		||||
			ch := make(chan struct{})
 | 
			
		||||
			close(ch)
 | 
			
		||||
			ids.connsMu.Unlock()
 | 
			
		||||
			return ch
 | 
			
		||||
		} else {
 | 
			
		||||
			ids.addConnWithLock(c)
 | 
			
		||||
@ -419,6 +423,7 @@ func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if e.IdentifyWaitChan != nil {
 | 
			
		||||
		ids.connsMu.Unlock()
 | 
			
		||||
		return e.IdentifyWaitChan
 | 
			
		||||
	}
 | 
			
		||||
	// First call to IdentifyWait for this connection. Create the channel.
 | 
			
		||||
@ -429,23 +434,23 @@ func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
 | 
			
		||||
	// already, but that doesn't really matter. We'll fail to open a
 | 
			
		||||
	// stream then forget the connection.
 | 
			
		||||
	go func() {
 | 
			
		||||
		defer close(e.IdentifyWaitChan)
 | 
			
		||||
		if err := ids.identifyConn(c); err != nil {
 | 
			
		||||
			log.Warnf("failed to identify %s: %s", c.RemotePeer(), err)
 | 
			
		||||
			ids.emitters.evtPeerIdentificationFailed.Emit(event.EvtPeerIdentificationFailed{Peer: c.RemotePeer(), Reason: err})
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
		close(e.IdentifyWaitChan)
 | 
			
		||||
	}()
 | 
			
		||||
 | 
			
		||||
	ids.connsMu.Unlock()
 | 
			
		||||
	return e.IdentifyWaitChan
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (ids *idService) identifyConn(c network.Conn) error {
 | 
			
		||||
	ctx, cancel := context.WithTimeout(context.Background(), Timeout)
 | 
			
		||||
	defer cancel()
 | 
			
		||||
	s, err := c.NewStream(network.WithAllowLimitedConn(ctx, "identify"))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		log.Debugw("error opening identify stream", "peer", c.RemotePeer(), "error", err)
 | 
			
		||||
		cancel()
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
	s.SetDeadline(time.Now().Add(Timeout))
 | 
			
		||||
@ -459,10 +464,13 @@ func (ids *idService) identifyConn(c network.Conn) error {
 | 
			
		||||
	if err := msmux.SelectProtoOrFail(ID, s); err != nil {
 | 
			
		||||
		log.Infow("failed negotiate identify protocol with peer", "peer", c.RemotePeer(), "error", err)
 | 
			
		||||
		s.Reset()
 | 
			
		||||
		cancel()
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return ids.handleIdentifyResponse(s, false)
 | 
			
		||||
	err = ids.handleIdentifyResponse(s, false)
 | 
			
		||||
	cancel()
 | 
			
		||||
	return err
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// handlePush handles incoming identify push streams
 | 
			
		||||
@ -480,7 +488,6 @@ func (ids *idService) sendIdentifyResp(s network.Stream, isPush bool) error {
 | 
			
		||||
		s.Reset()
 | 
			
		||||
		return fmt.Errorf("failed to attaching stream to identify service: %w", err)
 | 
			
		||||
	}
 | 
			
		||||
	defer s.Close()
 | 
			
		||||
 | 
			
		||||
	ids.currentSnapshot.Lock()
 | 
			
		||||
	snapshot := ids.currentSnapshot.snapshot
 | 
			
		||||
@ -493,6 +500,7 @@ func (ids *idService) sendIdentifyResp(s network.Stream, isPush bool) error {
 | 
			
		||||
 | 
			
		||||
	log.Debugf("%s sending message to %s %s", ID, s.Conn().RemotePeer(), s.Conn().RemoteMultiaddr())
 | 
			
		||||
	if err := ids.writeChunkedIdentifyMsg(s, mes); err != nil {
 | 
			
		||||
		s.Close()
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -501,17 +509,21 @@ func (ids *idService) sendIdentifyResp(s network.Stream, isPush bool) error {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ids.connsMu.Lock()
 | 
			
		||||
	defer ids.connsMu.Unlock()
 | 
			
		||||
	e, ok := ids.conns[s.Conn()]
 | 
			
		||||
	// The connection might already have been closed.
 | 
			
		||||
	// We *should* receive the Connected notification from the swarm before we're able to accept the peer's
 | 
			
		||||
	// Identify stream, but if that for some reason doesn't work, we also wouldn't have a map entry here.
 | 
			
		||||
	// The only consequence would be that we send a spurious Push to that peer later.
 | 
			
		||||
	if !ok {
 | 
			
		||||
		ids.connsMu.Unlock()
 | 
			
		||||
		s.Close()
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
	e.Sequence = snapshot.seq
 | 
			
		||||
	ids.conns[s.Conn()] = e
 | 
			
		||||
 | 
			
		||||
	ids.connsMu.Unlock()
 | 
			
		||||
	s.Close()
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -527,7 +539,6 @@ func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) erro
 | 
			
		||||
		s.Reset()
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
	defer s.Scope().ReleaseMemory(signedIDSize)
 | 
			
		||||
 | 
			
		||||
	c := s.Conn()
 | 
			
		||||
 | 
			
		||||
@ -537,11 +548,10 @@ func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) erro
 | 
			
		||||
	if err := readAllIDMessages(r, mes); err != nil {
 | 
			
		||||
		log.Warn("error reading identify message: ", err)
 | 
			
		||||
		s.Reset()
 | 
			
		||||
		s.Scope().ReleaseMemory(signedIDSize)
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	defer s.Close()
 | 
			
		||||
 | 
			
		||||
	log.Debugf("%s received message from %s %s", s.Protocol(), c.RemotePeer(), c.RemoteMultiaddr())
 | 
			
		||||
 | 
			
		||||
	ids.consumeMessage(mes, c, isPush)
 | 
			
		||||
@ -551,9 +561,12 @@ func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) erro
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ids.connsMu.Lock()
 | 
			
		||||
	defer ids.connsMu.Unlock()
 | 
			
		||||
 | 
			
		||||
	e, ok := ids.conns[c]
 | 
			
		||||
	if !ok { // might already have disconnected
 | 
			
		||||
		ids.connsMu.Unlock()
 | 
			
		||||
		s.Close()
 | 
			
		||||
		s.Scope().ReleaseMemory(signedIDSize)
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
	sup, err := ids.Host.Peerstore().SupportsProtocols(c.RemotePeer(), IDPush)
 | 
			
		||||
@ -568,6 +581,10 @@ func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) erro
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ids.conns[c] = e
 | 
			
		||||
 | 
			
		||||
	ids.connsMu.Unlock()
 | 
			
		||||
	s.Close()
 | 
			
		||||
	s.Scope().ReleaseMemory(signedIDSize)
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -612,9 +629,9 @@ func (ids *idService) updateSnapshot() (updated bool) {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ids.currentSnapshot.Lock()
 | 
			
		||||
	defer ids.currentSnapshot.Unlock()
 | 
			
		||||
 | 
			
		||||
	if ids.currentSnapshot.snapshot.Equal(&snapshot) {
 | 
			
		||||
		ids.currentSnapshot.Unlock()
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -622,6 +639,7 @@ func (ids *idService) updateSnapshot() (updated bool) {
 | 
			
		||||
	ids.currentSnapshot.snapshot = snapshot
 | 
			
		||||
 | 
			
		||||
	log.Debugw("updating snapshot", "seq", snapshot.seq, "addrs", snapshot.addrs)
 | 
			
		||||
	ids.currentSnapshot.Unlock()
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -1036,12 +1054,12 @@ func (nn *netNotifiee) Disconnected(_ network.Network, c network.Conn) {
 | 
			
		||||
	// Last disconnect.
 | 
			
		||||
	// Undo the setting of addresses to peer.ConnectedAddrTTL we did
 | 
			
		||||
	ids.addrMu.Lock()
 | 
			
		||||
	defer ids.addrMu.Unlock()
 | 
			
		||||
 | 
			
		||||
	// This check MUST happen after acquiring the Lock as identify on a different connection
 | 
			
		||||
	// might be trying to add addresses.
 | 
			
		||||
	switch ids.Host.Network().Connectedness(c.RemotePeer()) {
 | 
			
		||||
	case network.Connected, network.Limited:
 | 
			
		||||
		ids.addrMu.Unlock()
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	// peerstore returns the elements in a random order as it uses a map to store the addresses
 | 
			
		||||
@ -1059,6 +1077,7 @@ func (nn *netNotifiee) Disconnected(_ network.Network, c network.Conn) {
 | 
			
		||||
	ids.Host.Peerstore().UpdateAddrs(c.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.TempAddrTTL)
 | 
			
		||||
	ids.Host.Peerstore().AddAddrs(c.RemotePeer(), addrs[:n], peerstore.RecentlyConnectedAddrTTL)
 | 
			
		||||
	ids.Host.Peerstore().UpdateAddrs(c.RemotePeer(), peerstore.TempAddrTTL, 0)
 | 
			
		||||
	ids.addrMu.Unlock()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr)      {}
 | 
			
		||||
 | 
			
		||||
@ -133,7 +133,6 @@ func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
 | 
			
		||||
 | 
			
		||||
func (t *metricsTracer) TriggeredPushes(ev any) {
 | 
			
		||||
	tags := metricshelper.GetStringSlice()
 | 
			
		||||
	defer metricshelper.PutStringSlice(tags)
 | 
			
		||||
 | 
			
		||||
	typ := "unknown"
 | 
			
		||||
	switch ev.(type) {
 | 
			
		||||
@ -144,19 +143,19 @@ func (t *metricsTracer) TriggeredPushes(ev any) {
 | 
			
		||||
	}
 | 
			
		||||
	*tags = append(*tags, typ)
 | 
			
		||||
	pushesTriggered.WithLabelValues(*tags...).Inc()
 | 
			
		||||
	metricshelper.PutStringSlice(tags)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *metricsTracer) IncrementPushSupport(s identifyPushSupport) {
 | 
			
		||||
	tags := metricshelper.GetStringSlice()
 | 
			
		||||
	defer metricshelper.PutStringSlice(tags)
 | 
			
		||||
 | 
			
		||||
	*tags = append(*tags, getPushSupport(s))
 | 
			
		||||
	connPushSupportTotal.WithLabelValues(*tags...).Inc()
 | 
			
		||||
	metricshelper.PutStringSlice(tags)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *metricsTracer) IdentifySent(isPush bool, numProtocols int, numAddrs int) {
 | 
			
		||||
	tags := metricshelper.GetStringSlice()
 | 
			
		||||
	defer metricshelper.PutStringSlice(tags)
 | 
			
		||||
 | 
			
		||||
	if isPush {
 | 
			
		||||
		*tags = append(*tags, metricshelper.GetDirection(network.DirOutbound))
 | 
			
		||||
@ -168,11 +167,11 @@ func (t *metricsTracer) IdentifySent(isPush bool, numProtocols int, numAddrs int
 | 
			
		||||
 | 
			
		||||
	protocolsCount.Set(float64(numProtocols))
 | 
			
		||||
	addrsCount.Set(float64(numAddrs))
 | 
			
		||||
	metricshelper.PutStringSlice(tags)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *metricsTracer) IdentifyReceived(isPush bool, numProtocols int, numAddrs int) {
 | 
			
		||||
	tags := metricshelper.GetStringSlice()
 | 
			
		||||
	defer metricshelper.PutStringSlice(tags)
 | 
			
		||||
 | 
			
		||||
	if isPush {
 | 
			
		||||
		*tags = append(*tags, metricshelper.GetDirection(network.DirInbound))
 | 
			
		||||
@ -184,14 +183,15 @@ func (t *metricsTracer) IdentifyReceived(isPush bool, numProtocols int, numAddrs
 | 
			
		||||
 | 
			
		||||
	numProtocolsReceived.Observe(float64(numProtocols))
 | 
			
		||||
	numAddrsReceived.Observe(float64(numAddrs))
 | 
			
		||||
	metricshelper.PutStringSlice(tags)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *metricsTracer) ConnPushSupport(support identifyPushSupport) {
 | 
			
		||||
	tags := metricshelper.GetStringSlice()
 | 
			
		||||
	defer metricshelper.PutStringSlice(tags)
 | 
			
		||||
 | 
			
		||||
	*tags = append(*tags, getPushSupport(support))
 | 
			
		||||
	connPushSupportTotal.WithLabelValues(*tags...).Inc()
 | 
			
		||||
	metricshelper.PutStringSlice(tags)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getPushSupport(s identifyPushSupport) string {
 | 
			
		||||
 | 
			
		||||
@ -53,7 +53,6 @@ func newNATEmitter(h host.Host, o *ObservedAddrManager, eventInterval time.Durat
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (n *natEmitter) worker() {
 | 
			
		||||
	defer n.wg.Done()
 | 
			
		||||
	subCh := n.reachabilitySub.Out()
 | 
			
		||||
	ticker := time.NewTicker(n.eventInterval)
 | 
			
		||||
	pendingUpdate := false
 | 
			
		||||
@ -86,6 +85,7 @@ func (n *natEmitter) worker() {
 | 
			
		||||
				enoughTimeSinceLastUpdate = false
 | 
			
		||||
			}
 | 
			
		||||
		case <-n.ctx.Done():
 | 
			
		||||
			n.wg.Done()
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -116,10 +116,10 @@ func (s *observerSet) cacheMultiaddr(addr ma.Multiaddr) ma.Multiaddr {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	s.mu.Lock()
 | 
			
		||||
	defer s.mu.Unlock()
 | 
			
		||||
	// Check if some other goroutine added this while we were waiting
 | 
			
		||||
	res, ok = s.cachedMultiaddrs[addrStr]
 | 
			
		||||
	if ok {
 | 
			
		||||
		s.mu.Unlock()
 | 
			
		||||
		return res
 | 
			
		||||
	}
 | 
			
		||||
	if s.cachedMultiaddrs == nil {
 | 
			
		||||
@ -133,7 +133,9 @@ func (s *observerSet) cacheMultiaddr(addr ma.Multiaddr) ma.Multiaddr {
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	s.cachedMultiaddrs[addrStr] = ma.Join(s.ObservedTWAddr, addr)
 | 
			
		||||
	return s.cachedMultiaddrs[addrStr]
 | 
			
		||||
	mas := s.cachedMultiaddrs[addrStr]
 | 
			
		||||
	s.mu.Unlock()
 | 
			
		||||
	return mas
 | 
			
		||||
}
 | 
			
		||||
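
cacheMultiaddr re-checks the map after taking the write lock and unlocks explicitly before every return. A stripped-down version of that double-checked cache (cache and getOrBuild are illustrative names):

package sketch

import "sync"

// cache lazily builds values; the lock is released explicitly on the hit path,
// and again after the entry is stored on the miss path.
type cache struct {
	mu      sync.Mutex
	entries map[string]string
}

func (c *cache) getOrBuild(key string, build func(string) string) string {
	c.mu.Lock()
	if v, ok := c.entries[key]; ok {
		c.mu.Unlock()
		return v
	}
	if c.entries == nil {
		c.entries = make(map[string]string)
	}
	v := build(key)
	c.entries[key] = v
	c.mu.Unlock()
	return v
}
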
 | 
			
		||||
type observation struct {
 | 
			
		||||
@ -202,9 +204,9 @@ func (o *ObservedAddrManager) AddrsFor(addr ma.Multiaddr) (addrs []ma.Multiaddr)
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
	o.mu.RLock()
 | 
			
		||||
	defer o.mu.RUnlock()
 | 
			
		||||
	tw, err := thinWaistForm(o.normalize(addr))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		o.mu.RUnlock()
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -213,13 +215,13 @@ func (o *ObservedAddrManager) AddrsFor(addr ma.Multiaddr) (addrs []ma.Multiaddr)
 | 
			
		||||
	for _, s := range observerSets {
 | 
			
		||||
		res = append(res, s.cacheMultiaddr(tw.Rest))
 | 
			
		||||
	}
 | 
			
		||||
	o.mu.RUnlock()
 | 
			
		||||
	return res
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Addrs return all activated observed addresses
 | 
			
		||||
func (o *ObservedAddrManager) Addrs() []ma.Multiaddr {
 | 
			
		||||
	o.mu.RLock()
 | 
			
		||||
	defer o.mu.RUnlock()
 | 
			
		||||
 | 
			
		||||
	m := make(map[string][]*observerSet)
 | 
			
		||||
	for localTWStr := range o.externalAddrs {
 | 
			
		||||
@ -231,6 +233,7 @@ func (o *ObservedAddrManager) Addrs() []ma.Multiaddr {
 | 
			
		||||
			addrs = append(addrs, s.cacheMultiaddr(t.Rest))
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	o.mu.RUnlock()
 | 
			
		||||
	return addrs
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -282,13 +285,12 @@ func (o *ObservedAddrManager) Record(conn connMultiaddrs, observed ma.Multiaddr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (o *ObservedAddrManager) worker() {
 | 
			
		||||
	defer o.wg.Done()
 | 
			
		||||
 | 
			
		||||
	for {
 | 
			
		||||
		select {
 | 
			
		||||
		case obs := <-o.wch:
 | 
			
		||||
			o.maybeRecordObservation(obs.conn, obs.observed)
 | 
			
		||||
		case <-o.ctx.Done():
 | 
			
		||||
			o.wg.Done()
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
@ -372,12 +374,12 @@ func (o *ObservedAddrManager) maybeRecordObservation(conn connMultiaddrs, observ
 | 
			
		||||
	log.Debugw("added own observed listen addr", "observed", observed)
 | 
			
		||||
 | 
			
		||||
	o.mu.Lock()
 | 
			
		||||
	defer o.mu.Unlock()
 | 
			
		||||
	o.recordObservationUnlocked(conn, localTW, observedTW)
 | 
			
		||||
	select {
 | 
			
		||||
	case o.addrRecordedNotif <- struct{}{}:
 | 
			
		||||
	default:
 | 
			
		||||
	}
 | 
			
		||||
	o.mu.Unlock()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (o *ObservedAddrManager) recordObservationUnlocked(conn connMultiaddrs, localTW, observedTW thinWaist) {
 | 
			
		||||
@ -453,16 +455,17 @@ func (o *ObservedAddrManager) removeConn(conn connMultiaddrs) {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	o.mu.Lock()
 | 
			
		||||
	defer o.mu.Unlock()
 | 
			
		||||
 | 
			
		||||
	// normalize before obtaining the thinWaist so that we are always dealing
 | 
			
		||||
	// with the normalized form of the address
 | 
			
		||||
	localTW, err := thinWaistForm(o.normalize(conn.LocalMultiaddr()))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		o.mu.Unlock()
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	t, ok := o.localAddrs[string(localTW.Addr.Bytes())]
 | 
			
		||||
	if !ok {
 | 
			
		||||
		o.mu.Unlock()
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	t.Count--
 | 
			
		||||
@ -472,11 +475,13 @@ func (o *ObservedAddrManager) removeConn(conn connMultiaddrs) {
 | 
			
		||||
 | 
			
		||||
	observedTWAddr, ok := o.connObservedTWAddrs[conn]
 | 
			
		||||
	if !ok {
 | 
			
		||||
		o.mu.Unlock()
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	delete(o.connObservedTWAddrs, conn)
 | 
			
		||||
	observer, err := getObserver(conn.RemoteMultiaddr())
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		o.mu.Unlock()
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -485,11 +490,11 @@ func (o *ObservedAddrManager) removeConn(conn connMultiaddrs) {
 | 
			
		||||
	case o.addrRecordedNotif <- struct{}{}:
 | 
			
		||||
	default:
 | 
			
		||||
	}
 | 
			
		||||
	o.mu.Unlock()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (o *ObservedAddrManager) getNATType() (tcpNATType, udpNATType network.NATDeviceType) {
 | 
			
		||||
	o.mu.RLock()
 | 
			
		||||
	defer o.mu.RUnlock()
 | 
			
		||||
 | 
			
		||||
	var tcpCounts, udpCounts []int
 | 
			
		||||
	var tcpTotal, udpTotal int
 | 
			
		||||
@ -539,6 +544,7 @@ func (o *ObservedAddrManager) getNATType() (tcpNATType, udpNATType network.NATDe
 | 
			
		||||
			udpNATType = network.NATDeviceTypeSymmetric
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	o.mu.RUnlock()
 | 
			
		||||
	return
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -50,15 +50,11 @@ func (p *PingService) PingHandler(s network.Stream) {
 | 
			
		||||
		s.Reset()
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	defer s.Scope().ReleaseMemory(PingSize)
 | 
			
		||||
 | 
			
		||||
	buf := pool.Get(PingSize)
 | 
			
		||||
	defer pool.Put(buf)
 | 
			
		||||
 | 
			
		||||
	errCh := make(chan error, 1)
 | 
			
		||||
	defer close(errCh)
 | 
			
		||||
	timer := time.NewTimer(pingTimeout)
 | 
			
		||||
	defer timer.Stop()
 | 
			
		||||
 | 
			
		||||
	go func() {
 | 
			
		||||
		select {
 | 
			
		||||
@ -78,12 +74,22 @@ func (p *PingService) PingHandler(s network.Stream) {
 | 
			
		||||
		_, err := io.ReadFull(s, buf)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			errCh <- err
 | 
			
		||||
 | 
			
		||||
			s.Scope().ReleaseMemory(PingSize)
 | 
			
		||||
			pool.Put(buf)
 | 
			
		||||
			close(errCh)
 | 
			
		||||
			timer.Stop()
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		_, err = s.Write(buf)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			errCh <- err
 | 
			
		||||
 | 
			
		||||
			s.Scope().ReleaseMemory(PingSize)
 | 
			
		||||
			pool.Put(buf)
 | 
			
		||||
			close(errCh)
 | 
			
		||||
			timer.Stop()
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
@ -134,15 +140,14 @@ func Ping(ctx context.Context, h host.Host, p peer.ID) <-chan Result {
 | 
			
		||||
 | 
			
		||||
	out := make(chan Result)
 | 
			
		||||
	go func() {
 | 
			
		||||
		defer close(out)
 | 
			
		||||
		defer cancel()
 | 
			
		||||
 | 
			
		||||
		for ctx.Err() == nil {
 | 
			
		||||
			var res Result
 | 
			
		||||
			res.RTT, res.Error = ping(s, ra)
 | 
			
		||||
 | 
			
		||||
			// canceled, ignore everything.
 | 
			
		||||
			if ctx.Err() != nil {
 | 
			
		||||
				close(out)
 | 
			
		||||
				cancel()
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
@ -154,9 +159,13 @@ func Ping(ctx context.Context, h host.Host, p peer.ID) <-chan Result {
 | 
			
		||||
			select {
 | 
			
		||||
			case out <- res:
 | 
			
		||||
			case <-ctx.Done():
 | 
			
		||||
				close(out)
 | 
			
		||||
				cancel()
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		close(out)
 | 
			
		||||
		cancel()
 | 
			
		||||
	}()
 | 
			
		||||
	context.AfterFunc(ctx, func() {
 | 
			
		||||
		// forces the ping to abort.
 | 
			
		||||
@ -172,30 +181,40 @@ func ping(s network.Stream, randReader io.Reader) (time.Duration, error) {
 | 
			
		||||
		s.Reset()
 | 
			
		||||
		return 0, err
 | 
			
		||||
	}
 | 
			
		||||
	defer s.Scope().ReleaseMemory(2 * PingSize)
 | 
			
		||||
 | 
			
		||||
	buf := pool.Get(PingSize)
 | 
			
		||||
	defer pool.Put(buf)
 | 
			
		||||
 | 
			
		||||
	if _, err := io.ReadFull(randReader, buf); err != nil {
 | 
			
		||||
		s.Scope().ReleaseMemory(2 * PingSize)
 | 
			
		||||
		pool.Put(buf)
 | 
			
		||||
		return 0, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	before := time.Now()
 | 
			
		||||
	if _, err := s.Write(buf); err != nil {
 | 
			
		||||
		s.Scope().ReleaseMemory(2 * PingSize)
 | 
			
		||||
		pool.Put(buf)
 | 
			
		||||
		return 0, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	rbuf := pool.Get(PingSize)
 | 
			
		||||
	defer pool.Put(rbuf)
 | 
			
		||||
 | 
			
		||||
	if _, err := io.ReadFull(s, rbuf); err != nil {
 | 
			
		||||
		s.Scope().ReleaseMemory(2 * PingSize)
 | 
			
		||||
		pool.Put(buf)
 | 
			
		||||
		pool.Put(rbuf)
 | 
			
		||||
		return 0, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if !bytes.Equal(buf, rbuf) {
 | 
			
		||||
		s.Scope().ReleaseMemory(2 * PingSize)
 | 
			
		||||
		pool.Put(buf)
 | 
			
		||||
		pool.Put(rbuf)
 | 
			
		||||
		return 0, errors.New("ping packet was incorrect")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	s.Scope().ReleaseMemory(2 * PingSize)
 | 
			
		||||
	pool.Put(buf)
 | 
			
		||||
	pool.Put(rbuf)
 | 
			
		||||
	return time.Since(before), nil
 | 
			
		||||
}
 | 
			
		||||
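
The Ping producer goroutine and the ping round trip above close the result channel, cancel the context, release reserved memory and return pooled buffers explicitly on every path. A sketch of the producer half only; probe stands in for one ping exchange:

package sketch

import (
	"context"
	"time"
)

// stream emits round-trip times until the context ends or a probe fails;
// close(out) and cancel() run on every exit of the goroutine, not via defer.
func stream(ctx context.Context, probe func() (time.Duration, error)) <-chan time.Duration {
	ctx, cancel := context.WithCancel(ctx)
	out := make(chan time.Duration)
	go func() {
		for ctx.Err() == nil {
			rtt, err := probe()
			if err != nil {
				break
			}
			select {
			case out <- rtt:
			case <-ctx.Done():
				close(out)
				cancel()
				return
			}
		}
		close(out)
		cancel()
	}()
	return out
}
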
 | 
			
		||||
@ -194,15 +194,15 @@ func (t *transport) holePunch(ctx context.Context, raddr ma.Multiaddr, p peer.ID
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	defer tr.DecreaseCount()
 | 
			
		||||
 | 
			
		||||
	ctx, cancel := context.WithTimeout(ctx, HolePunchTimeout)
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	key := holePunchKey{addr: addr.String(), peer: p}
 | 
			
		||||
	t.holePunchingMx.Lock()
 | 
			
		||||
	if _, ok := t.holePunching[key]; ok {
 | 
			
		||||
		t.holePunchingMx.Unlock()
 | 
			
		||||
		tr.DecreaseCount()
 | 
			
		||||
		cancel()
 | 
			
		||||
		return nil, fmt.Errorf("already punching hole for %s", addr)
 | 
			
		||||
	}
 | 
			
		||||
	connCh := make(chan tpt.CapableConn, 1)
 | 
			
		||||
@ -210,11 +210,6 @@ func (t *transport) holePunch(ctx context.Context, raddr ma.Multiaddr, p peer.ID
 | 
			
		||||
	t.holePunchingMx.Unlock()
 | 
			
		||||
 | 
			
		||||
	var timer *time.Timer
 | 
			
		||||
	defer func() {
 | 
			
		||||
		if timer != nil {
 | 
			
		||||
			timer.Stop()
 | 
			
		||||
		}
 | 
			
		||||
	}()
 | 
			
		||||
 | 
			
		||||
	payload := make([]byte, 64)
 | 
			
		||||
	var punchErr error
 | 
			
		||||
@ -247,6 +242,11 @@ loop:
 | 
			
		||||
			t.holePunchingMx.Lock()
 | 
			
		||||
			delete(t.holePunching, key)
 | 
			
		||||
			t.holePunchingMx.Unlock()
 | 
			
		||||
			tr.DecreaseCount()
 | 
			
		||||
			cancel()
 | 
			
		||||
			if timer != nil {
 | 
			
		||||
				timer.Stop()
 | 
			
		||||
			}
 | 
			
		||||
			return c, nil
 | 
			
		||||
		case <-timer.C:
 | 
			
		||||
		case <-ctx.Done():
 | 
			
		||||
@ -256,14 +256,24 @@ loop:
 | 
			
		||||
	}
 | 
			
		||||
	// we only arrive here if punchErr != nil
 | 
			
		||||
	t.holePunchingMx.Lock()
 | 
			
		||||
	defer func() {
 | 
			
		||||
		delete(t.holePunching, key)
 | 
			
		||||
		t.holePunchingMx.Unlock()
 | 
			
		||||
	}()
 | 
			
		||||
	select {
 | 
			
		||||
	case c := <-t.holePunching[key].connCh:
 | 
			
		||||
		tr.DecreaseCount()
 | 
			
		||||
		cancel()
 | 
			
		||||
		if timer != nil {
 | 
			
		||||
			timer.Stop()
 | 
			
		||||
		}
 | 
			
		||||
		delete(t.holePunching, key)
 | 
			
		||||
		t.holePunchingMx.Unlock()
 | 
			
		||||
		return c, nil
 | 
			
		||||
	default:
 | 
			
		||||
		tr.DecreaseCount()
 | 
			
		||||
		cancel()
 | 
			
		||||
		if timer != nil {
 | 
			
		||||
			timer.Stop()
 | 
			
		||||
		}
 | 
			
		||||
		delete(t.holePunching, key)
 | 
			
		||||
		t.holePunchingMx.Unlock()
 | 
			
		||||
		return nil, punchErr
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
@ -294,7 +304,6 @@ func (t *transport) Listen(addr ma.Multiaddr) (tpt.Listener, error) {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	t.listenersMu.Lock()
 | 
			
		||||
	defer t.listenersMu.Unlock()
 | 
			
		||||
	listeners := t.listeners[udpAddr.String()]
 | 
			
		||||
	var underlyingListener *listener
 | 
			
		||||
	var acceptRunner *acceptLoopRunner
 | 
			
		||||
@ -304,16 +313,19 @@ func (t *transport) Listen(addr ma.Multiaddr) (tpt.Listener, error) {
 | 
			
		||||
		acceptRunner = listeners[0].acceptRunnner
 | 
			
		||||
		// Make sure our underlying listener is listening on the specified QUIC version
 | 
			
		||||
		if _, ok := underlyingListener.localMultiaddrs[version]; !ok {
 | 
			
		||||
			t.listenersMu.Unlock()
 | 
			
		||||
			return nil, fmt.Errorf("can't listen on quic version %v, underlying listener doesn't support it", version)
 | 
			
		||||
		}
 | 
			
		||||
	} else {
 | 
			
		||||
		ln, err := t.connManager.ListenQUIC(addr, &tlsConf, t.allowWindowIncrease)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			t.listenersMu.Unlock()
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		l, err := newListener(ln, t, t.localPeer, t.privKey, t.rcmgr)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			_ = ln.Close()
 | 
			
		||||
			t.listenersMu.Unlock()
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		underlyingListener = &l
 | 
			
		||||
@ -335,7 +347,7 @@ func (t *transport) Listen(addr ma.Multiaddr) (tpt.Listener, error) {
 | 
			
		||||
 | 
			
		||||
	listeners = append(listeners, l)
 | 
			
		||||
	t.listeners[udpAddr.String()] = listeners
 | 
			
		||||
 | 
			
		||||
	t.listenersMu.Unlock()
 | 
			
		||||
	return l, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -373,7 +385,6 @@ func (t *transport) Close() error {
 | 
			
		||||
 | 
			
		||||
func (t *transport) CloseVirtualListener(l *virtualListener) error {
 | 
			
		||||
	t.listenersMu.Lock()
 | 
			
		||||
	defer t.listenersMu.Unlock()
 | 
			
		||||
 | 
			
		||||
	var err error
 | 
			
		||||
	listeners := t.listeners[l.udpAddr]
 | 
			
		||||
@ -381,6 +392,7 @@ func (t *transport) CloseVirtualListener(l *virtualListener) error {
 | 
			
		||||
		// This is the last virtual listener here, so we can close the underlying listener
 | 
			
		||||
		err = l.listener.Close()
 | 
			
		||||
		delete(t.listeners, l.udpAddr)
 | 
			
		||||
		t.listenersMu.Unlock()
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -394,6 +406,6 @@ func (t *transport) CloseVirtualListener(l *virtualListener) error {
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	t.listenersMu.Unlock()
 | 
			
		||||
	return nil
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -89,17 +89,18 @@ func (c *ConnManager) ListenQUIC(addr ma.Multiaddr, tlsConf *tls.Config, allowWi
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	c.quicListenersMu.Lock()
 | 
			
		||||
	defer c.quicListenersMu.Unlock()
 | 
			
		||||
 | 
			
		||||
	key := laddr.String()
 | 
			
		||||
	entry, ok := c.quicListeners[key]
 | 
			
		||||
	if !ok {
 | 
			
		||||
		tr, err := c.transportForListen(netw, laddr)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			c.quicListenersMu.Unlock()
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		ln, err := newQuicListener(tr, c.serverConfig)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			c.quicListenersMu.Unlock()
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		key = tr.LocalAddr().String()
 | 
			
		||||
@ -110,16 +111,17 @@ func (c *ConnManager) ListenQUIC(addr ma.Multiaddr, tlsConf *tls.Config, allowWi
 | 
			
		||||
		if entry.refCount <= 0 {
 | 
			
		||||
			entry.ln.Close()
 | 
			
		||||
		}
 | 
			
		||||
		c.quicListenersMu.Unlock()
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	entry.refCount++
 | 
			
		||||
	c.quicListeners[key] = entry
 | 
			
		||||
	c.quicListenersMu.Unlock()
 | 
			
		||||
	return l, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *ConnManager) onListenerClosed(key string) {
 | 
			
		||||
	c.quicListenersMu.Lock()
 | 
			
		||||
	defer c.quicListenersMu.Unlock()
 | 
			
		||||
 | 
			
		||||
	entry := c.quicListeners[key]
 | 
			
		||||
	entry.refCount = entry.refCount - 1
 | 
			
		||||
@ -129,6 +131,7 @@ func (c *ConnManager) onListenerClosed(key string) {
 | 
			
		||||
	} else {
 | 
			
		||||
		c.quicListeners[key] = entry
 | 
			
		||||
	}
 | 
			
		||||
	c.quicListenersMu.Unlock()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *ConnManager) transportForListen(network string, laddr *net.UDPAddr) (refCountedQuicTransport, error) {
 | 
			
		||||
 | 
			
		||||
@ -55,16 +55,18 @@ func newQuicListener(tr refCountedQuicTransport, quicConfig *quic.Config) (*quic
 | 
			
		||||
		SessionTicketsDisabled: true, // This is set in the client config, but we set it here as well: https://github.com/quic-go/quic-go/issues/4029
 | 
			
		||||
		GetConfigForClient: func(info *tls.ClientHelloInfo) (*tls.Config, error) {
 | 
			
		||||
			cl.protocolsMu.Lock()
 | 
			
		||||
			defer cl.protocolsMu.Unlock()
 | 
			
		||||
			for _, proto := range info.SupportedProtos {
 | 
			
		||||
				if entry, ok := cl.protocols[proto]; ok {
 | 
			
		||||
					conf := entry.tlsConf
 | 
			
		||||
					if conf.GetConfigForClient != nil {
 | 
			
		||||
						cl.protocolsMu.Unlock()
 | 
			
		||||
						return conf.GetConfigForClient(info)
 | 
			
		||||
					}
 | 
			
		||||
					cl.protocolsMu.Unlock()
 | 
			
		||||
					return conf, nil
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
			cl.protocolsMu.Unlock()
 | 
			
		||||
			return nil, fmt.Errorf("no supported protocol found. offered: %+v", info.SupportedProtos)
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
@ -81,25 +83,27 @@ func newQuicListener(tr refCountedQuicTransport, quicConfig *quic.Config) (*quic
 | 
			
		||||
 | 
			
		||||
func (l *quicListener) allowWindowIncrease(conn quic.Connection, delta uint64) bool {
 | 
			
		||||
	l.protocolsMu.Lock()
 | 
			
		||||
	defer l.protocolsMu.Unlock()
 | 
			
		||||
 | 
			
		||||
	conf, ok := l.protocols[conn.ConnectionState().TLS.NegotiatedProtocol]
 | 
			
		||||
	if !ok {
 | 
			
		||||
		l.protocolsMu.Unlock()
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
	l.protocolsMu.Unlock()
 | 
			
		||||
	return conf.allowWindowIncrease(conn, delta)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *quicListener) Add(tlsConf *tls.Config, allowWindowIncrease func(conn quic.Connection, delta uint64) bool, onRemove func()) (Listener, error) {
 | 
			
		||||
	l.protocolsMu.Lock()
 | 
			
		||||
	defer l.protocolsMu.Unlock()
 | 
			
		||||
 | 
			
		||||
	if len(tlsConf.NextProtos) == 0 {
 | 
			
		||||
		l.protocolsMu.Unlock()
 | 
			
		||||
		return nil, errors.New("no ALPN found in tls.Config")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, proto := range tlsConf.NextProtos {
 | 
			
		||||
		if _, ok := l.protocols[proto]; ok {
 | 
			
		||||
			l.protocolsMu.Unlock()
 | 
			
		||||
			return nil, fmt.Errorf("already listening for protocol %s", proto)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
@ -119,18 +123,21 @@ func (l *quicListener) Add(tlsConf *tls.Config, allowWindowIncrease func(conn qu
 | 
			
		||||
			allowWindowIncrease: allowWindowIncrease,
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	l.protocolsMu.Unlock()
 | 
			
		||||
	return ln, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *quicListener) Run() error {
 | 
			
		||||
	defer close(l.running)
 | 
			
		||||
	defer l.transport.DecreaseCount()
 | 
			
		||||
	for {
 | 
			
		||||
		conn, err := l.l.Accept(context.Background())
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			if errors.Is(err, quic.ErrServerClosed) || strings.Contains(err.Error(), "use of closed network connection") {
 | 
			
		||||
				close(l.running)
 | 
			
		||||
				l.transport.DecreaseCount()
 | 
			
		||||
				return transport.ErrListenerClosed
 | 
			
		||||
			}
 | 
			
		||||
			close(l.running)
 | 
			
		||||
			l.transport.DecreaseCount()
 | 
			
		||||
			return err
 | 
			
		||||
		}
 | 
			
		||||
		proto := conn.ConnectionState().TLS.NegotiatedProtocol
 | 
			
		||||
@ -139,6 +146,8 @@ func (l *quicListener) Run() error {
 | 
			
		||||
		ln, ok := l.protocols[proto]
 | 
			
		||||
		if !ok {
 | 
			
		||||
			l.protocolsMu.Unlock()
 | 
			
		||||
			close(l.running)
 | 
			
		||||
			l.transport.DecreaseCount()
 | 
			
		||||
			return fmt.Errorf("negotiated unknown protocol: %s", proto)
 | 
			
		||||
		}
 | 
			
		||||
		ln.ln.add(conn)
 | 
			
		||||
 | 
			
		||||
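The QUIC listener hunks above trade defer-based unlocking for explicit Unlock() calls on every return path, so the mutex is released before potentially blocking work such as the nested GetConfigForClient callback. A minimal, self-contained sketch of that pattern follows; the registry type, its protocols map, and lookup are hypothetical names used only to illustrate the shape, not code from this repository.

// Sketch: release the lock explicitly on each branch instead of deferring it.
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu        sync.Mutex
	protocols map[string]string
}

// lookup unlocks on every return path rather than via defer.
func (r *registry) lookup(proto string) (string, error) {
	r.mu.Lock()
	conf, ok := r.protocols[proto]
	if !ok {
		r.mu.Unlock()
		return "", fmt.Errorf("no supported protocol found: %s", proto)
	}
	r.mu.Unlock()
	return conf, nil
}

func main() {
	r := &registry{protocols: map[string]string{"h3": "tls-config-for-h3"}}
	if conf, err := r.lookup("h3"); err == nil {
		fmt.Println(conf)
	}
}

The cost of dropping defer is that each early return has to carry its own Unlock(), which is exactly what the added lines in the hunks above do.
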
@ -82,8 +82,8 @@ func NewUDPMux(socket net.PacketConn) *UDPMux {
func (mux *UDPMux) Start() {
	mux.wg.Add(1)
	go func() {
		defer mux.wg.Done()
		mux.readLoop()
		mux.wg.Done()
	}()
}

@ -264,7 +264,6 @@ func (mux *UDPMux) RemoveConnByUfrag(ufrag string) {
	}

	mux.mx.Lock()
	defer mux.mx.Unlock()

	for _, isIPv6 := range [...]bool{true, false} {
		key := ufragConnKey{ufrag: ufrag, isIPv6: isIPv6}
@ -276,17 +275,18 @@ func (mux *UDPMux) RemoveConnByUfrag(ufrag string) {
			delete(mux.ufragAddrMap, key)
		}
	}
	mux.mx.Unlock()
}

func (mux *UDPMux) getOrCreateConn(ufrag string, isIPv6 bool, _ *UDPMux, addr net.Addr) (created bool, _ *muxedConnection) {
	key := ufragConnKey{ufrag: ufrag, isIPv6: isIPv6}

	mux.mx.Lock()
	defer mux.mx.Unlock()

	if conn, ok := mux.ufragMap[key]; ok {
		mux.addrMap[addr.String()] = conn
		mux.ufragAddrMap[key] = append(mux.ufragAddrMap[key], addr)
		mux.mx.Unlock()
		return false, conn
	}

@ -294,5 +294,6 @@ func (mux *UDPMux) getOrCreateConn(ufrag string, isIPv6 bool, _ *UDPMux, addr ne
	mux.ufragMap[key] = conn
	mux.addrMap[addr.String()] = conn
	mux.ufragAddrMap[key] = append(mux.ufragAddrMap[key], addr)
	mux.mx.Unlock()
	return true, conn
}

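The UDPMux hunks apply the same discipline to the ufrag-keyed connection table: the lookup, the insert, and the secondary address index are all updated under a single lock, which is released explicitly on both the found path and the created path. A small hedged sketch of that get-or-create shape; the connTable, connKey, and conn names below are illustrative only and do not exist in this repository.

// Sketch: get-or-create an entry while holding one mutex, unlocking explicitly.
package main

import (
	"fmt"
	"sync"
)

type connKey struct {
	ufrag  string
	isIPv6 bool
}

type conn struct{ ufrag string }

type connTable struct {
	mu     sync.Mutex
	byKey  map[connKey]*conn
	byAddr map[string]*conn
}

// getOrCreate reports whether a new entry was made for the given ufrag.
func (t *connTable) getOrCreate(ufrag string, isIPv6 bool, addr string) (bool, *conn) {
	key := connKey{ufrag: ufrag, isIPv6: isIPv6}

	t.mu.Lock()
	if c, ok := t.byKey[key]; ok {
		t.byAddr[addr] = c
		t.mu.Unlock()
		return false, c
	}

	c := &conn{ufrag: ufrag}
	t.byKey[key] = c
	t.byAddr[addr] = c
	t.mu.Unlock()
	return true, c
}

func main() {
	t := &connTable{byKey: map[connKey]*conn{}, byAddr: map[string]*conn{}}
	created, _ := t.getOrCreate("abcd", false, "203.0.113.1:4242")
	fmt.Println("created:", created) // true on the first call, false afterwards
}
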
@ -40,10 +40,10 @@ func NewConn(raw *ws.Conn, secure bool) *Conn {

func (c *Conn) Read(b []byte) (int, error) {
	c.readLock.Lock()
	defer c.readLock.Unlock()

	if c.reader == nil {
		if err := c.prepNextReader(); err != nil {
			c.readLock.Unlock()
			return 0, err
		}
	}
@ -55,15 +55,18 @@ func (c *Conn) Read(b []byte) (int, error) {
			c.reader = nil

			if n > 0 {
				c.readLock.Unlock()
				return n, nil
			}

			if err := c.prepNextReader(); err != nil {
				c.readLock.Unlock()
				return 0, err
			}

			// explicitly looping
		default:
			c.readLock.Unlock()
			return n, err
		}
	}
@ -90,12 +93,13 @@ func (c *Conn) prepNextReader() error {

func (c *Conn) Write(b []byte) (n int, err error) {
	c.writeLock.Lock()
	defer c.writeLock.Unlock()

	if err := c.Conn.WriteMessage(c.DefaultMessageType, b); err != nil {
		c.writeLock.Unlock()
		return 0, err
	}

	c.writeLock.Unlock()
	return len(b), nil
}

@ -148,9 +152,9 @@ func (c *Conn) SetWriteDeadline(t time.Time) error {
	// deadline.

	c.writeLock.Lock()
	defer c.writeLock.Unlock()

	return c.Conn.SetWriteDeadline(t)
	err := c.Conn.SetWriteDeadline(t)
	c.writeLock.Unlock()
	return err
}

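The websocket Conn hunks serialize reads and writes with dedicated locks because gorilla-style websocket connections support only one concurrent reader and one concurrent writer; as above, those locks are now released explicitly on each return path instead of via defer. A minimal sketch of the write side, with messageWriter standing in for the real websocket connection (it is not an API of this repository).

// Sketch: serialize frame writes with a mutex, unlocking on every return path.
package main

import (
	"fmt"
	"sync"
)

type messageWriter interface {
	WriteMessage(messageType int, data []byte) error
}

// memWriter is an in-memory stand-in so the sketch runs without a network.
type memWriter struct{ frames [][]byte }

func (m *memWriter) WriteMessage(_ int, data []byte) error {
	m.frames = append(m.frames, append([]byte(nil), data...))
	return nil
}

type wsConn struct {
	writeLock sync.Mutex
	raw       messageWriter
	msgType   int
}

func (c *wsConn) Write(b []byte) (int, error) {
	c.writeLock.Lock()
	if err := c.raw.WriteMessage(c.msgType, b); err != nil {
		c.writeLock.Unlock()
		return 0, err
	}
	c.writeLock.Unlock()
	return len(b), nil
}

func main() {
	c := &wsConn{raw: &memWriter{}, msgType: 2} // 2 is the binary message type in gorilla/websocket
	n, err := c.Write([]byte("hello"))
	fmt.Println(n, err)
}
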
type capableConn struct {

@ -93,12 +93,12 @@ func newListener(a ma.Multiaddr, tlsConf *tls.Config) (*listener, error) {
}

func (l *listener) serve() {
	defer close(l.closed)
	if !l.isWss {
		l.server.Serve(l.nl)
	} else {
		l.server.ServeTLS(l.nl, "", "")
	}
	close(l.closed)
}

func (l *listener) ServeHTTP(w http.ResponseWriter, r *http.Request) {

@ -5,6 +5,7 @@ import (
	"context"
	"encoding/hex"
	"fmt"
	"math/bits"
	"sync"
	"time"

@ -17,8 +18,12 @@ import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/discovery/routing"
	"github.com/libp2p/go-libp2p/p2p/discovery/util"
	rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
	"github.com/libp2p/go-libp2p/p2p/net/connmgr"
	ma "github.com/multiformats/go-multiaddr"
	madns "github.com/multiformats/go-multiaddr-dns"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/zap"
	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
	"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
@ -111,6 +116,23 @@ func NewBlossomSub(
		}
	}

	defaultBootstrapPeers := append([]string{}, p2pConfig.BootstrapPeers...)

	if p2pConfig.Network == 0 {
		defaultBootstrapPeers = config.BootstrapPeers
	}

	bootstrappers := []peer.AddrInfo{}

	for _, peerAddr := range defaultBootstrapPeers {
		peerinfo, err := peer.AddrInfoFromString(peerAddr)
		if err != nil {
			panic(err)
		}

		bootstrappers = append(bootstrappers, *peerinfo)
	}

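The block above resolves the configured bootstrap peers into peer.AddrInfo values once, up front, so the same bootstrappers slice can later feed both the DHT and the resource manager's allowlist. A short sketch of that parsing using only the public go-libp2p core/peer API; the multiaddr below is a well-known public IPFS bootstrapper chosen purely because it parses cleanly, not a Quilibrium peer.

// Sketch: decode bootstrap multiaddr strings into peer.AddrInfo values.
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
)

func main() {
	addrs := []string{
		"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ",
	}

	bootstrappers := make([]peer.AddrInfo, 0, len(addrs))
	for _, s := range addrs {
		info, err := peer.AddrInfoFromString(s)
		if err != nil {
			// A bad bootstrap entry is fatal in the code above; here we only report it.
			fmt.Println("bad bootstrap address:", err)
			continue
		}
		bootstrappers = append(bootstrappers, *info)
	}

	for _, b := range bootstrappers {
		fmt.Println(b.ID, b.Addrs)
	}
}
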
	var privKey crypto.PrivKey
	if p2pConfig.PeerPrivKey != "" {
		peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
@ -136,7 +158,17 @@ func NewBlossomSub(
		if err != nil {
			panic(err)
		}

		rm, err := resourceManager(
			p2pConfig.HighWatermarkConnections,
			bootstrappers,
		)
		if err != nil {
			panic(err)
		}

		opts = append(opts, libp2p.ConnectionManager(cm))
		opts = append(opts, libp2p.ResourceManager(rm))
	}

	bs := &BlossomSub{
@ -156,7 +188,14 @@ func NewBlossomSub(

	logger.Info("established peer id", zap.String("peer_id", h.ID().String()))

	kademliaDHT := initDHT(ctx, p2pConfig, logger, h, isBootstrapPeer)
	kademliaDHT := initDHT(
		ctx,
		p2pConfig,
		logger,
		h,
		isBootstrapPeer,
		bootstrappers,
	)
	routingDiscovery := routing.NewRoutingDiscovery(kademliaDHT)
	util.Advertise(ctx, routingDiscovery, getNetworkNamespace(p2pConfig.Network))

@ -223,6 +262,96 @@ func NewBlossomSub(
	return bs
}

// adjusted from Lotus' reference implementation, addressing
// https://github.com/libp2p/go-libp2p/issues/1640
func resourceManager(highWatermark uint, bootstrappers []peer.AddrInfo) (
	network.ResourceManager,
	error,
) {
	defaultLimits := rcmgr.DefaultLimits

	libp2p.SetDefaultServiceLimits(&defaultLimits)

	defaultLimits.SystemBaseLimit.Memory = 1 << 28
	defaultLimits.SystemLimitIncrease.Memory = 1 << 28
	defaultLimitConfig := defaultLimits.AutoScale()

	changes := rcmgr.PartialLimitConfig{}

	if defaultLimitConfig.ToPartialLimitConfig().System.Memory > 2<<30 {
		changes.System.Memory = 2 << 30
	}

	maxconns := uint(highWatermark)
	if rcmgr.LimitVal(3*maxconns) > defaultLimitConfig.
		ToPartialLimitConfig().System.ConnsInbound {
		changes.System.ConnsInbound = rcmgr.LimitVal(1 << bits.Len(3*maxconns))
		changes.System.ConnsOutbound = rcmgr.LimitVal(1 << bits.Len(3*maxconns))
		changes.System.Conns = rcmgr.LimitVal(1 << bits.Len(6*maxconns))
		changes.System.StreamsInbound = rcmgr.LimitVal(1 << bits.Len(36*maxconns))
		changes.System.StreamsOutbound = rcmgr.LimitVal(1 << bits.Len(216*maxconns))
		changes.System.Streams = rcmgr.LimitVal(1 << bits.Len(216*maxconns))

		if rcmgr.LimitVal(3*maxconns) > defaultLimitConfig.
			ToPartialLimitConfig().System.FD {
			changes.System.FD = rcmgr.LimitVal(1 << bits.Len(3*maxconns))
		}

		changes.ServiceDefault.StreamsInbound = rcmgr.LimitVal(
			1 << bits.Len(12*maxconns),
		)
		changes.ServiceDefault.StreamsOutbound = rcmgr.LimitVal(
			1 << bits.Len(48*maxconns),
		)
		changes.ServiceDefault.Streams = rcmgr.LimitVal(1 << bits.Len(48*maxconns))
		changes.ProtocolDefault.StreamsInbound = rcmgr.LimitVal(
			1 << bits.Len(12*maxconns),
		)
		changes.ProtocolDefault.StreamsOutbound = rcmgr.LimitVal(
			1 << bits.Len(48*maxconns),
		)
		changes.ProtocolDefault.Streams = rcmgr.LimitVal(1 << bits.Len(48*maxconns))
	}

	changedLimitConfig := changes.Build(defaultLimitConfig)

	limiter := rcmgr.NewFixedLimiter(changedLimitConfig)

	str, err := rcmgr.NewStatsTraceReporter()
	if err != nil {
		return nil, errors.Wrap(err, "resource manager")
	}

	rcmgr.MustRegisterWith(prometheus.DefaultRegisterer)

	// Metrics
	opts := append(
		[]rcmgr.Option{},
		rcmgr.WithTraceReporter(str),
	)

	resolver := madns.DefaultResolver
	var bootstrapperMaddrs []ma.Multiaddr
	for _, pi := range bootstrappers {
		for _, addr := range pi.Addrs {
			resolved, err := resolver.Resolve(context.Background(), addr)
			if err != nil {
				continue
			}
			bootstrapperMaddrs = append(bootstrapperMaddrs, resolved...)
		}
	}

	opts = append(opts, rcmgr.WithAllowlistedMultiaddrs(bootstrapperMaddrs))

	mgr, err := rcmgr.NewResourceManager(limiter, opts...)
	if err != nil {
		return nil, errors.Wrap(err, "resource manager")
	}

	return mgr, nil
}

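resourceManager above scales the libp2p resource limits from the configured connection high watermark: each bound is a fixed multiple of the watermark, rounded up to a power of two with 1 << bits.Len(n), and resolved bootstrapper multiaddrs are exempted through the allowlist. A hedged arithmetic check of the system-scope numbers follows; the watermark value of 160 is invented purely for illustration.

// Sketch: how the watermark multiples round up to powers of two.
package main

import (
	"fmt"
	"math/bits"
)

// roundUpPow2 mirrors the 1 << bits.Len(n) idiom used in the diff above.
func roundUpPow2(n uint) uint { return 1 << bits.Len(n) }

func main() {
	maxconns := uint(160) // hypothetical HighWatermarkConnections value

	fmt.Println("conns in/out :", roundUpPow2(3*maxconns))   // 512
	fmt.Println("conns total  :", roundUpPow2(6*maxconns))   // 1024
	fmt.Println("streams in   :", roundUpPow2(36*maxconns))  // 8192
	fmt.Println("streams out  :", roundUpPow2(216*maxconns)) // 65536
}

Service and protocol default stream limits are derived the same way from the smaller 12x and 48x multiples.
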
func (b *BlossomSub) PublishToBitmask(bitmask []byte, data []byte) error {
	return b.ps.Publish(b.ctx, bitmask, data)
}
@ -303,26 +432,11 @@ func initDHT(
	logger *zap.Logger,
	h host.Host,
	isBootstrapPeer bool,
	bootstrappers []peer.AddrInfo,
) *dht.IpfsDHT {
	logger.Info("establishing dht")
	var kademliaDHT *dht.IpfsDHT
	var err error
	defaultBootstrapPeers := append([]string{}, p2pConfig.BootstrapPeers...)

	if p2pConfig.Network == 0 {
		defaultBootstrapPeers = config.BootstrapPeers
	}

	bootstrappers := []peer.AddrInfo{}

	for _, peerAddr := range defaultBootstrapPeers {
		peerinfo, err := peer.AddrInfoFromString(peerAddr)
		if err != nil {
			panic(err)
		}

		bootstrappers = append(bootstrappers, *peerinfo)
	}

	if isBootstrapPeer {
		kademliaDHT, err = dht.New(
