From e16f35e99a919aaa04ef5ca60811c2ddd59f3e25 Mon Sep 17 00:00:00 2001 From: Andrew Gillis <11790789+gammazero@users.noreply.github.com> Date: Thu, 26 Feb 2026 12:40:50 -1000 Subject: [PATCH] refactor: apply go fix modernizers from Go 1.26 (#3463) Co-authored-by: sukun --- .golangci.yml | 4 ++ config/config.go | 2 +- core/discovery/options.go | 2 +- core/event/bus.go | 14 +++--- core/internal/catch/catch.go | 2 +- core/metrics/bandwidth_test.go | 16 +++---- core/network/network.go | 2 +- core/peer/addrinfo.go | 4 +- core/peer/peer.go | 4 +- core/peer/record_test.go | 2 +- core/peerstore/peerstore.go | 4 +- core/pnet/codec_test.go | 6 +-- core/record/record.go | 4 +- core/routing/options.go | 10 ++-- core/routing/query_serde.go | 2 +- core/routing/query_test.go | 2 +- core/test/addrs.go | 12 ++--- gologshim/gologshim.go | 2 +- options.go | 6 +-- p2p/discovery/backoff/backoff_test.go | 10 ++-- p2p/discovery/backoff/backoffcache_test.go | 6 +-- .../backoff/backoffconnector_test.go | 4 +- p2p/discovery/mdns/mdns.go | 2 +- p2p/discovery/mdns/mdns_test.go | 10 ++-- p2p/host/autonat/svc_test.go | 2 +- p2p/host/autorelay/autorelay_test.go | 31 +++++-------- p2p/host/basic/addrs_manager_test.go | 8 ++-- .../basic/addrs_reachability_tracker_test.go | 17 +++---- p2p/host/basic/basic_host_test.go | 8 +--- p2p/host/eventbus/basic.go | 38 +++++++-------- p2p/host/eventbus/basic_test.go | 42 ++++++++--------- p2p/host/eventbus/opts.go | 10 ++-- p2p/host/observedaddrs/manager.go | 5 +- p2p/host/observedaddrs/manager_test.go | 46 +++++++++---------- p2p/host/peerstore/metrics_test.go | 2 +- p2p/host/peerstore/pstoreds/metadata.go | 6 +-- p2p/host/peerstore/pstoreds/peerstore.go | 2 +- .../peerstore/pstoremem/addr_book_test.go | 26 +++++------ p2p/host/peerstore/pstoremem/inmem_test.go | 6 +-- p2p/host/peerstore/pstoremem/metadata.go | 10 ++-- p2p/host/peerstore/pstoremem/peerstore.go | 4 +- p2p/host/peerstore/test/addr_book_suite.go | 2 +- p2p/host/peerstore/test/benchmarks_suite.go | 6 +-- p2p/host/peerstore/test/keybook_suite.go | 4 +- p2p/host/peerstore/test/peerstore_suite.go | 24 +++++----- p2p/host/peerstore/test/utils.go | 18 +++----- p2p/host/resource-manager/allowlist_test.go | 2 +- .../resource-manager/conn_limiter_test.go | 2 +- p2p/host/resource-manager/error.go | 12 ++--- p2p/host/resource-manager/extapi.go | 5 +- p2p/host/resource-manager/limit_defaults.go | 33 ++++++------- p2p/host/resource-manager/rcmgr.go | 12 ++--- p2p/host/resource-manager/scope_test.go | 6 +-- p2p/host/resource-manager/trace.go | 8 ++-- p2p/http/auth/auth_test.go | 4 +- p2p/http/auth/internal/handshake/handshake.go | 8 ++-- p2p/http/libp2phttp.go | 5 +- p2p/metricshelper/pool_test.go | 2 +- p2p/muxer/testsuite/mux.go | 12 ++--- p2p/net/connmgr/bench_test.go | 2 +- p2p/net/connmgr/connmgr.go | 5 +- p2p/net/connmgr/connmgr_test.go | 40 ++++++++-------- p2p/net/mock/mock.go | 2 +- p2p/net/mock/mock_notif_test.go | 13 ++---- p2p/net/mock/mock_test.go | 8 ++-- p2p/net/nat/internal/nat/natpmp.go | 2 +- p2p/net/nat/internal/nat/upnp.go | 2 +- p2p/net/nat/nat_test.go | 20 ++++---- p2p/net/pnet/psk_conn_test.go | 2 +- p2p/net/reuseport/transport_test.go | 9 ++-- p2p/net/swarm/black_hole_detector_test.go | 20 ++++---- p2p/net/swarm/dial_ranker.go | 4 +- p2p/net/swarm/dial_sync_test.go | 6 +-- p2p/net/swarm/dial_test.go | 16 +++---- p2p/net/swarm/dial_worker_test.go | 38 +++++++-------- p2p/net/swarm/limiter_test.go | 10 ++-- p2p/net/swarm/peers_test.go | 2 +- p2p/net/swarm/resolve_test.go | 8 ++-- p2p/net/swarm/simul_test.go | 2 
+- p2p/net/swarm/swarm_dial.go | 10 +--- p2p/net/swarm/swarm_dial_test.go | 2 +- p2p/net/swarm/swarm_event_test.go | 24 +++++----- p2p/net/swarm/swarm_listen.go | 7 +-- p2p/net/swarm/swarm_net_test.go | 4 +- p2p/net/swarm/swarm_notif_test.go | 8 ++-- p2p/net/swarm/swarm_test.go | 8 ++-- p2p/net/upgrader/listener_test.go | 10 ++-- p2p/protocol/autonatv2/autonat.go | 2 +- p2p/protocol/autonatv2/autonat_test.go | 7 +-- p2p/protocol/autonatv2/server_test.go | 24 +++++----- p2p/protocol/circuitv2/client/dial.go | 4 +- p2p/protocol/circuitv2/client/handlers.go | 2 +- .../circuitv2/relay/constraints_test.go | 8 ++-- p2p/protocol/circuitv2/relay/relay_test.go | 8 ++-- p2p/protocol/holepunch/tracer.go | 10 ++-- p2p/protocol/identify/id.go | 18 +++----- p2p/protocol/identify/id_test.go | 8 ++-- p2p/protocol/ping/ping_test.go | 2 +- p2p/security/noise/benchmark_test.go | 2 +- p2p/security/noise/rw.go | 5 +- p2p/security/noise/transport.go | 7 ++- p2p/security/tls/crypto.go | 2 +- p2p/test/backpressure/backpressure_test.go | 2 +- p2p/test/basichost/basic_host_test.go | 4 +- p2p/test/negotiation/muxer_test.go | 2 - p2p/test/negotiation/security_test.go | 1 - p2p/test/reconnects/reconnect_test.go | 6 +-- p2p/test/resource-manager/echo_test.go | 6 +-- p2p/test/resource-manager/rcmgr_test.go | 2 +- p2p/test/transport/transport_test.go | 10 ++-- p2p/transport/quic/conn_test.go | 2 +- p2p/transport/quic/transport.go | 7 ++- p2p/transport/quicreuse/connmgr_test.go | 12 ++--- p2p/transport/tcp/tcp_test.go | 6 +-- .../internal/sampledconn/sampledconn_test.go | 2 +- p2p/transport/tcpreuse/listener_test.go | 24 +++++----- p2p/transport/testsuite/stream_suite.go | 4 +- p2p/transport/testsuite/transport_suite.go | 6 +-- p2p/transport/testsuite/utils_suite.go | 2 +- p2p/transport/webrtc/hex.go | 2 +- p2p/transport/webrtc/logger.go | 10 ++-- p2p/transport/webrtc/stream_test.go | 14 +++--- p2p/transport/webrtc/stream_write.go | 5 +- p2p/transport/webrtc/transport.go | 2 +- p2p/transport/webrtc/transport_test.go | 39 ++++++++-------- p2p/transport/webrtc/udpmux/mux_test.go | 18 ++++---- p2p/transport/websocket/websocket_test.go | 6 +-- .../webtransport/cert_manager_test.go | 2 +- p2p/transport/webtransport/crypto_test.go | 6 +-- p2p/transport/webtransport/transport_test.go | 23 ++++------ x/rate/limiter_test.go | 6 +-- 131 files changed, 553 insertions(+), 631 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 13697f72b..0a277c7c9 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -6,6 +6,10 @@ run: issues: max-issues-per-linter: 0 max-same-issues: 0 + exclude-rules: + - path: _test\.go + linters: + - prealloc linters: enable: diff --git a/config/config.go b/config/config.go index 8e730b19e..602e17e88 100644 --- a/config/config.go +++ b/config/config.go @@ -74,7 +74,7 @@ type AutoNATConfig struct { type Security struct { ID protocol.ID - Constructor interface{} + Constructor any } // Config describes a set of settings for a libp2p node diff --git a/core/discovery/options.go b/core/discovery/options.go index 7b2830526..098597ec6 100644 --- a/core/discovery/options.go +++ b/core/discovery/options.go @@ -11,7 +11,7 @@ type Options struct { Limit int // Other (implementation-specific) options - Other map[interface{}]interface{} + Other map[any]any } // Apply applies the given options to this DiscoveryOpts diff --git a/core/event/bus.go b/core/event/bus.go index 1929f064d..1e2ec5bf4 100644 --- a/core/event/bus.go +++ b/core/event/bus.go @@ -6,17 +6,17 @@ import ( ) // SubscriptionOpt represents a subscriber 
option. Use the options exposed by the implementation of choice. -type SubscriptionOpt = func(interface{}) error +type SubscriptionOpt = func(any) error // EmitterOpt represents an emitter option. Use the options exposed by the implementation of choice. -type EmitterOpt = func(interface{}) error +type EmitterOpt = func(any) error // CancelFunc closes a subscriber. type CancelFunc = func() // wildcardSubscriptionType is a virtual type to represent wildcard // subscriptions. -type wildcardSubscriptionType interface{} +type wildcardSubscriptionType any // WildcardSubscription is the type to subscribe to receive all events // emitted in the eventbus. @@ -30,7 +30,7 @@ type Emitter interface { // calls to Emit will block. // // Calling this function with wrong event type will cause a panic. - Emit(evt interface{}) error + Emit(evt any) error } // Subscription represents a subscription to one or multiple event types. @@ -38,7 +38,7 @@ type Subscription interface { io.Closer // Out returns the channel from which to consume events. - Out() <-chan interface{} + Out() <-chan any // Name returns the name for the subscription Name() string @@ -79,7 +79,7 @@ type Bus interface { // [...] // } // } - Subscribe(eventType interface{}, opts ...SubscriptionOpt) (Subscription, error) + Subscribe(eventType any, opts ...SubscriptionOpt) (Subscription, error) // Emitter creates a new event emitter. // @@ -89,7 +89,7 @@ type Bus interface { // em, err := eventbus.Emitter(new(EventT)) // defer em.Close() // MUST call this after being done with the emitter // em.Emit(EventT{}) - Emitter(eventType interface{}, opts ...EmitterOpt) (Emitter, error) + Emitter(eventType any, opts ...EmitterOpt) (Emitter, error) // GetAllEventTypes returns all the event types that this bus knows about // (having emitters and subscribers). It omits the WildcardSubscription. diff --git a/core/internal/catch/catch.go b/core/internal/catch/catch.go index c61ee2aa3..ccde8fb71 100644 --- a/core/internal/catch/catch.go +++ b/core/internal/catch/catch.go @@ -10,7 +10,7 @@ import ( var panicWriter io.Writer = os.Stderr // HandlePanic handles and logs panics. 
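The core/event and core/internal hunks above are the mechanical half of this patch: every `interface{}` becomes `any`. Since Go 1.18, `any` is a type alias for `interface{}` (not a distinct type), so the rewrite changes no behavior and breaks no callers. A minimal sketch, separate from the patch, showing the two spellings are interchangeable:

```go
package main

import "fmt"

// takeEmpty and any func(any) string variable have identical types:
// `any` is an alias for `interface{}`, so values and functions using
// either spelling are assignable to each other without conversion.
func takeEmpty(v interface{}) string { return fmt.Sprintf("%T", v) }

func main() {
	var takeAny func(v any) string = takeEmpty // same type, assignable
	fmt.Println(takeAny(42), takeEmpty("hi"))  // int string
}
```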
-func HandlePanic(rerr interface{}, err *error, where string) { +func HandlePanic(rerr any, err *error, where string) { if rerr != nil { fmt.Fprintf(panicWriter, "caught panic: %s\n%s\n", rerr, debug.Stack()) *err = fmt.Errorf("panic in %s: %s", where, rerr) diff --git a/core/metrics/bandwidth_test.go b/core/metrics/bandwidth_test.go index 84c507ecc..3bb17c854 100644 --- a/core/metrics/bandwidth_test.go +++ b/core/metrics/bandwidth_test.go @@ -35,15 +35,15 @@ func round(bwc *BandwidthCounter, b *testing.B) { start := make(chan struct{}) var wg sync.WaitGroup wg.Add(10000) - for i := 0; i < 1000; i++ { + for i := range 1000 { p := peer.ID(fmt.Sprintf("peer-%d", i)) - for j := 0; j < 10; j++ { + for j := range 10 { proto := protocol.ID(fmt.Sprintf("bitswap-%d", j)) go func() { defer wg.Done() <-start - for i := 0; i < 1000; i++ { + for range 1000 { bwc.LogSentMessage(100) bwc.LogSentMessageStream(100, proto, p) time.Sleep(1 * time.Millisecond) @@ -60,10 +60,10 @@ func round(bwc *BandwidthCounter, b *testing.B) { func TestBandwidthCounter(t *testing.T) { bwc := NewBandwidthCounter() - for i := 0; i < 40; i++ { - for i := 0; i < 100; i++ { + for range 40 { + for i := range 100 { p := peer.ID(fmt.Sprintf("peer-%d", i)) - for j := 0; j < 2; j++ { + for j := range 2 { proto := protocol.ID(fmt.Sprintf("proto-%d", j)) // make sure the bandwidth counters are active @@ -81,7 +81,7 @@ func TestBandwidthCounter(t *testing.T) { assertProtocols := func(check func(Stats)) { byProtocol := bwc.GetBandwidthByProtocol() require.Len(t, byProtocol, 2, "expected 2 protocols") - for i := 0; i < 2; i++ { + for i := range 2 { p := protocol.ID(fmt.Sprintf("proto-%d", i)) for _, stats := range [...]Stats{bwc.GetBandwidthForProtocol(p), byProtocol[p]} { check(stats) @@ -92,7 +92,7 @@ func TestBandwidthCounter(t *testing.T) { assertPeers := func(check func(Stats)) { byPeer := bwc.GetBandwidthByPeer() require.Len(t, byPeer, 100, "expected 100 peers") - for i := 0; i < 100; i++ { + for i := range 100 { p := peer.ID(fmt.Sprintf("peer-%d", i)) for _, stats := range [...]Stats{bwc.GetBandwidthForPeer(p), byPeer[p]} { check(stats) diff --git a/core/network/network.go b/core/network/network.go index 7c0ad949e..eeab32c6b 100644 --- a/core/network/network.go +++ b/core/network/network.go @@ -123,7 +123,7 @@ type Stats struct { // relay. Limited bool // Extra stores additional metadata about this connection. - Extra map[interface{}]interface{} + Extra map[any]any } // StreamHandler is the type of function used to listen for diff --git a/core/peer/addrinfo.go b/core/peer/addrinfo.go index 0c0d34fda..a4f88af54 100644 --- a/core/peer/addrinfo.go +++ b/core/peer/addrinfo.go @@ -118,8 +118,8 @@ func AddrInfoToP2pAddrs(pi *AddrInfo) ([]ma.Multiaddr, error) { return addrs, nil } -func (pi *AddrInfo) Loggable() map[string]interface{} { - return map[string]interface{}{ +func (pi *AddrInfo) Loggable() map[string]any { + return map[string]any{ "peerID": pi.ID.String(), "addrs": pi.Addrs, } diff --git a/core/peer/peer.go b/core/peer/peer.go index ddea302e6..392cbc2ca 100644 --- a/core/peer/peer.go +++ b/core/peer/peer.go @@ -42,8 +42,8 @@ const maxInlineKeyLength = 42 type ID string // Loggable returns a pretty peer ID string in loggable JSON format. 
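The bandwidth_test hunks rely on range-over-int, added in Go 1.22: `for i := range n` counts from 0 to n-1, and when the counter is unused the variable disappears entirely (`for range n`), which is what lets the modernizer drop the shadowed and unused loop counters above. A minimal sketch, not part of the patch:

```go
package main

import "fmt"

func main() {
	// Equivalent to: for i := 0; i < 3; i++
	for i := range 3 {
		fmt.Println("index:", i) // 0, 1, 2
	}
	// When the counter itself is never read, drop it entirely.
	count := 0
	for range 3 {
		count++
	}
	fmt.Println("count:", count) // 3
}
```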
-func (id ID) Loggable() map[string]interface{} { - return map[string]interface{}{ +func (id ID) Loggable() map[string]any { + return map[string]any{ "peerID": id.String(), } } diff --git a/core/peer/record_test.go b/core/peer/record_test.go index 4ac8ffba2..b06f11340 100644 --- a/core/peer/record_test.go +++ b/core/peer/record_test.go @@ -57,7 +57,7 @@ func TestSignedPeerRecordFromEnvelope(t *testing.T) { // low clock precision. This makes sure we never get a duplicate. func TestTimestampSeq(t *testing.T) { var last uint64 - for i := 0; i < 1000; i++ { + for range 1000 { next := TimestampSeq() if next <= last { t.Errorf("non-increasing timestamp found: %d <= %d", next, last) diff --git a/core/peerstore/peerstore.go b/core/peerstore/peerstore.go index 6366026c9..70d51e32e 100644 --- a/core/peerstore/peerstore.go +++ b/core/peerstore/peerstore.go @@ -83,8 +83,8 @@ type PeerMetadata interface { // Get / Put is a simple registry for other peer-related key/value pairs. // If we find something we use often, it should become its own set of // methods. This is a last resort. - Get(p peer.ID, key string) (interface{}, error) - Put(p peer.ID, key string, val interface{}) error + Get(p peer.ID, key string) (any, error) + Put(p peer.ID, key string, val any) error // RemovePeer removes all values stored for a peer. RemovePeer(peer.ID) diff --git a/core/pnet/codec_test.go b/core/pnet/codec_test.go index b4b9272d0..2eb71b662 100644 --- a/core/pnet/codec_test.go +++ b/core/pnet/codec_test.go @@ -43,7 +43,7 @@ func testDecodeBad(t *testing.T, windows bool) { func testDecodeHex(t *testing.T, windows bool) { b := bufWithBase("/base16/", windows) - for i := 0; i < 32; i++ { + for range 32 { b.WriteString("FF") } @@ -67,7 +67,7 @@ func TestDecodeB64(t *testing.T) { func testDecodeB64(t *testing.T, windows bool) { b := bufWithBase("/base64/", windows) key := make([]byte, 32) - for i := 0; i < 32; i++ { + for i := range 32 { key[i] = byte(i) } @@ -102,7 +102,7 @@ func TestDecodeBin(t *testing.T) { func testDecodeBin(t *testing.T, windows bool) { b := bufWithBase("/bin/", windows) key := make([]byte, 32) - for i := 0; i < 32; i++ { + for i := range 32 { key[i] = byte(i) } diff --git a/core/record/record.go b/core/record/record.go index 9b98f04f5..eace797c6 100644 --- a/core/record/record.go +++ b/core/record/record.go @@ -96,9 +96,9 @@ func blankRecordForPayloadType(payloadType []byte) (Record, error) { return asRecord, nil } -func getValueType(i interface{}) reflect.Type { +func getValueType(i any) reflect.Type { valueType := reflect.TypeOf(i) - if valueType.Kind() == reflect.Ptr { + if valueType.Kind() == reflect.Pointer { valueType = valueType.Elem() } return valueType diff --git a/core/routing/options.go b/core/routing/options.go index 4b235cbfc..8e74783f6 100644 --- a/core/routing/options.go +++ b/core/routing/options.go @@ -1,5 +1,7 @@ package routing +import "maps" + // Option is a single routing option. type Option func(opts *Options) error @@ -9,7 +11,7 @@ type Options struct { Expired bool Offline bool // Other (ValueStore implementation specific) options. 
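The `getValueType` hunk above also swaps `reflect.Ptr` for `reflect.Pointer`. The two are the same constant (`Ptr` is kept as the old name for the `Pointer` kind), so this is a pure spelling change. A minimal sketch, not part of the patch:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// reflect.Ptr is the old name for reflect.Pointer; they are the
	// same Kind value, so the rewrite cannot change behavior.
	fmt.Println(reflect.Ptr == reflect.Pointer) // true

	v := &struct{ X int }{X: 1}
	typ := reflect.TypeOf(v)
	if typ.Kind() == reflect.Pointer {
		typ = typ.Elem() // unwrap to the struct type, as getValueType does
	}
	fmt.Println(typ.Kind()) // struct
}
```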
- Other map[interface{}]interface{} + Other map[any]any } // Apply applies the given options to this Options @@ -27,10 +29,8 @@ func (opts *Options) ToOption() Option { return func(nopts *Options) error { *nopts = *opts if opts.Other != nil { - nopts.Other = make(map[interface{}]interface{}, len(opts.Other)) - for k, v := range opts.Other { - nopts.Other[k] = v - } + nopts.Other = make(map[any]any, len(opts.Other)) + maps.Copy(nopts.Other, opts.Other) } return nil } diff --git a/core/routing/query_serde.go b/core/routing/query_serde.go index 6b566e0ca..426f8bd01 100644 --- a/core/routing/query_serde.go +++ b/core/routing/query_serde.go @@ -7,7 +7,7 @@ import ( ) func (qe *QueryEvent) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ + return json.Marshal(map[string]any{ "ID": qe.ID.String(), "Type": int(qe.Type), "Responses": qe.Responses, diff --git a/core/routing/query_test.go b/core/routing/query_test.go index 15b4846db..6caeaeda8 100644 --- a/core/routing/query_test.go +++ b/core/routing/query_test.go @@ -15,7 +15,7 @@ func TestEventsCancel(t *testing.T) { wg.Add(2) go func() { defer wg.Done() - for i := 0; i < 100; i++ { + for i := range 100 { PublishQueryEvent(ctx, &QueryEvent{Extra: fmt.Sprint(i)}) } close(goch) diff --git a/core/test/addrs.go b/core/test/addrs.go index e18849c48..f8d6451d8 100644 --- a/core/test/addrs.go +++ b/core/test/addrs.go @@ -2,6 +2,7 @@ package test import ( "fmt" + "slices" "testing" ma "github.com/multiformats/go-multiaddr" @@ -9,7 +10,7 @@ import ( func GenerateTestAddrs(n int) []ma.Multiaddr { out := make([]ma.Multiaddr, n) - for i := 0; i < n; i++ { + for i := range n { a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/1.2.3.4/tcp/%d", i)) if err != nil { continue @@ -26,14 +27,7 @@ func AssertAddressesEqual(t *testing.T, exp, act []ma.Multiaddr) { } for _, a := range exp { - found := false - - for _, b := range act { - if a.Equal(b) { - found = true - break - } - } + found := slices.ContainsFunc(act, a.Equal) if !found { t.Fatalf("expected address %s not found", a) diff --git a/gologshim/gologshim.go b/gologshim/gologshim.go index 57ed3e8aa..ba4362e0a 100644 --- a/gologshim/gologshim.go +++ b/gologshim/gologshim.go @@ -220,7 +220,7 @@ func parseIPFSGoLogEnv(loggingLevelEnvStr string) (slog.Level, map[string]slog.L fallbackLvl := slog.LevelError var systemToLevel map[string]slog.Level if loggingLevelEnvStr != "" { - for _, kvs := range strings.Split(loggingLevelEnvStr, ",") { + for kvs := range strings.SplitSeq(loggingLevelEnvStr, ",") { kv := strings.SplitN(kvs, "=", 2) var lvl slog.Level err := lvl.UnmarshalText([]byte(kv[len(kv)-1])) diff --git a/options.go b/options.go index 0329b7e60..6bf0e2cc2 100644 --- a/options.go +++ b/options.go @@ -70,7 +70,7 @@ func ListenAddrs(addrs ...ma.Multiaddr) Option { // * Host // * Network // * Peerstore -func Security(name string, constructor interface{}) Option { +func Security(name string, constructor any) Option { return func(cfg *Config) error { if cfg.Insecure { return fmt.Errorf("cannot use security transports with an insecure libp2p configuration") @@ -99,7 +99,7 @@ func Muxer(name string, muxer network.Multiplexer) Option { } } -func QUICReuse(constructor interface{}, opts ...quicreuse.Option) Option { +func QUICReuse(constructor any, opts ...quicreuse.Option) Option { return func(cfg *Config) error { tag := `group:"quicreuseopts"` typ := reflect.ValueOf(constructor).Type() @@ -141,7 +141,7 @@ func QUICReuse(constructor interface{}, opts ...quicreuse.Option) Option { // * Public Key 
// * Address filter (filter.Filter) // * Peerstore -func Transport(constructor interface{}, opts ...interface{}) Option { +func Transport(constructor any, opts ...any) Option { return func(cfg *Config) error { // generate a random identifier, so that fx can associate the constructor with its options b := make([]byte, 8) diff --git a/p2p/discovery/backoff/backoff_test.go b/p2p/discovery/backoff/backoff_test.go index e31ea1c80..d68b85504 100644 --- a/p2p/discovery/backoff/backoff_test.go +++ b/p2p/discovery/backoff/backoff_test.go @@ -89,7 +89,7 @@ func minMaxJitterTest(jitter Jitter, t *testing.T) { func TestNoJitter(t *testing.T) { minMaxJitterTest(NoJitter, t) - for i := 0; i < 10; i++ { + for i := range 10 { expected := time.Second * time.Duration(i) if calculated := NoJitter(expected, time.Duration(0), time.Second*100, nil); calculated != expected { t.Fatalf("expected %v, got %v", expected, calculated) @@ -106,7 +106,7 @@ func TestFullJitter(t *testing.T) { histogram := make([]int, numBuckets) - for i := 0; i < (numBuckets-1)*multiplier; i++ { + for range (numBuckets - 1) * multiplier { started := time.Nanosecond * 50 calculated := FullJitter(started, 0, 100, rng) histogram[calculated]++ @@ -148,7 +148,7 @@ func testManyBackoffFactoryHelper(concurrent int, bkf BackoffFactory) { backoffCh := make(chan BackoffStrategy, concurrent) errGrp := errgroup.Group{} - for i := 0; i < concurrent; i++ { + for range concurrent { errGrp.Go(func() (err error) { defer func() { if r := recover(); r != nil { @@ -174,8 +174,8 @@ func testManyBackoffFactoryHelper(concurrent int, bkf BackoffFactory) { } }() - for i := 0; i < 5; i++ { - for j := 0; j < 10; j++ { + for range 5 { + for range 10 { backoff.Delay() } backoff.Reset() diff --git a/p2p/discovery/backoff/backoffcache_test.go b/p2p/discovery/backoff/backoffcache_test.go index 7f80eb87c..95501dddb 100644 --- a/p2p/discovery/backoff/backoffcache_test.go +++ b/p2p/discovery/backoff/backoffcache_test.go @@ -202,7 +202,7 @@ func TestBackoffDiscoverySimultaneousQuery(t *testing.T) { n := 40 advertisers := make([]discovery.Discovery, n) - for i := 0; i < n; i++ { + for i := range n { h := bhost.NewBlankHost(swarmt.GenSwarm(t)) defer h.Close() advertisers[i] = mocks.NewDiscoveryClient(h, discServer) @@ -263,7 +263,7 @@ func TestBackoffDiscoveryCacheCapacity(t *testing.T) { n := 40 advertisers := make([]discovery.Discovery, n) - for i := 0; i < n; i++ { + for i := range n { h := bhost.NewBlankHost(swarmt.GenSwarm(t)) defer h.Close() advertisers[i] = mocks.NewDiscoveryClient(h, discServer) @@ -283,7 +283,7 @@ func TestBackoffDiscoveryCacheCapacity(t *testing.T) { const ns = "test" // add speers - for i := 0; i < n; i++ { + for i := range n { advertisers[i].Advertise(ctx, ns, discovery.TTL(time.Hour)) } // Advance clock by one step diff --git a/p2p/discovery/backoff/backoffconnector_test.go b/p2p/discovery/backoff/backoffconnector_test.go index e95796c3b..b23588272 100644 --- a/p2p/discovery/backoff/backoffconnector_test.go +++ b/p2p/discovery/backoff/backoffconnector_test.go @@ -40,9 +40,9 @@ func (h *maxDialHost) Connect(ctx context.Context, ai peer.AddrInfo) error { } func getNetHosts(t *testing.T, n int) []host.Host { - var out []host.Host + out := make([]host.Host, 0, n) - for i := 0; i < n; i++ { + for range n { netw := swarmt.GenSwarm(t) h := bhost.NewBlankHost(netw) t.Cleanup(func() { h.Close() }) diff --git a/p2p/discovery/mdns/mdns.go b/p2p/discovery/mdns/mdns.go index 5c2692893..e22e6e696 100644 --- a/p2p/discovery/mdns/mdns.go +++ 
b/p2p/discovery/mdns/mdns.go @@ -257,7 +257,7 @@ func (s *mdnsService) startResolver(ctx context.Context) { func randomString(l int) string { const alphabet = "abcdefghijklmnopqrstuvwxyz0123456789" s := make([]byte, 0, l) - for i := 0; i < l; i++ { + for range l { s = append(s, alphabet[rand.Intn(len(alphabet))]) } return string(s) diff --git a/p2p/discovery/mdns/mdns_test.go b/p2p/discovery/mdns/mdns_test.go index cfef3cc2e..7a59db6a6 100644 --- a/p2p/discovery/mdns/mdns_test.go +++ b/p2p/discovery/mdns/mdns_test.go @@ -3,6 +3,7 @@ package mdns import ( "os" "runtime" + "slices" "sync" "testing" "time" @@ -58,7 +59,7 @@ func TestOtherDiscovery(t *testing.T) { notifs := make([]*notif, n) hostIDs := make([]peer.ID, n) - for i := 0; i < n; i++ { + for i := range n { notif := ¬if{} notifs[i] = notif hostIDs[i] = setupMDNS(t, notif) @@ -70,11 +71,8 @@ func TestOtherDiscovery(t *testing.T) { if currentHostID == id { continue } - for _, i := range ids { - if id == i { - found = true - break - } + if slices.Contains(ids, id) { + found = true } if !found { return false diff --git a/p2p/host/autonat/svc_test.go b/p2p/host/autonat/svc_test.go index 8e1242aff..098f79e37 100644 --- a/p2p/host/autonat/svc_test.go +++ b/p2p/host/autonat/svc_test.go @@ -140,7 +140,7 @@ func TestAutoNATServiceGlobalLimiter(t *testing.T) { hs := c.host - for i := 0; i < 5; i++ { + for range 5 { hc, ac := makeAutoNATClient(t) connect(t, hs, hc) diff --git a/p2p/host/autorelay/autorelay_test.go b/p2p/host/autorelay/autorelay_test.go index 3a280ac38..46a2a4935 100644 --- a/p2p/host/autorelay/autorelay_test.go +++ b/p2p/host/autorelay/autorelay_test.go @@ -110,12 +110,7 @@ func newRelay(t *testing.T) host.Host { ) require.NoError(t, err) require.Eventually(t, func() bool { - for _, p := range h.Mux().Protocols() { - if p == protoIDv2 { - return true - } - } - return false + return slices.Contains(h.Mux().Protocols(), protoIDv2) }, time.Second, 10*time.Millisecond) return h } @@ -150,7 +145,7 @@ func TestSingleRelay(t *testing.T) { const numCandidates = 3 var called bool peerChan := make(chan peer.AddrInfo, numCandidates) - for i := 0; i < numCandidates; i++ { + for range numCandidates { r := newRelay(t) t.Cleanup(func() { r.Close() }) peerChan <- peer.AddrInfo{ID: r.ID(), Addrs: r.Addrs()} @@ -260,7 +255,7 @@ func TestBackoff(t *testing.T) { }, 2*time.Second, 100*time.Millisecond, "counter load should be 2") // make sure we don't add any relays yet - for i := 0; i < 2; i++ { + for range 2 { cl.AdvanceBy(backoff / 3) require.Equal(t, 1, int(reservations.Load())) } @@ -274,8 +269,8 @@ func TestBackoff(t *testing.T) { func TestStaticRelays(t *testing.T) { const numStaticRelays = 3 - var staticRelays []peer.AddrInfo - for i := 0; i < numStaticRelays; i++ { + staticRelays := make([]peer.AddrInfo, 0, numStaticRelays) + for range numStaticRelays { r := newRelay(t) t.Cleanup(func() { r.Close() }) staticRelays = append(staticRelays, peer.AddrInfo{ID: r.ID(), Addrs: r.Addrs()}) @@ -294,7 +289,7 @@ func TestConnectOnDisconnect(t *testing.T) { const num = 3 peerChan := make(chan peer.AddrInfo, num) relays := make([]host.Host, 0, num) - for i := 0; i < 3; i++ { + for range 3 { r := newRelay(t) t.Cleanup(func() { r.Close() }) peerChan <- peer.AddrInfo{ID: r.ID(), Addrs: r.Addrs()} @@ -322,7 +317,7 @@ func TestConnectOnDisconnect(t *testing.T) { } require.EventuallyWithT(t, func(collect *assert.CollectT) { relaysInUse = usedRelays(h) - assert.Len(collect, relaysInUse, 1) + require.Len(collect, relaysInUse, 1) assert.NotEqualf(collect, 
oldRelay, relaysInUse[0], "old relay should not be used again") }, 10*time.Second, 100*time.Millisecond) } @@ -335,7 +330,7 @@ func TestMaxAge(t *testing.T) { peerChan2 := make(chan peer.AddrInfo, num) relays1 := make([]host.Host, 0, num) relays2 := make([]host.Host, 0, num) - for i := 0; i < num; i++ { + for range num { r1 := newRelay(t) t.Cleanup(func() { r1.Close() }) peerChan1 <- peer.AddrInfo{ID: r1.ID(), Addrs: r1.Addrs()} @@ -415,10 +410,8 @@ func TestMaxAge(t *testing.T) { } require.Eventually(t, func() bool { - for _, id := range ids { - if id == relays[0] { - return true - } + if slices.Contains(ids, relays[0]) { + return true } fmt.Println("waiting for", ids, "to contain", relays[0]) return false @@ -428,10 +421,10 @@ func TestMaxAge(t *testing.T) { func TestReconnectToStaticRelays(t *testing.T) { cl := newMockClock() - var staticRelays []peer.AddrInfo const numStaticRelays = 1 + staticRelays := make([]peer.AddrInfo, 0, numStaticRelays) relays := make([]host.Host, 0, numStaticRelays) - for i := 0; i < numStaticRelays; i++ { + for range numStaticRelays { r := newRelay(t) t.Cleanup(func() { r.Close() }) relays = append(relays, r) diff --git a/p2p/host/basic/addrs_manager_test.go b/p2p/host/basic/addrs_manager_test.go index 618332789..67f7c74fb 100644 --- a/p2p/host/basic/addrs_manager_test.go +++ b/p2p/host/basic/addrs_manager_test.go @@ -489,8 +489,8 @@ func TestAddrsManagerPeerstoreUpdated(t *testing.T) { } func TestRemoveIfNotInSource(t *testing.T) { - var addrs []ma.Multiaddr - for i := 0; i < 10; i++ { + addrs := make([]ma.Multiaddr, 0, 10) + for i := range 10 { addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/tcp/%d", i))) } slices.SortFunc(addrs, func(a, b ma.Multiaddr) int { return a.Compare(b) }) @@ -517,7 +517,7 @@ func TestRemoveIfNotInSource(t *testing.T) { func BenchmarkAreAddrsDifferent(b *testing.B) { var addrs [10]ma.Multiaddr - for i := 0; i < len(addrs); i++ { + for i := range len(addrs) { addrs[i] = ma.StringCast(fmt.Sprintf("/ip4/1.1.1.%d/tcp/1", i)) } b.Run("areAddrsDifferent", func(b *testing.B) { @@ -531,7 +531,7 @@ func BenchmarkAreAddrsDifferent(b *testing.B) { func BenchmarkRemoveIfNotInSource(b *testing.B) { var addrs [10]ma.Multiaddr - for i := 0; i < len(addrs); i++ { + for i := range len(addrs) { addrs[i] = ma.StringCast(fmt.Sprintf("/ip4/1.1.1.%d/tcp/1", i)) } b.ReportAllocs() diff --git a/p2p/host/basic/addrs_reachability_tracker_test.go b/p2p/host/basic/addrs_reachability_tracker_test.go index c6a69f079..5e88e6c48 100644 --- a/p2p/host/basic/addrs_reachability_tracker_test.go +++ b/p2p/host/basic/addrs_reachability_tracker_test.go @@ -116,8 +116,8 @@ func TestProbeManager(t *testing.T) { t.Run("successes", func(t *testing.T) { pm := makeNewProbeManager([]ma.Multiaddr{pub1, pub2}) - for j := 0; j < 2; j++ { - for i := 0; i < targetConfidence; i++ { + for range 2 { + for range targetConfidence { reqs := nextProbe(pm) pm.CompleteProbe(reqs, autonatv2.Result{Addr: reqs[0].Addr, Idx: 0, Reachability: network.ReachabilityPublic}, nil) } @@ -396,7 +396,7 @@ func TestAddrsReachabilityTracker(t *testing.T) { } tr := newTracker(mockClient, nil) var addrs []ma.Multiaddr - for i := 0; i < 10; i++ { + for i := range 10 { addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/1.1.1.1/tcp/%d", i))) } slices.SortFunc(addrs, func(a, b ma.Multiaddr) int { return -a.Compare(b) }) // sort in reverse order @@ -460,7 +460,7 @@ func TestAddrsReachabilityTracker(t *testing.T) { require.True(t, drainNotify()) // check that we did receive probes backoffInterval 
:= backoffStartInterval - for i := 0; i < 4; i++ { + for range 4 { drainNotify() cl.Add(backoffInterval / 2) select { @@ -512,7 +512,7 @@ func TestAddrsReachabilityTracker(t *testing.T) { tr.UpdateAddrs([]ma.Multiaddr{pub1}) assertFirstEvent(t, tr, []ma.Multiaddr{pub1}) - for i := 0; i < minConfidence; i++ { + for range minConfidence { select { case <-notify: case <-time.After(1 * time.Second): @@ -677,7 +677,7 @@ func TestRefreshReachability(t *testing.T) { time.Sleep(50 * time.Millisecond) // wait for the cancellation to be processed outer: - for i := 0; i < defaultMaxConcurrency; i++ { + for range defaultMaxConcurrency { select { case <-block: default: @@ -991,10 +991,7 @@ func FuzzAddrsReachabilityTracker(f *testing.F) { } ips = ips[1:] var x, y int64 - split := 128 / 8 - if len(ips) < split { - split = len(ips) - } + split := min(len(ips), 128/8) var b [8]byte copy(b[:], ips[:split]) x = int64(binary.LittleEndian.Uint64(b[:])) diff --git a/p2p/host/basic/basic_host_test.go b/p2p/host/basic/basic_host_test.go index 2ecd22f10..4fadb684a 100644 --- a/p2p/host/basic/basic_host_test.go +++ b/p2p/host/basic/basic_host_test.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "reflect" + "slices" "strings" "sync" "testing" @@ -442,12 +443,7 @@ func TestHostProtoPreknowledge(t *testing.T) { require.Never(t, func() bool { protos, err := h1.Peerstore().GetProtocols(h2.ID()) require.NoError(t, err) - for _, p := range protos { - if p == "/foo" { - return true - } - } - return false + return slices.Contains(protos, "/foo") }, time.Second, 100*time.Millisecond) s, err := h1.NewStream(context.Background(), h2.ID(), "/foo", "/bar", "/super") diff --git a/p2p/host/eventbus/basic.go b/p2p/host/eventbus/basic.go index 6227848c8..a9db7b1e9 100644 --- a/p2p/host/eventbus/basic.go +++ b/p2p/host/eventbus/basic.go @@ -38,7 +38,7 @@ type emitter struct { metricsTracer MetricsTracer } -func (e *emitter) Emit(evt interface{}) error { +func (e *emitter) Emit(evt any) error { if e.closed.Load() { return fmt.Errorf("emitter is closed") } @@ -118,14 +118,14 @@ func (b *basicBus) tryDropNode(typ reflect.Type) { } type wildcardSub struct { - ch chan interface{} + ch chan any w *wildcardNode metricsTracer MetricsTracer name string closeOnce sync.Once } -func (w *wildcardSub) Out() <-chan interface{} { +func (w *wildcardSub) Out() <-chan any { return w.ch } @@ -146,11 +146,11 @@ func (w *wildcardSub) Name() string { type namedSink struct { name string - ch chan interface{} + ch chan any } type sub struct { - ch chan interface{} + ch chan any nodes []*node dropper func(reflect.Type) metricsTracer MetricsTracer @@ -162,7 +162,7 @@ func (s *sub) Name() string { return s.name } -func (s *sub) Out() <-chan interface{} { +func (s *sub) Out() <-chan any { return s.ch } @@ -207,7 +207,7 @@ var _ event.Subscription = (*sub)(nil) // Subscribe creates new subscription. Failing to drain the channel will cause // publishers to get blocked. 
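In the FuzzAddrsReachabilityTracker hunk above, `split := min(len(ips), 128/8)` replaces the assign-then-clamp `if`. The `min` and `max` builtins (Go 1.21) accept two or more operands of any ordered type. A minimal sketch, not part of the patch:

```go
package main

import "fmt"

// clampOld is the pre-Go-1.21 idiom the modernizer removes.
func clampOld(n, limit int) int {
	if n > limit {
		n = limit
	}
	return n
}

func main() {
	ips := make([]byte, 20)
	split := min(len(ips), 128/8) // 128/8 == 16, so split == 16
	fmt.Println(split == clampOld(len(ips), 128/8)) // true
}
```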
CancelFunc is guaranteed to return after last send // to the channel -func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt) (_ event.Subscription, err error) { +func (b *basicBus) Subscribe(evtTypes any, opts ...event.SubscriptionOpt) (_ event.Subscription, err error) { settings := newSubSettings() for _, opt := range opts { if err := opt(&settings); err != nil { @@ -217,7 +217,7 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt if evtTypes == event.WildcardSubscription { out := &wildcardSub{ - ch: make(chan interface{}, settings.buffer), + ch: make(chan any, settings.buffer), w: b.wildcard, metricsTracer: b.metricsTracer, name: settings.name, @@ -226,9 +226,9 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt return out, nil } - types, ok := evtTypes.([]interface{}) + types, ok := evtTypes.([]any) if !ok { - types = []interface{}{evtTypes} + types = []any{evtTypes} } if len(types) > 1 { @@ -240,7 +240,7 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt } out := &sub{ - ch: make(chan interface{}, settings.buffer), + ch: make(chan any, settings.buffer), nodes: make([]*node, len(types)), dropper: b.tryDropNode, @@ -249,7 +249,7 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt } for _, etyp := range types { - if reflect.TypeOf(etyp).Kind() != reflect.Ptr { + if reflect.TypeOf(etyp).Kind() != reflect.Pointer { return nil, errors.New("subscribe called with non-pointer type") } } @@ -287,7 +287,7 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt // defer emit.Close() // MUST call this after being done with the emitter // // emit(EventT{}) -func (b *basicBus) Emitter(evtType interface{}, opts ...event.EmitterOpt) (e event.Emitter, err error) { +func (b *basicBus) Emitter(evtType any, opts ...event.EmitterOpt) (e event.Emitter, err error) { if evtType == event.WildcardSubscription { return nil, fmt.Errorf("illegal emitter for wildcard subscription") } @@ -300,7 +300,7 @@ func (b *basicBus) Emitter(evtType interface{}, opts ...event.EmitterOpt) (e eve } typ := reflect.TypeOf(evtType) - if typ.Kind() != reflect.Ptr { + if typ.Kind() != reflect.Pointer { return nil, errors.New("emitter called with non-pointer type") } typ = typ.Elem() @@ -349,7 +349,7 @@ func (n *wildcardNode) addSink(sink *namedSink) { } } -func (n *wildcardNode) removeSink(ch chan interface{}) { +func (n *wildcardNode) removeSink(ch chan any) { go func() { // drain the event channel, will return when closed and drained. // this is necessary to unblock publishes to this channel. @@ -370,7 +370,7 @@ func (n *wildcardNode) removeSink(ch chan interface{}) { var wildcardType = reflect.TypeOf(event.WildcardSubscription) -func (n *wildcardNode) emit(evt interface{}) { +func (n *wildcardNode) emit(evt any) { if n.nSinks.Load() == 0 { return } @@ -406,7 +406,7 @@ type node struct { nEmitters atomic.Int32 keepLast bool - last interface{} + last any sinks []*namedSink metricsTracer MetricsTracer @@ -421,7 +421,7 @@ func newNode(typ reflect.Type, metricsTracer MetricsTracer) *node { } } -func (n *node) emit(evt interface{}) { +func (n *node) emit(evt any) { typ := reflect.TypeOf(evt) if typ != n.typ { panic(fmt.Sprintf("Emit called with wrong type. 
expected: %s, got: %s", n.typ, typ)) @@ -446,7 +446,7 @@ func (n *node) emit(evt interface{}) { n.lk.Unlock() } -func emitAndLogError(timer *time.Timer, typ reflect.Type, evt interface{}, sink *namedSink) *time.Timer { +func emitAndLogError(timer *time.Timer, typ reflect.Type, evt any, sink *namedSink) *time.Timer { // Slow consumer. Log a warning if stalled for the timeout if timer == nil { timer = time.NewTimer(slowConsumerWarningTimeout) diff --git a/p2p/host/eventbus/basic_test.go b/p2p/host/eventbus/basic_test.go index defa63032..b36c65210 100644 --- a/p2p/host/eventbus/basic_test.go +++ b/p2p/host/eventbus/basic_test.go @@ -245,7 +245,7 @@ func TestClosingRaces(t *testing.T) { b := NewBus() - for i := 0; i < subs; i++ { + for range subs { go func() { lk.RLock() defer lk.RUnlock() @@ -257,7 +257,7 @@ func TestClosingRaces(t *testing.T) { wg.Done() }() } - for i := 0; i < emits; i++ { + for range emits { go func() { lk.RLock() defer lk.RUnlock() @@ -291,7 +291,7 @@ func TestSubMany(t *testing.T) { wait.Add(n) ready.Add(n) - for i := 0; i < n; i++ { + for range n { go func() { sub, err := bus.Subscribe(new(EventB)) if err != nil { @@ -340,7 +340,7 @@ func TestWildcardSubscription(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - var evts []interface{} + var evts []any LOOP: for { @@ -366,8 +366,8 @@ LOOP: func TestManyWildcardSubscriptions(t *testing.T) { bus := NewBus() - var subs []event.Subscription - for i := 0; i < 10; i++ { + subs := make([]event.Subscription, 0, 10) + for range 10 { sub, err := bus.Subscribe(event.WildcardSubscription) require.NoError(t, err) subs = append(subs, sub) @@ -423,7 +423,7 @@ func TestManyWildcardSubscriptions(t *testing.T) { func TestWildcardValidations(t *testing.T) { bus := NewBus() - _, err := bus.Subscribe([]interface{}{event.WildcardSubscription, new(EventA), new(EventB)}) + _, err := bus.Subscribe([]any{event.WildcardSubscription, new(EventA), new(EventB)}) require.Error(t, err) _, err = bus.Emitter(event.WildcardSubscription) @@ -432,7 +432,7 @@ func TestWildcardValidations(t *testing.T) { func TestSubType(t *testing.T) { bus := NewBus() - sub, err := bus.Subscribe([]interface{}{new(EventA), new(EventB)}) + sub, err := bus.Subscribe([]any{new(EventA), new(EventB)}) if err != nil { t.Fatal(err) } @@ -555,7 +555,7 @@ func TestSubFailFully(t *testing.T) { t.Fatal(err) } - _, err = bus.Subscribe([]interface{}{new(EventB), 5}) + _, err = bus.Subscribe([]any{new(EventB), 5}) if err == nil || err.Error() != "subscribe called with non-pointer type" { t.Fatal(err) } @@ -576,7 +576,7 @@ func TestSubFailFully(t *testing.T) { func TestSubCloseMultiple(t *testing.T) { bus := NewBus() - sub, err := bus.Subscribe([]interface{}{new(EventB)}) + sub, err := bus.Subscribe([]any{new(EventB)}) require.NoError(t, err) err = sub.Close() require.NoError(t, err) @@ -598,7 +598,7 @@ func testMany(t testing.TB, subs, emits, msgs int, stateful bool) { wait.Add(subs + emits) ready.Add(subs) - for i := 0; i < subs; i++ { + for range subs { go func() { sub, err := bus.Subscribe(new(EventB)) if err != nil { @@ -618,9 +618,9 @@ func testMany(t testing.TB, subs, emits, msgs int, stateful bool) { }() } - for i := 0; i < emits; i++ { + for range emits { go func() { - em, err := bus.Emitter(new(EventB), func(settings interface{}) error { + em, err := bus.Emitter(new(EventB), func(settings any) error { settings.(*emitterSettings).makeStateful = stateful return nil }) @@ -631,7 +631,7 @@ func testMany(t testing.TB, subs, emits, 
msgs int, stateful bool) { ready.Wait() - for i := 0; i < msgs; i++ { + for range msgs { em.Emit(EventB(97)) } @@ -662,7 +662,7 @@ func (bc benchCase) name() string { func genTestCases() []benchCase { ret := make([]benchCase, 0, 200) - for stateful := 0; stateful < 2; stateful++ { + for stateful := range 2 { for subs := uint(0); subs <= 8; subs = subs + 4 { for emits := uint(0); emits <= 8; emits = emits + 4 { ret = append(ret, benchCase{1 << subs, 1 << emits, stateful == 1}) @@ -690,7 +690,7 @@ func benchMany(bc benchCase) func(*testing.B) { wait.Add(subs + emits) ready.Add(subs + emits) - for i := 0; i < subs; i++ { + for range subs { go func() { sub, err := bus.Subscribe(new(EventB)) if err != nil { @@ -710,9 +710,9 @@ func benchMany(bc benchCase) func(*testing.B) { }() } - for i := 0; i < emits; i++ { + for range emits { go func() { - em, err := bus.Emitter(new(EventB), func(settings interface{}) error { + em, err := bus.Emitter(new(EventB), func(settings any) error { settings.(*emitterSettings).makeStateful = stateful return nil }) @@ -743,7 +743,7 @@ func BenchmarkSubscribe(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N/div; i++ { bus := NewBus() - for j := 0; j < div; j++ { + for range div { bus.Subscribe(new(EventA)) } } @@ -753,7 +753,7 @@ func BenchmarkEmitter(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N/div; i++ { bus := NewBus() - for j := 0; j < div; j++ { + for range div { bus.Emitter(new(EventA)) } } @@ -763,7 +763,7 @@ func BenchmarkSubscribeAndEmitter(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N/div; i++ { bus := NewBus() - for j := 0; j < div; j++ { + for range div { bus.Subscribe(new(EventA)) bus.Emitter(new(EventA)) } diff --git a/p2p/host/eventbus/opts.go b/p2p/host/eventbus/opts.go index 837a0683f..f3e2a744e 100644 --- a/p2p/host/eventbus/opts.go +++ b/p2p/host/eventbus/opts.go @@ -39,15 +39,15 @@ func newSubSettings() subSettings { return settings } -func BufSize(n int) func(interface{}) error { - return func(s interface{}) error { +func BufSize(n int) func(any) error { + return func(s any) error { s.(*subSettings).buffer = n return nil } } -func Name(name string) func(interface{}) error { - return func(s interface{}) error { +func Name(name string) func(any) error { + return func(s any) error { s.(*subSettings).name = name return nil } @@ -64,7 +64,7 @@ type emitterSettings struct { // // This allows to provide state tracking for dynamic systems, and/or // allows new subscribers to verify that there are Emitters on the channel -func Stateful(s interface{}) error { +func Stateful(s any) error { s.(*emitterSettings).makeStateful = true return nil } diff --git a/p2p/host/observedaddrs/manager.go b/p2p/host/observedaddrs/manager.go index 577b36405..68c90264b 100644 --- a/p2p/host/observedaddrs/manager.go +++ b/p2p/host/observedaddrs/manager.go @@ -304,10 +304,7 @@ func (o *Manager) getTopExternalAddrs(localTWStr string, minObservers int) []*ob }) // TODO(sukunrt): Improve this logic. 
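The eventbus opts.go hunks show why the `any` rewrite touches exported API surface here: subscription and emitter options are plain `func(any) error` closures that type-assert their settings struct, so the alias swap flows through every option constructor. A pared-down sketch of that option pattern, not part of the patch:

```go
package main

import "fmt"

type subSettings struct {
	buffer int
	name   string
}

// BufSize mirrors the eventbus option shape: options take `any` so a
// single option signature can serve different settings structs, and
// each option asserts the concrete type it knows how to configure.
func BufSize(n int) func(any) error {
	return func(s any) error {
		s.(*subSettings).buffer = n
		return nil
	}
}

func main() {
	s := &subSettings{buffer: 16, name: "sub"}
	if err := BufSize(64)(s); err != nil {
		fmt.Println("option failed:", err)
	}
	fmt.Println(s.name, s.buffer) // sub 64
}
```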
Return only if the addresses have a // threshold fraction of the maximum observations - n := len(observerSets) - if n > maxExternalThinWaistAddrsPerLocalAddr { - n = maxExternalThinWaistAddrsPerLocalAddr - } + n := min(len(observerSets), maxExternalThinWaistAddrsPerLocalAddr) return observerSets[:n] } diff --git a/p2p/host/observedaddrs/manager_test.go b/p2p/host/observedaddrs/manager_test.go index 95f645431..8706f36e0 100644 --- a/p2p/host/observedaddrs/manager_test.go +++ b/p2p/host/observedaddrs/manager_test.go @@ -93,7 +93,7 @@ func TestObservedAddrsManager(t *testing.T) { } conns := make([]*mockConn, 0, n) - for i := 0; i < n; i++ { + for i := range n { ipPart := ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d", i)) conns = append(conns, newConn(localAddr, ma.Join(ipPart, protoPart))) } @@ -129,7 +129,7 @@ func TestObservedAddrsManager(t *testing.T) { defer o.Close() conns := getConns(t, 40, ma.P_TCP) observedAddrs := make([]ma.Multiaddr, maxExternalThinWaistAddrsPerLocalAddr*2) - for i := 0; i < len(observedAddrs); i++ { + for i := range observedAddrs { observedAddrs[i] = ma.StringCast(fmt.Sprintf("/ip4/2.2.2.%d/tcp/2", i)) } for i, c := range conns { @@ -207,11 +207,11 @@ func TestObservedAddrsManager(t *testing.T) { const N = 4 // ActivationThresh var ob1, ob2 [N]connMultiaddrs - for i := 0; i < N; i++ { + for i := range N { ob1[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) ob2[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/2/quic-v1", i))) } - for i := 0; i < N-1; i++ { + for i := range N - 1 { o.maybeRecordObservation(ob1[i], observedQuic) o.maybeRecordObservation(ob2[i], observedQuic) } @@ -226,7 +226,7 @@ func TestObservedAddrsManager(t *testing.T) { }, 2*time.Second, 100*time.Millisecond) // Now disconnect first observer group - for i := 0; i < N; i++ { + for i := range N { o.removeConn(ob1[i]) } time.Sleep(100 * time.Millisecond) @@ -235,7 +235,7 @@ func TestObservedAddrsManager(t *testing.T) { } // Now disconnect the second group to check cleanup - for i := 0; i < N; i++ { + for i := range N { o.removeConn(ob2[i]) } require.Eventually(t, func() bool { @@ -254,11 +254,11 @@ func TestObservedAddrsManager(t *testing.T) { const N = 4 // ActivationThresh var ob1, ob2 [N]connMultiaddrs - for i := 0; i < N; i++ { + for i := range N { ob1[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) ob2[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/2/quic-v1", i))) } - for i := 0; i < N-1; i++ { + for i := range N - 1 { o.maybeRecordObservation(ob1[i], observedQuic1) o.maybeRecordObservation(ob2[i], observedQuic2) } @@ -273,7 +273,7 @@ func TestObservedAddrsManager(t *testing.T) { }, 2*time.Second, 100*time.Millisecond) // Now disconnect first observer group - for i := 0; i < N; i++ { + for i := range N { o.removeConn(ob1[i]) } time.Sleep(100 * time.Millisecond) @@ -282,7 +282,7 @@ func TestObservedAddrsManager(t *testing.T) { } // Now disconnect the second group to check cleanup - for i := 0; i < N; i++ { + for i := range N { o.removeConn(ob2[i]) } require.Eventually(t, func() bool { @@ -300,7 +300,7 @@ func TestObservedAddrsManager(t *testing.T) { c5 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.5/udp/1/quic-v1")) c6 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.6/udp/1/quic-v1")) var observedQuic, observedWebTransport, observedWebTransportWithCertHash ma.Multiaddr - for i := 0; i < 10; i++ { + for i := range 10 { // Change the IP address in 
each observation observedQuic = ma.StringCast(fmt.Sprintf("/ip4/2.2.2.%d/udp/2/quic-v1", i)) observedWebTransport = ma.StringCast(fmt.Sprintf("/ip4/2.2.2.%d/udp/2/quic-v1/webtransport", i)) @@ -325,7 +325,7 @@ func TestObservedAddrsManager(t *testing.T) { requireEqualAddrs(t, []ma.Multiaddr{observedQuic}, o.AddrsFor(quic4ListenAddr)) requireAddrsMatch(t, []ma.Multiaddr{observedQuic, observedWebTransportWithCertHash}, o.Addrs(0)) - for i := 0; i < 3; i++ { + for range 3 { // remove non-recorded connection o.removeConn(c6) } @@ -374,7 +374,7 @@ func TestObservedAddrsManager(t *testing.T) { observedWebTransport := ma.StringCast("/ip4/2.2.2.2/udp/1/quic-v1/webtransport") var udpConns [5 * maxExternalThinWaistAddrsPerLocalAddr]connMultiaddrs - for i := 0; i < len(udpConns); i++ { + for i := range len(udpConns) { udpConns[i] = newConn(webTransport4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1/webtransport", i))) o.maybeRecordObservation(udpConns[i], observedWebTransport) time.Sleep(10 * time.Millisecond) @@ -391,12 +391,12 @@ func TestObservedAddrsManager(t *testing.T) { defer o.Close() const N = 100 var tcpConns, quicConns [N]*mockConn - for i := 0; i < N; i++ { + for i := range N { tcpConns[i] = newConn(tcp4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/tcp/1", i))) quicConns[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) } var observedQuic, observedTCP ma.Multiaddr - for i := 0; i < N; i++ { + for i := range N { // ip addr has the form 2.2..2 observedQuic = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.2/udp/2/quic-v1", i%20)) observedTCP = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.2/tcp/2", i%20)) @@ -410,7 +410,7 @@ func TestObservedAddrsManager(t *testing.T) { require.Equal(t, len(o.Addrs(0)), 3*maxExternalThinWaistAddrsPerLocalAddr) }, 1*time.Second, 100*time.Millisecond) addrs := o.Addrs(0) - for i := 0; i < 10; i++ { + for range 10 { require.ElementsMatch(t, o.Addrs(0), addrs, "%s %s", o.Addrs(0), addrs) time.Sleep(50 * time.Millisecond) } @@ -419,7 +419,7 @@ func TestObservedAddrsManager(t *testing.T) { require.Equal(t, tcpNAT, network.NATDeviceTypeEndpointDependent) require.Equal(t, udpNAT, network.NATDeviceTypeEndpointDependent) - for i := 0; i < N; i++ { + for i := range N { o.removeConn(tcpConns[i]) o.removeConn(quicConns[i]) } @@ -444,7 +444,7 @@ func TestObservedAddrsManager(t *testing.T) { const N = 100 var tcp4Conns, quic4Conns, webTransport4Conns [N]*mockConn var tcp6Conns, quic6Conns, webTransport6Conns [N]*mockConn - for i := 0; i < N; i++ { + for i := range N { tcp4Conns[i] = newConn(tcp4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/tcp/1", i))) quic4Conns[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) webTransport4Conns[i] = newConn(webTransport4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1/webtransport", i))) @@ -455,8 +455,8 @@ func TestObservedAddrsManager(t *testing.T) { } var observedQUIC4, observedWebTransport4, observedTCP4 ma.Multiaddr var observedQUIC6, observedWebTransport6, observedTCP6 ma.Multiaddr - for i := 0; i < N; i++ { - for j := 0; j < 5; j++ { + for i := range N { + for j := range 5 { // ip addr has the form 2.2.. 
observedQUIC4 = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.%d/udp/2/quic-v1", i/10, j)) observedWebTransport4 = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.%d/udp/2/quic-v1/webtransport", i/10, j)) @@ -482,7 +482,7 @@ func TestObservedAddrsManager(t *testing.T) { return len(o.Addrs(0)) == 2*3*maxExternalThinWaistAddrsPerLocalAddr }, 1*time.Second, 100*time.Millisecond) addrs := o.Addrs(0) - for i := 0; i < 10; i++ { + for range 10 { require.ElementsMatch(t, o.Addrs(0), addrs, "%s %s", o.Addrs(0), addrs) time.Sleep(10 * time.Millisecond) } @@ -519,7 +519,7 @@ func TestObservedAddrsManager(t *testing.T) { matest.AssertMultiaddrsMatch(t, o.Addrs(0), allAddrs) }, 1*time.Second, 100*time.Millisecond) - for i := 0; i < N; i++ { + for i := range N { o.removeConn(tcp4Conns[i]) o.removeConn(quic4Conns[i]) o.removeConn(webTransport4Conns[i]) @@ -584,7 +584,7 @@ func FuzzObservedAddrsManager(f *testing.F) { } n = len(addrs) for i := 0; i < n; i++ { - for j := 0; j < len(protos); j++ { + for j := range protos { protoAddr := ma.StringCast(protos[j]) addrs = append(addrs, addrs[i].Encapsulate(protoAddr)) addrs = append(addrs, protoAddr) diff --git a/p2p/host/peerstore/metrics_test.go b/p2p/host/peerstore/metrics_test.go index d92a482b5..c35b5f47c 100644 --- a/p2p/host/peerstore/metrics_test.go +++ b/p2p/host/peerstore/metrics_test.go @@ -48,7 +48,7 @@ func TestLatencyEWMA(t *testing.T) { const sig = 10 next := func() time.Duration { return time.Duration(rand.Intn(20) - 10 + mu) } - for i := 0; i < 10; i++ { + for range 10 { m.RecordLatency(id, next()) } diff --git a/p2p/host/peerstore/pstoreds/metadata.go b/p2p/host/peerstore/pstoreds/metadata.go index 9dcfcc13b..7df4cc326 100644 --- a/p2p/host/peerstore/pstoreds/metadata.go +++ b/p2p/host/peerstore/pstoreds/metadata.go @@ -41,7 +41,7 @@ func NewPeerMetadata(_ context.Context, store ds.Datastore, _ Options) (*dsPeerM return &dsPeerMetadata{store}, nil } -func (pm *dsPeerMetadata) Get(p peer.ID, key string) (interface{}, error) { +func (pm *dsPeerMetadata) Get(p peer.ID, key string) (any, error) { k := pmBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).ChildString(key) value, err := pm.ds.Get(context.TODO(), k) if err != nil { @@ -51,14 +51,14 @@ func (pm *dsPeerMetadata) Get(p peer.ID, key string) (interface{}, error) { return nil, err } - var res interface{} + var res any if err := gob.NewDecoder(bytes.NewReader(value)).Decode(&res); err != nil { return nil, err } return res, nil } -func (pm *dsPeerMetadata) Put(p peer.ID, key string, val interface{}) error { +func (pm *dsPeerMetadata) Put(p peer.ID, key string, val any) error { k := pmBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).ChildString(key) var buf pool.Buffer if err := gob.NewEncoder(&buf).Encode(&val); err != nil { diff --git a/p2p/host/peerstore/pstoreds/peerstore.go b/p2p/host/peerstore/pstoreds/peerstore.go index 8c9e9ff8b..895ac2151 100644 --- a/p2p/host/peerstore/pstoreds/peerstore.go +++ b/p2p/host/peerstore/pstoreds/peerstore.go @@ -136,7 +136,7 @@ func uniquePeerIds(ds ds.Datastore, prefix ds.Key, extractor func(result query.R func (ps *pstoreds) Close() (err error) { var errs []error - weakClose := func(name string, c interface{}) { + weakClose := func(name string, c any) { if cl, ok := c.(io.Closer); ok { if err = cl.Close(); err != nil { errs = append(errs, fmt.Errorf("%s error: %s", name, err)) diff --git a/p2p/host/peerstore/pstoremem/addr_book_test.go b/p2p/host/peerstore/pstoremem/addr_book_test.go index e8ba89ff9..21fa8d751 100644 --- 
a/p2p/host/peerstore/pstoremem/addr_book_test.go +++ b/p2p/host/peerstore/pstoremem/addr_book_test.go @@ -32,7 +32,7 @@ func TestPeerAddrsNextExpiry(t *testing.T) { func peerAddrsInput(n int) []*expiringAddr { expiringAddrs := make([]*expiringAddr, n) - for i := 0; i < n; i++ { + for i := range n { port := i % 65535 a := ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/udp/%d/quic-v1", port)) e := time.Time{}.Add(time.Duration(i) * time.Second) @@ -48,11 +48,11 @@ func TestPeerAddrsHeapProperty(t *testing.T) { const N = 10000 expiringAddrs := peerAddrsInput(N) - for i := 0; i < N; i++ { + for i := range N { paa.Insert(expiringAddrs[i]) } - for i := 0; i < N; i++ { + for i := range N { ea, ok := pa.PopIfExpired(expiringAddrs[i].Expiry) require.True(t, ok, "pos: %d", i) require.Equal(t, ea.Addr, expiringAddrs[i].Addr) @@ -69,7 +69,7 @@ func TestPeerAddrsHeapPropertyDeletions(t *testing.T) { const N = 10000 expiringAddrs := peerAddrsInput(N) - for i := 0; i < N; i++ { + for i := range N { paa.Insert(expiringAddrs[i]) } @@ -78,7 +78,7 @@ func TestPeerAddrsHeapPropertyDeletions(t *testing.T) { paa.Delete(expiringAddrs[i]) } - for i := 0; i < N; i++ { + for i := range N { ea, ok := pa.PopIfExpired(expiringAddrs[i].Expiry) if i%3 == 0 { require.False(t, ok) @@ -100,7 +100,7 @@ func TestPeerAddrsHeapPropertyUpdates(t *testing.T) { const N = 10000 expiringAddrs := peerAddrsInput(N) - for i := 0; i < N; i++ { + for i := range N { heap.Push(pa, expiringAddrs[i]) } @@ -112,7 +112,7 @@ func TestPeerAddrsHeapPropertyUpdates(t *testing.T) { endElements = append(endElements, expiringAddrs[i].Addr) } - for i := 0; i < N; i++ { + for i := range N { if i%3 == 0 { continue // skip the elements at the end } @@ -136,7 +136,7 @@ func TestPeerAddrsHeapPropertyUpdates(t *testing.T) { // TestPeerAddrsExpiry tests for multiple element expiry with PopIfExpired. func TestPeerAddrsExpiry(t *testing.T) { const T = 100_000 - for x := 0; x < T; x++ { + for range T { paa := newPeerAddrs() pa := &paa // Try a lot of random inputs. @@ -144,16 +144,16 @@ func TestPeerAddrsExpiry(t *testing.T) { // So this should test for all possible 5 element inputs. 
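Besides loop headers, several hand-rolled search loops collapse into the slices package: `slices.Contains` for direct equality (the mdns_test.go, autorelay_test.go, and basic_host_test.go hunks above) and `slices.ContainsFunc` when the match needs a predicate, as in core/test/addrs.go where the Multiaddr `Equal` method value is passed directly. A minimal sketch, not part of the patch:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	protos := []string{"/ipfs/id/1.0.0", "/foo", "/bar"}

	// Replaces: for _, p := range protos { if p == "/foo" { found = true; break } }
	fmt.Println(slices.Contains(protos, "/foo")) // true

	// ContainsFunc takes a predicate; a method value such as a.Equal
	// fits here whenever its signature is func(elem) bool.
	ok := slices.ContainsFunc(protos, func(p string) bool {
		return strings.HasPrefix(p, "/ba")
	})
	fmt.Println(ok) // true
}
```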
const N = 5 expiringAddrs := peerAddrsInput(N) - for i := 0; i < N; i++ { + for i := range N { expiringAddrs[i].Expiry = time.Time{}.Add(time.Duration(1+rand.Intn(N)) * time.Second) } - for i := 0; i < N; i++ { + for i := range N { pa.Insert(expiringAddrs[i]) } expiry := time.Time{}.Add(time.Duration(1+rand.Intn(N)) * time.Second) expected := []ma.Multiaddr{} - for i := 0; i < N; i++ { + for i := range N { if !expiry.Before(expiringAddrs[i].Expiry) { expected = append(expected, expiringAddrs[i].Addr) } @@ -167,7 +167,7 @@ func TestPeerAddrsExpiry(t *testing.T) { got = append(got, ea.Addr) } expiries := []int{} - for i := 0; i < N; i++ { + for i := range N { expiries = append(expiries, expiringAddrs[i].Expiry.Second()) } require.ElementsMatch(t, expected, got, "failed for input: element expiries: %v, expiry: %v", expiries, expiry.Second()) @@ -195,7 +195,7 @@ func BenchmarkPeerAddrs(b *testing.B) { paa := newPeerAddrs() pa := &paa expiringAddrs := peerAddrsInput(sz) - for i := 0; i < sz; i++ { + for i := range sz { pa.Insert(expiringAddrs[i]) } b.StartTimer() diff --git a/p2p/host/peerstore/pstoremem/inmem_test.go b/p2p/host/peerstore/pstoremem/inmem_test.go index 064b7d9ae..0a047eeed 100644 --- a/p2p/host/peerstore/pstoremem/inmem_test.go +++ b/p2p/host/peerstore/pstoremem/inmem_test.go @@ -23,7 +23,7 @@ func TestInvalidOption(t *testing.T) { func TestFuzzInMemoryPeerstore(t *testing.T) { // Just create and close a bunch of peerstores. If this leaks, we'll // catch it in the leak check below. - for i := 0; i < 100; i++ { + for range 100 { ps, err := NewPeerstore() require.NoError(t, err) ps.Close() @@ -99,10 +99,10 @@ func BenchmarkGC(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() - for i := 0; i < peerCount; i++ { + for i := range peerCount { id := peer.ID(strconv.Itoa(i)) addrs := make([]multiaddr.Multiaddr, addrsPerPeer) - for j := 0; j < addrsPerPeer; j++ { + for j := range addrsPerPeer { addrs[j] = multiaddr.StringCast("/ip4/1.2.3.4/tcp/" + strconv.Itoa(j)) } ps.AddAddrs(id, addrs, 24*time.Hour) diff --git a/p2p/host/peerstore/pstoremem/metadata.go b/p2p/host/peerstore/pstoremem/metadata.go index 305c74171..700d6bd89 100644 --- a/p2p/host/peerstore/pstoremem/metadata.go +++ b/p2p/host/peerstore/pstoremem/metadata.go @@ -9,7 +9,7 @@ import ( type memoryPeerMetadata struct { // store other data, like versions - ds map[peer.ID]map[string]interface{} + ds map[peer.ID]map[string]any dslock sync.RWMutex } @@ -17,23 +17,23 @@ var _ pstore.PeerMetadata = (*memoryPeerMetadata)(nil) func NewPeerMetadata() *memoryPeerMetadata { return &memoryPeerMetadata{ - ds: make(map[peer.ID]map[string]interface{}), + ds: make(map[peer.ID]map[string]any), } } -func (ps *memoryPeerMetadata) Put(p peer.ID, key string, val interface{}) error { +func (ps *memoryPeerMetadata) Put(p peer.ID, key string, val any) error { ps.dslock.Lock() defer ps.dslock.Unlock() m, ok := ps.ds[p] if !ok { - m = make(map[string]interface{}) + m = make(map[string]any) ps.ds[p] = m } m[key] = val return nil } -func (ps *memoryPeerMetadata) Get(p peer.ID, key string) (interface{}, error) { +func (ps *memoryPeerMetadata) Get(p peer.ID, key string) (any, error) { ps.dslock.RLock() defer ps.dslock.RUnlock() m, ok := ps.ds[p] diff --git a/p2p/host/peerstore/pstoremem/peerstore.go b/p2p/host/peerstore/pstoremem/peerstore.go index 15383f068..95016c443 100644 --- a/p2p/host/peerstore/pstoremem/peerstore.go +++ b/p2p/host/peerstore/pstoremem/peerstore.go @@ -20,7 +20,7 @@ type pstoremem struct { var _ 
peerstore.Peerstore = &pstoremem{} -type Option interface{} +type Option any // NewPeerstore creates an in-memory thread-safe collection of peers. // It's the caller's responsibility to call RemovePeer to ensure @@ -57,7 +57,7 @@ func NewPeerstore(opts ...Option) (ps *pstoremem, err error) { func (ps *pstoremem) Close() (err error) { var errs []error - weakClose := func(name string, c interface{}) { + weakClose := func(name string, c any) { if cl, ok := c.(io.Closer); ok { if err = cl.Close(); err != nil { errs = append(errs, fmt.Errorf("%s error: %s", name, err)) diff --git a/p2p/host/peerstore/test/addr_book_suite.go b/p2p/host/peerstore/test/addr_book_suite.go index 85929ad77..dbe48f5e8 100644 --- a/p2p/host/peerstore/test/addr_book_suite.go +++ b/p2p/host/peerstore/test/addr_book_suite.go @@ -217,7 +217,7 @@ func testSetNegativeTTLClears(m pstore.AddrBook, _ *mockClock.Mock) func(t *test // try to remove the same addr multiple times m.SetAddrs(id, addrs[:5], time.Hour) repeated := make([]multiaddr.Multiaddr, 10) - for i := 0; i < len(repeated); i++ { + for i := range repeated { repeated[i] = addrs[0] } m.SetAddrs(id, repeated, -1) diff --git a/p2p/host/peerstore/test/benchmarks_suite.go b/p2p/host/peerstore/test/benchmarks_suite.go index 63446c16e..844db6f9b 100644 --- a/p2p/host/peerstore/test/benchmarks_suite.go +++ b/p2p/host/peerstore/test/benchmarks_suite.go @@ -38,15 +38,15 @@ func BenchmarkPeerstore(b *testing.B, factory PeerstoreFactory, _ string) { b.ResetTimer() itersPerBM := 10 for i := 0; i < b.N; i++ { - for j := 0; j < itersPerBM; j++ { + for j := range itersPerBM { pp := peers[(i+j)%N] ps.AddAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL) } - for j := 0; j < itersPerBM; j++ { + for j := range itersPerBM { pp := peers[(i+j)%N] ps.Addrs(pp.ID) } - for j := 0; j < itersPerBM; j++ { + for j := range itersPerBM { pp := peers[(i+j)%N] ps.ClearAddrs(pp.ID) } diff --git a/p2p/host/peerstore/test/keybook_suite.go b/p2p/host/peerstore/test/keybook_suite.go index 3e559753b..71b522211 100644 --- a/p2p/host/peerstore/test/keybook_suite.go +++ b/p2p/host/peerstore/test/keybook_suite.go @@ -114,7 +114,7 @@ func testKeyBookPeers(kb pstore.KeyBook) func(t *testing.T) { } var peers peer.IDSlice - for i := 0; i < 10; i++ { + for range 10 { // Add a public key. 
_, pub, err := pt.RandTestKeyPair(ic.RSA, 2048) if err != nil { @@ -309,7 +309,7 @@ func benchmarkAddPrivKey(kb pstore.KeyBook) func(*testing.B) { func benchmarkPeersWithKeys(kb pstore.KeyBook) func(*testing.B) { return func(b *testing.B) { - for i := 0; i < 10; i++ { + for range 10 { priv, pub, err := pt.RandTestKeyPair(ic.RSA, 2048) if err != nil { b.Fatal(err) diff --git a/p2p/host/peerstore/test/peerstore_suite.go b/p2p/host/peerstore/test/peerstore_suite.go index 369b459d0..f04ccee2a 100644 --- a/p2p/host/peerstore/test/peerstore_suite.go +++ b/p2p/host/peerstore/test/peerstore_suite.go @@ -5,7 +5,7 @@ import ( "fmt" "math/rand" "reflect" - "sort" + "slices" "testing" "time" @@ -46,7 +46,7 @@ func TestPeerstore(t *testing.T, factory PeerstoreFactory) { } func sortProtos(protos []protocol.ID) { - sort.Slice(protos, func(i, j int) bool { return protos[i] < protos[j] }) + slices.Sort(protos) } func testAddrStream(ps pstore.Peerstore) func(t *testing.T) { @@ -65,7 +65,7 @@ func testAddrStream(ps pstore.Peerstore) func(t *testing.T) { // now receive them (without hanging) timeout := time.After(time.Second * 10) - for i := 0; i < 20; i++ { + for range 20 { select { case <-addrch: case <-timeout: @@ -88,7 +88,7 @@ func testAddrStream(ps pstore.Peerstore) func(t *testing.T) { // receive some concurrently with the goroutine timeout = time.After(time.Second * 10) - for i := 0; i < 40; i++ { + for range 40 { select { case <-addrch: case <-timeout: @@ -99,7 +99,7 @@ func testAddrStream(ps pstore.Peerstore) func(t *testing.T) { // receive some more after waiting for that goroutine to complete timeout = time.After(time.Second * 10) - for i := 0; i < 20; i++ { + for range 20 { select { case <-addrch: case <-timeout: @@ -110,7 +110,7 @@ func testAddrStream(ps pstore.Peerstore) func(t *testing.T) { cancel() // now check the *second* subscription. We should see 80 addresses. 
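Aside on the sortProtos change above: sort.Slice with an index-based `<` comparator becomes slices.Sort, which works on any ordered element type (protocol.ID is a string type, so it qualifies). A standalone sketch of the two equivalent forms (values illustrative):

    package main

    import (
        "fmt"
        "slices"
        "sort"
    )

    func main() {
        a := []string{"c", "a", "b"}
        b := slices.Clone(a)

        // Old form: explicit comparator over indices.
        sort.Slice(a, func(i, j int) bool { return a[i] < a[j] })

        // New form: shorter, and no index bookkeeping.
        slices.Sort(b)

        fmt.Println(a, b) // [a b c] [a b c]
    }
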
- for i := 0; i < 80; i++ { + for range 80 { <-addrch2 } @@ -131,14 +131,14 @@ func testGetStreamBeforePeerAdded(ps pstore.Peerstore) func(t *testing.T) { defer cancel() ach := ps.AddrStream(ctx, pid) - for i := 0; i < 10; i++ { + for i := range 10 { ps.AddAddr(pid, addrs[i], time.Hour) } received := make(map[string]bool) var count int - for i := 0; i < 10; i++ { + for range 10 { a, ok := <-ach if !ok { t.Fatal("channel shouldnt be closed yet") @@ -181,7 +181,7 @@ func testAddrStreamDuplicates(ps pstore.Peerstore) func(t *testing.T) { ach := ps.AddrStream(ctx, pid) go func() { - for i := 0; i < 10; i++ { + for i := range 10 { ps.AddAddr(pid, addrs[i], time.Hour) ps.AddAddr(pid, addrs[rand.Intn(10)], time.Hour) } @@ -374,8 +374,8 @@ func testCertifiedAddrBook(ps pstore.Peerstore) func(*testing.T) { } func getAddrs(t *testing.T, n int) []ma.Multiaddr { - var addrs []ma.Multiaddr - for i := 0; i < n; i++ { + addrs := make([]ma.Multiaddr, 0, n) + for i := range n { a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", i)) if err != nil { t.Fatal(err) @@ -389,7 +389,7 @@ func getAddrs(t *testing.T, n int) []ma.Multiaddr { func TestPeerstoreProtoStoreLimits(t *testing.T, ps pstore.Peerstore, limit int) { p := peer.ID("foobar") protocols := make([]protocol.ID, limit) - for i := 0; i < limit; i++ { + for i := range limit { protocols[i] = protocol.ID(fmt.Sprintf("protocol %d", i)) } diff --git a/p2p/host/peerstore/test/utils.go b/p2p/host/peerstore/test/utils.go index cb7309290..b3095710b 100644 --- a/p2p/host/peerstore/test/utils.go +++ b/p2p/host/peerstore/test/utils.go @@ -2,6 +2,7 @@ package test import ( "fmt" + "slices" "testing" "github.com/libp2p/go-libp2p/core/peer" @@ -36,7 +37,7 @@ func RandomPeer(b *testing.B, addrCount int) *peerpair { b.Fatal(err) } - for i := 0; i < addrCount; i++ { + for i := range addrCount { if addrs[i], err = ma.NewMultiaddr(fmt.Sprintf(aFmt, i, pid)); err != nil { b.Fatal(err) } @@ -46,7 +47,7 @@ func RandomPeer(b *testing.B, addrCount int) *peerpair { func getPeerPairs(b *testing.B, n int, addrsPerPeer int) []*peerpair { pps := make([]*peerpair, n) - for i := 0; i < n; i++ { + for i := range n { pps[i] = RandomPeer(b, addrsPerPeer) } return pps @@ -54,7 +55,7 @@ func getPeerPairs(b *testing.B, n int, addrsPerPeer int) []*peerpair { func GenerateAddrs(count int) []ma.Multiaddr { var addrs = make([]ma.Multiaddr, count) - for i := 0; i < count; i++ { + for i := range count { addrs[i] = Multiaddr(fmt.Sprintf("/ip4/1.1.1.%d/tcp/1111", i)) } return addrs @@ -62,7 +63,7 @@ func GenerateAddrs(count int) []ma.Multiaddr { func GeneratePeerIDs(count int) []peer.ID { var ids = make([]peer.ID, count) - for i := 0; i < count; i++ { + for i := range count { ids[i], _ = pt.RandPeerID() } return ids @@ -75,14 +76,7 @@ func AssertAddressesEqual(t *testing.T, exp, act []ma.Multiaddr) { } for _, a := range exp { - found := false - - for _, b := range act { - if a.Equal(b) { - found = true - break - } - } + found := slices.ContainsFunc(act, a.Equal) if !found { t.Fatalf("expected address %s not found", a) diff --git a/p2p/host/resource-manager/allowlist_test.go b/p2p/host/resource-manager/allowlist_test.go index d665b63ff..9783f14ba 100644 --- a/p2p/host/resource-manager/allowlist_test.go +++ b/p2p/host/resource-manager/allowlist_test.go @@ -234,7 +234,7 @@ func BenchmarkAllowlistCheck(b *testing.B) { countOfTotalPeersForTest := 100_000 mas := make([]multiaddr.Multiaddr, countOfTotalPeersForTest) - for i := 0; i < countOfTotalPeersForTest; i++ { + for i := range 
countOfTotalPeersForTest { ip := make([]byte, 16) n, err := rand.Reader.Read(ip) diff --git a/p2p/host/resource-manager/conn_limiter_test.go b/p2p/host/resource-manager/conn_limiter_test.go index d86f9d7a3..371149769 100644 --- a/p2p/host/resource-manager/conn_limiter_test.go +++ b/p2p/host/resource-manager/conn_limiter_test.go @@ -64,7 +64,7 @@ func TestItLimits(t *testing.T) { t.Run("IPv6 with multiple limits", func(t *testing.T) { cl := newConnLimiter() - for i := 0; i < defaultMaxConcurrentConns; i++ { + for i := range defaultMaxConcurrentConns { ip := net.ParseIP("ff:2:3:4::1") binary.BigEndian.PutUint16(ip[14:], uint16(i)) ipAddr := netip.MustParseAddr(ip.String()) diff --git a/p2p/host/resource-manager/error.go b/p2p/host/resource-manager/error.go index 1e87e00aa..8940ce833 100644 --- a/p2p/host/resource-manager/error.go +++ b/p2p/host/resource-manager/error.go @@ -15,8 +15,8 @@ func (e *ErrStreamOrConnLimitExceeded) Error() string { return e.err.Error() } func (e *ErrStreamOrConnLimitExceeded) Unwrap() error { return e.err } // edge may be "" if this is not an edge error -func logValuesStreamLimit(scope, edge string, dir network.Direction, stat network.ScopeStat, err error) []interface{} { - logValues := make([]interface{}, 0, 2*8) +func logValuesStreamLimit(scope, edge string, dir network.Direction, stat network.ScopeStat, err error) []any { + logValues := make([]any, 0, 2*8) logValues = append(logValues, "scope", scope) if edge != "" { logValues = append(logValues, "edge", edge) @@ -34,8 +34,8 @@ func logValuesStreamLimit(scope, edge string, dir network.Direction, stat networ } // edge may be "" if this is not an edge error -func logValuesConnLimit(scope, edge string, dir network.Direction, usefd bool, stat network.ScopeStat, err error) []interface{} { - logValues := make([]interface{}, 0, 2*9) +func logValuesConnLimit(scope, edge string, dir network.Direction, usefd bool, stat network.ScopeStat, err error) []any { + logValues := make([]any, 0, 2*9) logValues = append(logValues, "scope", scope) if edge != "" { logValues = append(logValues, "edge", edge) @@ -62,8 +62,8 @@ func (e *ErrMemoryLimitExceeded) Error() string { return e.err.Error() } func (e *ErrMemoryLimitExceeded) Unwrap() error { return e.err } // edge may be "" if this is not an edge error -func logValuesMemoryLimit(scope, edge string, stat network.ScopeStat, err error) []interface{} { - logValues := make([]interface{}, 0, 2*8) +func logValuesMemoryLimit(scope, edge string, stat network.ScopeStat, err error) []any { + logValues := make([]any, 0, 2*8) logValues = append(logValues, "scope", scope) if edge != "" { logValues = append(logValues, "edge", edge) diff --git a/p2p/host/resource-manager/extapi.go b/p2p/host/resource-manager/extapi.go index 415d7f8bd..8f16c7c8d 100644 --- a/p2p/host/resource-manager/extapi.go +++ b/p2p/host/resource-manager/extapi.go @@ -2,6 +2,7 @@ package rcmgr import ( "bytes" + "slices" "sort" "strings" @@ -86,9 +87,7 @@ func (r *resourceManager) ListProtocols() []protocol.ID { result = append(result, p) } - sort.Slice(result, func(i, j int) bool { - return result[i] < result[j] - }) + slices.Sort(result) return result } diff --git a/p2p/host/resource-manager/limit_defaults.go b/p2p/host/resource-manager/limit_defaults.go index e7489c45d..ce68b994f 100644 --- a/p2p/host/resource-manager/limit_defaults.go +++ b/p2p/host/resource-manager/limit_defaults.go @@ -3,6 +3,7 @@ package rcmgr import ( "encoding/json" "fmt" + "maps" "math" "strconv" @@ -347,32 +348,32 @@ func (l *ResourceLimits) 
Build(defaults Limit) BaseLimit { } type PartialLimitConfig struct { - System ResourceLimits `json:",omitempty"` - Transient ResourceLimits `json:",omitempty"` + System ResourceLimits + Transient ResourceLimits // Limits that are applied to resources with an allowlisted multiaddr. // These will only be used if the normal System & Transient limits are // reached. - AllowlistedSystem ResourceLimits `json:",omitempty"` - AllowlistedTransient ResourceLimits `json:",omitempty"` + AllowlistedSystem ResourceLimits + AllowlistedTransient ResourceLimits - ServiceDefault ResourceLimits `json:",omitempty"` + ServiceDefault ResourceLimits Service map[string]ResourceLimits `json:",omitempty"` - ServicePeerDefault ResourceLimits `json:",omitempty"` + ServicePeerDefault ResourceLimits ServicePeer map[string]ResourceLimits `json:",omitempty"` - ProtocolDefault ResourceLimits `json:",omitempty"` + ProtocolDefault ResourceLimits Protocol map[protocol.ID]ResourceLimits `json:",omitempty"` - ProtocolPeerDefault ResourceLimits `json:",omitempty"` + ProtocolPeerDefault ResourceLimits ProtocolPeer map[protocol.ID]ResourceLimits `json:",omitempty"` - PeerDefault ResourceLimits `json:",omitempty"` + PeerDefault ResourceLimits Peer map[peer.ID]ResourceLimits `json:",omitempty"` - Conn ResourceLimits `json:",omitempty"` - Stream ResourceLimits `json:",omitempty"` + Conn ResourceLimits + Stream ResourceLimits } func (cfg *PartialLimitConfig) MarshalJSON() ([]byte, error) { @@ -493,9 +494,7 @@ func buildMapWithDefault[K comparable](definedLimits map[K]ResourceLimits, defau } out := make(map[K]BaseLimit) - for k, l := range defaults { - out[k] = l - } + maps.Copy(out, defaults) for k, l := range definedLimits { if defaultForKey, ok := out[k]; ok { @@ -653,11 +652,9 @@ func scale(base BaseLimit, inc BaseLimitIncrease, memory int64, numFD int) BaseL FD: base.FD, } if inc.FDFraction > 0 && numFD > 0 { - l.FD = int(inc.FDFraction * float64(numFD)) - if l.FD < base.FD { + l.FD = max(int(inc.FDFraction*float64(numFD)), // Use at least the base amount - l.FD = base.FD - } + base.FD) } return l } diff --git a/p2p/host/resource-manager/rcmgr.go b/p2p/host/resource-manager/rcmgr.go index 52561ff29..1eb070ba7 100644 --- a/p2p/host/resource-manager/rcmgr.go +++ b/p2p/host/resource-manager/rcmgr.go @@ -629,11 +629,11 @@ func PeerStrInScopeName(name string) string { return "" } // Index to avoid allocating a new string - peerSplitIdx := strings.Index(name, "peer:") - if peerSplitIdx == -1 { + _, after, ok := strings.Cut(name, "peer:") + if !ok { return "" } - p := (name[peerSplitIdx+len("peer:"):]) + p := (after) return p } @@ -647,11 +647,11 @@ func ParseProtocolScopeName(name string) string { } // Index to avoid allocating a new string - separatorIdx := strings.Index(name, ":") - if separatorIdx == -1 { + _, after, ok := strings.Cut(name, ":") + if !ok { return "" } - return name[separatorIdx+1:] + return after } return "" } diff --git a/p2p/host/resource-manager/scope_test.go b/p2p/host/resource-manager/scope_test.go index 7bf7ff721..4ffe2cef5 100644 --- a/p2p/host/resource-manager/scope_test.go +++ b/p2p/host/resource-manager/scope_test.go @@ -68,11 +68,9 @@ func TestCheckMemory(t *testing.T) { }) f := func(limit uint64, res uint64, currentMem uint64, priShift uint8) bool { - limit = (limit % math.MaxInt64) + 1 - if limit < 1024 { + limit = max((limit%math.MaxInt64)+1, // We set the min to 1KiB - limit = 1024 - } + 1024) currentMem = (currentMem % limit) // We can't have reserved more than our limit res = (res >> 14) // We won't 
reasonably ever have a reservation > 2^50 rc := resources{limit: &BaseLimit{ diff --git a/p2p/host/resource-manager/trace.go b/p2p/host/resource-manager/trace.go index abcecd8e0..6c843f526 100644 --- a/p2p/host/resource-manager/trace.go +++ b/p2p/host/resource-manager/trace.go @@ -23,7 +23,7 @@ type trace struct { mx sync.Mutex done bool - pendingWrites []interface{} + pendingWrites []any reporters []TraceReporter } @@ -191,7 +191,7 @@ type TraceEvt struct { Scope *scopeClass `json:",omitempty"` Name string `json:",omitempty"` - Limit interface{} `json:",omitempty"` + Limit any `json:",omitempty"` Priority uint8 `json:",omitempty"` @@ -243,7 +243,7 @@ func (t *trace) backgroundWriter(out io.WriteCloser) { ticker := time.NewTicker(time.Second) defer ticker.Stop() - var pend []interface{} + var pend []any getEvents := func() { t.mx.Lock() @@ -299,7 +299,7 @@ func (t *trace) backgroundWriter(out io.WriteCloser) { } } -func (t *trace) writeEvents(pend []interface{}, jout *json.Encoder) error { +func (t *trace) writeEvents(pend []any, jout *json.Encoder) error { for _, e := range pend { if err := jout.Encode(e); err != nil { return err diff --git a/p2p/http/auth/auth_test.go b/p2p/http/auth/auth_test.go index 54f0bb060..459abc56c 100644 --- a/p2p/http/auth/auth_test.go +++ b/p2p/http/auth/auth_test.go @@ -270,7 +270,7 @@ func TestConcurrentAuth(t *testing.T) { t.Cleanup(ts.Close) wg := sync.WaitGroup{} - for i := 0; i < 10; i++ { + for i := range 10 { wg.Add(1) go func() { defer wg.Done() @@ -278,7 +278,7 @@ func TestConcurrentAuth(t *testing.T) { require.NoError(t, err) clientAuth := ClientPeerIDAuth{PrivKey: clientKey} - reqBody := []byte(fmt.Sprintf("echo %d", i)) + reqBody := fmt.Appendf(nil, "echo %d", i) req, err := http.NewRequest("POST", ts.URL, bytes.NewReader(reqBody)) require.NoError(t, err) req.Host = "example.com" diff --git a/p2p/http/auth/internal/handshake/handshake.go b/p2p/http/auth/internal/handshake/handshake.go index 1c237ae3a..553363bbe 100644 --- a/p2p/http/auth/internal/handshake/handshake.go +++ b/p2p/http/auth/internal/handshake/handshake.go @@ -56,12 +56,12 @@ func (p *params) parsePeerIDAuthSchemeParams(headerVal []byte) error { for ; err == nil; advance, token, err = splitAuthHeaderParams(headerVal, true) { headerVal = headerVal[advance:] bs := token - splitAt := bytes.Index(bs, []byte("=")) - if splitAt == -1 { + before, after, ok := bytes.Cut(bs, []byte("=")) + if !ok { return errInvalid } - kB := bs[:splitAt] - v := bs[splitAt+1:] + kB := before + v := after if len(v) < 2 || v[0] != '"' || v[len(v)-1] != '"' { return errInvalid } diff --git a/p2p/http/libp2phttp.go b/p2p/http/libp2phttp.go index 7931846c0..f865bb56b 100644 --- a/p2p/http/libp2phttp.go +++ b/p2p/http/libp2phttp.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "io" + "maps" "net" "net/http" "net/url" @@ -1192,9 +1193,7 @@ func (h *Host) AddPeerMetadata(server peer.ID, meta PeerMeta) { h.peerMetadata.Add(server, meta) return } - for proto, m := range meta { - origMeta[proto] = m - } + maps.Copy(origMeta, meta) h.peerMetadata.Add(server, origMeta) } diff --git a/p2p/metricshelper/pool_test.go b/p2p/metricshelper/pool_test.go index 85021e559..b941101b9 100644 --- a/p2p/metricshelper/pool_test.go +++ b/p2p/metricshelper/pool_test.go @@ -8,7 +8,7 @@ import ( ) func TestStringSlicePool(t *testing.T) { - for i := 0; i < 1e5; i++ { + for range int(1e5) { s := GetStringSlice() require.Empty(t, *s) require.Equal(t, 8, cap(*s)) diff --git a/p2p/muxer/testsuite/mux.go b/p2p/muxer/testsuite/mux.go index 
93d24785e..473948c90 100644 --- a/p2p/muxer/testsuite/mux.go +++ b/p2p/muxer/testsuite/mux.go @@ -40,7 +40,7 @@ func init() { } } -func getFunctionName(i interface{}) string { +func getFunctionName(i any) string { return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() } @@ -196,7 +196,7 @@ func SubtestStress(t *testing.T, opt Options) { rateLimitN := 5000 // max of 5k funcs, because -race has 8k max. rateLimitChan := make(chan struct{}, rateLimitN) - for i := 0; i < rateLimitN; i++ { + for range rateLimitN { rateLimitChan <- struct{}{} } @@ -356,7 +356,7 @@ func SubtestStreamOpenStress(t *testing.T, tr network.Multiplexer) { } stress := func() { defer wg.Done() - for i := 0; i < count; i++ { + for range count { s, err := muxa.OpenStream(context.Background()) if err != nil { t.Error(err) @@ -376,7 +376,7 @@ func SubtestStreamOpenStress(t *testing.T, tr network.Multiplexer) { } } - for i := 0; i < workers; i++ { + for range workers { wg.Add(1) go stress() } @@ -530,7 +530,7 @@ func SubtestStreamLeftOpen(t *testing.T, tr network.Multiplexer) { wg.Add(1 + numStreams) go func() { defer wg.Done() - for i := 0; i < numStreams; i++ { + for range numStreams { stra, err := muxa.OpenStream(context.Background()) checkErr(t, err) go func() { @@ -545,7 +545,7 @@ func SubtestStreamLeftOpen(t *testing.T, tr network.Multiplexer) { wg.Add(1 + numStreams) go func() { defer wg.Done() - for i := 0; i < numStreams; i++ { + for range numStreams { str, err := muxb.AcceptStream() checkErr(t, err) go func() { diff --git a/p2p/net/connmgr/bench_test.go b/p2p/net/connmgr/bench_test.go index 83442f916..a27d67352 100644 --- a/p2p/net/connmgr/bench_test.go +++ b/p2p/net/connmgr/bench_test.go @@ -26,7 +26,7 @@ func BenchmarkLockContention(b *testing.B) { kill := make(chan struct{}) var wg sync.WaitGroup - for i := 0; i < 16; i++ { + for range 16 { wg.Add(1) go func() { defer wg.Done() diff --git a/p2p/net/connmgr/connmgr.go b/p2p/net/connmgr/connmgr.go index 3b302e31d..9f78371c0 100644 --- a/p2p/net/connmgr/connmgr.go +++ b/p2p/net/connmgr/connmgr.go @@ -3,6 +3,7 @@ package connmgr import ( "context" "fmt" + "maps" "sort" "sync" "sync/atomic" @@ -554,9 +555,7 @@ func (cm *BasicConnMgr) GetTagInfo(p peer.ID) *connmgr.TagInfo { Conns: make(map[string]time.Time), } - for t, v := range pi.tags { - out.Tags[t] = v - } + maps.Copy(out.Tags, pi.tags) for t, v := range pi.decaying { out.Tags[t.name] = v.Value } diff --git a/p2p/net/connmgr/connmgr_test.go b/p2p/net/connmgr/connmgr_test.go index b9cc5c00e..543820840 100644 --- a/p2p/net/connmgr/connmgr_test.go +++ b/p2p/net/connmgr/connmgr_test.go @@ -144,8 +144,8 @@ func TestConnTrimming(t *testing.T) { defer cm.Close() not := cm.Notifee() - var conns []network.Conn - for i := 0; i < 300; i++ { + conns := make([]network.Conn, 0, 300) + for range 300 { rc := randConn(t, nil) conns = append(conns, rc) not.Connected(nil, rc) @@ -157,7 +157,7 @@ func TestConnTrimming(t *testing.T) { } } - for i := 0; i < 100; i++ { + for i := range 100 { cm.TagPeer(conns[i].RemotePeer(), "foo", 10) } @@ -165,7 +165,7 @@ func TestConnTrimming(t *testing.T) { cm.TrimOpenConns(context.Background()) - for i := 0; i < 100; i++ { + for i := range 100 { c := conns[i] if c.(*tconn).isClosed() { t.Fatal("these shouldnt be closed") @@ -180,7 +180,7 @@ func TestConnTrimming(t *testing.T) { func TestConnsToClose(t *testing.T) { addConns := func(cm *BasicConnMgr, n int) { not := cm.Notifee() - for i := 0; i < n; i++ { + for range n { conn := randConn(t, nil) not.Connected(nil, conn) } @@ -430,7 +430,7 @@ 
func TestGracePeriod(t *testing.T) { not := cm.Notifee() - var conns []network.Conn + conns := make([]network.Conn, 0, 31) // Add a connection and wait the grace period. { @@ -446,7 +446,7 @@ func TestGracePeriod(t *testing.T) { } // quickly add 30 connections (sending us above the high watermark) - for i := 0; i < 30; i++ { + for range 30 { rc := randConn(t, not.Disconnected) conns = append(conns, rc) not.Connected(nil, rc) @@ -484,10 +484,10 @@ func TestQuickBurstRespectsSilencePeriod(t *testing.T) { defer cm.Close() not := cm.Notifee() - var conns []network.Conn + conns := make([]network.Conn, 0, 30) // quickly produce 30 connections (sending us above the high watermark) - for i := 0; i < 30; i++ { + for range 30 { rc := randConn(t, not.Disconnected) conns = append(conns, rc) not.Connected(nil, rc) @@ -526,7 +526,7 @@ func TestPeerProtectionSingleTag(t *testing.T) { } // produce 20 connections with unique peers. - for i := 0; i < 20; i++ { + for range 20 { addConn(20) } @@ -552,7 +552,7 @@ func TestPeerProtectionSingleTag(t *testing.T) { } // add 5 more connection, sending the connection manager overboard. - for i := 0; i < 5; i++ { + for range 5 { addConn(20) } @@ -578,7 +578,7 @@ func TestPeerProtectionSingleTag(t *testing.T) { cm.Unprotect(protected[0].RemotePeer(), "global") // add 2 more connections, sending the connection manager overboard again. - for i := 0; i < 2; i++ { + for range 2 { addConn(20) } @@ -601,8 +601,8 @@ func TestPeerProtectionMultipleTags(t *testing.T) { not := cm.Notifee() // produce 20 connections with unique peers. - var conns []network.Conn - for i := 0; i < 20; i++ { + conns := make([]network.Conn, 0, 20) + for range 20 { rc := randConn(t, not.Disconnected) conns = append(conns, rc) not.Connected(nil, rc) @@ -638,7 +638,7 @@ func TestPeerProtectionMultipleTags(t *testing.T) { } // add 2 more connections, sending the connection manager overboard again. - for i := 0; i < 2; i++ { + for range 2 { rc := randConn(t, not.Disconnected) not.Connected(nil, rc) cm.TagPeer(rc.RemotePeer(), "test", 20) @@ -657,7 +657,7 @@ func TestPeerProtectionMultipleTags(t *testing.T) { cm.Unprotect(protected[0].RemotePeer(), "tag2") // add 2 more connections, sending the connection manager overboard again. - for i := 0; i < 2; i++ { + for range 2 { rc := randConn(t, not.Disconnected) not.Connected(nil, rc) cm.TagPeer(rc.RemotePeer(), "test", 20) @@ -794,7 +794,7 @@ func TestConcurrentCleanupAndTagging(t *testing.T) { require.NoError(t, err) defer cm.Close() - for i := 0; i < 1000; i++ { + for range 1000 { conn := randConn(t, nil) cm.TagPeer(conn.RemotePeer(), "test", 20) } @@ -950,12 +950,12 @@ func TestSafeConcurrency(t *testing.T) { const runs = 10 const concurrency = 10 var wg sync.WaitGroup - for i := 0; i < concurrency; i++ { + for range concurrency { wg.Add(1) go func() { // add conns. This mimics new connection events pis := peerInfos{p1, p2} - for i := 0; i < runs; i++ { + for i := range runs { pi := pis[i%len(pis)] s := ss.get(pi.id) s.Lock() @@ -968,7 +968,7 @@ func TestSafeConcurrency(t *testing.T) { wg.Add(1) go func() { pis := peerInfos{p1, p2} - for i := 0; i < runs; i++ { + for range runs { pis.SortByValueAndStreams(ss, false) } wg.Done() diff --git a/p2p/net/mock/mock.go b/p2p/net/mock/mock.go index db1c7d385..858bf338b 100644 --- a/p2p/net/mock/mock.go +++ b/p2p/net/mock/mock.go @@ -9,7 +9,7 @@ var log = logging.Logger("mocknet") // WithNPeers constructs a Mocknet with N peers. 
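Aside on the slice declarations above (conns in TestConnTrimming, TestGracePeriod, TestQuickBurstRespectsSilencePeriod, TestPeerProtectionMultipleTags): switching from a nil `var` slice to make with an explicit capacity means the appends that follow never reallocate. A minimal sketch, with an illustrative size:

    package main

    import "fmt"

    func main() {
        const n = 300
        // Length 0, capacity n: the appends below never grow the backing array.
        conns := make([]int, 0, n)
        for i := range n {
            conns = append(conns, i)
        }
        fmt.Println(len(conns), cap(conns)) // 300 300
    }
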
func WithNPeers(n int) (Mocknet, error) { m := New() - for i := 0; i < n; i++ { + for range n { if _, err := m.GenPeer(); err != nil { return nil, err } diff --git a/p2p/net/mock/mock_notif_test.go b/p2p/net/mock/mock_notif_test.go index 713e0a5d8..87ecb8f27 100644 --- a/p2p/net/mock/mock_notif_test.go +++ b/p2p/net/mock/mock_notif_test.go @@ -2,6 +2,8 @@ package mocknet import ( "context" + "maps" + "slices" "sync" "testing" "time" @@ -68,11 +70,8 @@ func TestNotifications(t *testing.T) { for _, c := range cons { var found bool - for _, c2 := range expect { - if c == c2 { - found = true - break - } + if slices.Contains(expect, c) { + found = true } if !found { @@ -138,9 +137,7 @@ func TestNotifications(t *testing.T) { // Avoid holding this lock while waiting, otherwise we can deadlock. streamStateCopy := map[network.Stream]chan struct{}{} n1.streamState.Lock() - for str, ch := range n1.streamState.m { - streamStateCopy[str] = ch - } + maps.Copy(streamStateCopy, n1.streamState.m) n1.streamState.Unlock() for str1, ch1 := range streamStateCopy { diff --git a/p2p/net/mock/mock_test.go b/p2p/net/mock/mock_test.go index d34c0728a..a3dbda92b 100644 --- a/p2p/net/mock/mock_test.go +++ b/p2p/net/mock/mock_test.go @@ -354,8 +354,8 @@ func TestAdding(t *testing.T) { mn := New() defer mn.Close() - var peers []peer.ID - for i := 0; i < 3; i++ { + peers := make([]peer.ID, 0, 3) + for range 3 { priv, _, err := crypto.GenerateEd25519Key(rand.Reader) if err != nil { t.Fatal(err) @@ -488,7 +488,7 @@ func TestLimitedStreams(t *testing.T) { messageSize := 500 handler := func(s network.Stream) { b := make([]byte, messageSize) - for i := 0; i < messages; i++ { + for range messages { if _, err := io.ReadFull(s, b); err != nil { t.Fatal(err) } @@ -524,7 +524,7 @@ func TestLimitedStreams(t *testing.T) { filler := make([]byte, messageSize-4) data := append([]byte("ping"), filler...) 
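Aside on the mock_notif_test.go hunks above: the hand-written membership loop collapses to slices.Contains, and the manual map copy loop to maps.Copy. A standalone sketch of both helpers (values illustrative):

    package main

    import (
        "fmt"
        "maps"
        "slices"
    )

    func main() {
        expect := []string{"a", "b", "c"}
        // slices.Contains replaces the find-and-break loop.
        fmt.Println(slices.Contains(expect, "b")) // true

        src := map[string]int{"x": 1, "y": 2}
        dst := make(map[string]int)
        // maps.Copy replaces `for k, v := range src { dst[k] = v }`.
        maps.Copy(dst, src)
        fmt.Println(len(dst)) // 2
    }
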
before := time.Now() - for i := 0; i < messages; i++ { + for range messages { wg.Add(1) if _, err := s.Write(data); err != nil { panic(err) diff --git a/p2p/net/nat/internal/nat/natpmp.go b/p2p/net/nat/internal/nat/natpmp.go index bffc0a99f..ed12628da 100644 --- a/p2p/net/nat/internal/nat/natpmp.go +++ b/p2p/net/nat/internal/nat/natpmp.go @@ -116,7 +116,7 @@ func (n *natpmpNAT) AddPortMapping(_ context.Context, protocol string, internalP } } - for i := 0; i < 3; i++ { + for range 3 { externalPort := randomPort() _, err = n.c.AddPortMapping(protocol, internalPort, externalPort, timeoutInSeconds) if err == nil { diff --git a/p2p/net/nat/internal/nat/upnp.go b/p2p/net/nat/internal/nat/upnp.go index 13d898e58..bceb1b1f2 100644 --- a/p2p/net/nat/internal/nat/upnp.go +++ b/p2p/net/nat/internal/nat/upnp.go @@ -193,7 +193,7 @@ func (u *upnp_NAT) AddPortMapping(ctx context.Context, protocol string, internal } } - for i := 0; i < 3; i++ { + for range 3 { externalPort := randomPort() err = u.c.AddPortMappingCtx(ctx, "", uint16(externalPort), mapProtocol(protocol), uint16(internalPort), ip.String(), true, description, timeoutInSeconds) if err == nil { diff --git a/p2p/net/nat/nat_test.go b/p2p/net/nat/nat_test.go index 98c0ee017..478267b21 100644 --- a/p2p/net/nat/nat_test.go +++ b/p2p/net/nat/nat_test.go @@ -195,7 +195,7 @@ func TestNATRediscoveryOnConnectionError(t *testing.T) { errConnectionRefused := errors.New("goupnp: error performing SOAP HTTP request: Post \"http://192.168.1.1:1234/ctl/IPConn\": dial tcp 192.168.1.1:1234: connect: connection refused") // Set up expectations for the failures that will trigger rediscovery - for i := 0; i < 3; i++ { + for i := range 3 { expectPortMappingFailure(mockNAT, "tcp", 10000+i, errConnectionRefused) } @@ -204,7 +204,7 @@ func TestNATRediscoveryOnConnectionError(t *testing.T) { expectPortMappingSuccess(newMockNAT, "udp", 4002, 4002) // Now trigger the failures - for i := 0; i < 3; i++ { + for i := range 3 { externalPort := n.establishMapping(context.Background(), "tcp", 10000+i) require.Equal(t, 0, externalPort) } @@ -270,7 +270,7 @@ func TestNATRediscoveryOldRouterReturns(t *testing.T) { errConnectionRefused := errors.New("goupnp: error performing SOAP HTTP request: dial tcp 192.168.1.1:1234: connect: connection refused") // Set up expectations for the first two failures - for i := 0; i < 2; i++ { + for i := range 2 { mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000+i, gomock.Any(), MappingDuration).Return(0, errConnectionRefused).Times(1) mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000+i, gomock.Any(), time.Duration(0)).Return(0, errConnectionRefused).Times(1) } @@ -283,7 +283,7 @@ func TestNATRediscoveryOldRouterReturns(t *testing.T) { mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 4001, gomock.Any(), MappingDuration).Return(4001, nil).Times(1) // Trigger the failures - for i := 0; i < 2; i++ { + for i := range 2 { n.establishMapping(context.Background(), "tcp", 10000+i) } n.establishMapping(context.Background(), "tcp", 10002) @@ -330,7 +330,7 @@ func TestNATRediscoveryFailureThreshold(t *testing.T) { errOther := errors.New("some other error") // Test 1: Only 2 failures - should NOT trigger rediscovery - for i := 0; i < 2; i++ { + for i := range 2 { mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000+i, gomock.Any(), MappingDuration).Return(0, errConnectionRefused).Times(1) mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000+i, gomock.Any(), time.Duration(0)).Return(0, errConnectionRefused).Times(1) 
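Aside on the Index-plus-slicing rewrites earlier in this patch (PeerStrInScopeName and ParseProtocolScopeName in rcmgr.go, and the bytes.Cut form in the auth handshake): strings.Cut splits around the first occurrence of a separator and reports whether it was found, replacing the Index/-1 dance. A standalone sketch of the equivalence (input string illustrative):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        name := "peer:12D3KooW..."

        // Old form: locate the separator, then slice manually.
        i := strings.Index(name, "peer:")
        if i != -1 {
            fmt.Println(name[i+len("peer:"):])
        }

        // New form: Cut returns the text before and after the separator.
        if _, after, ok := strings.Cut(name, "peer:"); ok {
            fmt.Println(after)
        }
    }
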
n.establishMapping(context.Background(), "tcp", 10000+i) @@ -345,7 +345,7 @@ func TestNATRediscoveryFailureThreshold(t *testing.T) { n.establishMapping(context.Background(), "tcp", 10002) // Now even 2 more connection failures shouldn't trigger (counter was reset) - for i := 0; i < 2; i++ { + for i := range 2 { mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10003+i, gomock.Any(), MappingDuration).Return(0, errConnectionRefused).Times(1) mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10003+i, gomock.Any(), time.Duration(0)).Return(0, errConnectionRefused).Times(1) n.establishMapping(context.Background(), "tcp", 10003+i) @@ -359,7 +359,7 @@ func TestNATRediscoveryFailureThreshold(t *testing.T) { n.establishMapping(context.Background(), "tcp", 10005) // Again, 2 failures shouldn't trigger - for i := 0; i < 2; i++ { + for i := range 2 { mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10006+i, gomock.Any(), MappingDuration).Return(0, errConnectionRefused).Times(1) mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10006+i, gomock.Any(), time.Duration(0)).Return(0, errConnectionRefused).Times(1) n.establishMapping(context.Background(), "tcp", 10006+i) @@ -410,14 +410,14 @@ func TestNATRediscoveryConcurrency(t *testing.T) { // Simulate multiple goroutines hitting failures after threshold // First get to threshold - for i := 0; i < 3; i++ { + for i := range 3 { mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000+i, gomock.Any(), MappingDuration).Return(0, errConnectionRefused).Times(1) mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000+i, gomock.Any(), time.Duration(0)).Return(0, errConnectionRefused).Times(1) n.establishMapping(context.Background(), "tcp", 10000+i) } // Set up expectations for concurrent failure attempts - for i := 0; i < 5; i++ { + for i := range 5 { port := 10003 + i mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", port, gomock.Any(), MappingDuration).Return(0, errConnectionRefused).AnyTimes() mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", port, gomock.Any(), time.Duration(0)).Return(0, errConnectionRefused).AnyTimes() @@ -425,7 +425,7 @@ func TestNATRediscoveryConcurrency(t *testing.T) { // Now launch multiple goroutines that would all try to trigger rediscovery var wg sync.WaitGroup - for i := 0; i < 5; i++ { + for i := range 5 { wg.Add(1) go func(port int) { defer wg.Done() diff --git a/p2p/net/pnet/psk_conn_test.go b/p2p/net/pnet/psk_conn_test.go index 0f4495161..0797ad6b8 100644 --- a/p2p/net/pnet/psk_conn_test.go +++ b/p2p/net/pnet/psk_conn_test.go @@ -72,7 +72,7 @@ func TestPSKFragmentation(t *testing.T) { wch <- err }() - for i := 0; i < 10; i++ { + for range 10 { if _, err := psk2.Read(out); err != nil { t.Fatal(err) } diff --git a/p2p/net/reuseport/transport_test.go b/p2p/net/reuseport/transport_test.go index c46d3a1f0..c2096ff08 100644 --- a/p2p/net/reuseport/transport_test.go +++ b/p2p/net/reuseport/transport_test.go @@ -4,6 +4,7 @@ import ( "context" "net" "runtime" + "slices" "testing" "time" @@ -82,10 +83,8 @@ func dialOne(t *testing.T, tr *Transport, listener manet.Listener, expected ...i if len(expected) == 0 { return port } - for _, p := range expected { - if p == port { - return port - } + if slices.Contains(expected, port) { + return port } t.Errorf("dialed %s from %v. 
expected to dial from port %v", listener.Multiaddr(), c.LocalAddr(), expected) return 0 @@ -271,7 +270,7 @@ func TestDuplicateGlobal(t *testing.T) { port := dialOne(t, &trB, listenerA) // Check consistency - for i := 0; i < 10; i++ { + for range 10 { dialOne(t, &trB, listenerA, port) } } diff --git a/p2p/net/swarm/black_hole_detector_test.go b/p2p/net/swarm/black_hole_detector_test.go index 1d59eb544..5193c4e86 100644 --- a/p2p/net/swarm/black_hole_detector_test.go +++ b/p2p/net/swarm/black_hole_detector_test.go @@ -35,7 +35,7 @@ func TestBlackHoleSuccessCounterReset(t *testing.T) { bhf.RecordResult(true) // check if calls up to n are probes again - for i := 0; i < n; i++ { + for range n { if bhf.HandleRequest() != blackHoleStateProbing { t.Fatalf("expected black hole detector state to reset after success") } @@ -95,10 +95,10 @@ func TestBlackHoleDetectorInApplicableAddress(t *testing.T) { ma.StringCast("/ip6/::1/udp/1234/quic-v1"), ma.StringCast("/ip4/192.168.1.5/udp/1234/quic-v1"), } - for i := 0; i < 1000; i++ { + for range 1000 { filteredAddrs, _ := bhd.FilterAddrs(addrs) require.ElementsMatch(t, addrs, filteredAddrs) - for j := 0; j < len(addrs); j++ { + for j := range addrs { bhd.RecordResult(addrs[j], false) } } @@ -109,7 +109,7 @@ func TestBlackHoleDetectorUDPDisabled(t *testing.T) { bhd := &blackHoleDetector{ipv6: ipv6F} publicAddr := ma.StringCast("/ip4/1.2.3.4/udp/1234/quic-v1") privAddr := ma.StringCast("/ip4/192.168.1.5/udp/1234/quic-v1") - for i := 0; i < 100; i++ { + for range 100 { bhd.RecordResult(publicAddr, false) } wantAddrs := []ma.Multiaddr{publicAddr, privAddr} @@ -125,7 +125,7 @@ func TestBlackHoleDetectorIPv6Disabled(t *testing.T) { bhd := &blackHoleDetector{udp: udpF} publicAddr := ma.StringCast("/ip6/2001::1/tcp/1234") privAddr := ma.StringCast("/ip6/::1/tcp/1234") - for i := 0; i < 100; i++ { + for range 100 { bhd.RecordResult(publicAddr, false) } @@ -144,7 +144,7 @@ func TestBlackHoleDetectorProbes(t *testing.T) { } udp6Addr := ma.StringCast("/ip6/2001::1/udp/1234/quic-v1") addrs := []ma.Multiaddr{udp6Addr} - for i := 0; i < 3; i++ { + for range 3 { bhd.RecordResult(udp6Addr, false) } for i := 1; i < 100; i++ { @@ -177,10 +177,10 @@ func TestBlackHoleDetectorAddrFiltering(t *testing.T) { udp: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 10, Name: "udp"}, ipv6: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 10, Name: "ipv6"}, } - for i := 0; i < 100; i++ { + for range 100 { bhd.RecordResult(udp4Pub, !udpBlocked) } - for i := 0; i < 100; i++ { + for range 100 { bhd.RecordResult(tcp6Pub, !ipv6Blocked) } return bhd @@ -217,7 +217,7 @@ func TestBlackHoleDetectorReadOnlyMode(t *testing.T) { bhd := &blackHoleDetector{udp: udpF, ipv6: ipv6F, readOnly: true} publicAddr := ma.StringCast("/ip4/1.2.3.4/udp/1234/quic-v1") privAddr := ma.StringCast("/ip6/::1/tcp/1234") - for i := 0; i < 100; i++ { + for range 100 { bhd.RecordResult(publicAddr, true) } allAddr := []ma.Multiaddr{privAddr, publicAddr} @@ -231,7 +231,7 @@ func TestBlackHoleDetectorReadOnlyMode(t *testing.T) { // a non readonly shared state black hole detector nbhd := &blackHoleDetector{udp: bhd.udp, ipv6: bhd.ipv6, readOnly: false} - for i := 0; i < 100; i++ { + for range 100 { nbhd.RecordResult(publicAddr, true) } // no addresses filtered because state is allowed diff --git a/p2p/net/swarm/dial_ranker.go b/p2p/net/swarm/dial_ranker.go index 154a0344a..302838e63 100644 --- a/p2p/net/swarm/dial_ranker.go +++ b/p2p/net/swarm/dial_ranker.go @@ -98,7 +98,7 @@ func DefaultDialRanker(addrs []ma.Multiaddr) 
[]network.AddrDelay { maxDelay = res[len(res)-1].Delay } - for i := 0; i < len(addrs); i++ { + for i := range addrs { res = append(res, network.AddrDelay{Addr: addrs[i], Delay: maxDelay + PublicOtherDelay}) } @@ -273,7 +273,7 @@ func isQUICAddr(a ma.Multiaddr) bool { // filterAddrs filters an address slice in place func filterAddrs(addrs []ma.Multiaddr, f func(a ma.Multiaddr) bool) (filtered, rest []ma.Multiaddr) { j := 0 - for i := 0; i < len(addrs); i++ { + for i := range addrs { if f(addrs[i]) { addrs[i], addrs[j] = addrs[j], addrs[i] j++ diff --git a/p2p/net/swarm/dial_sync_test.go b/p2p/net/swarm/dial_sync_test.go index e44e33176..590258b52 100644 --- a/p2p/net/swarm/dial_sync_test.go +++ b/p2p/net/swarm/dial_sync_test.go @@ -139,7 +139,7 @@ func TestDialSyncAllCancel(t *testing.T) { }() cancel() - for i := 0; i < 2; i++ { + for range 2 { select { case <-finished: case <-time.After(time.Second): @@ -213,13 +213,13 @@ func TestStressActiveDial(_ *testing.T) { pid := peer.ID("foo") makeDials := func() { - for i := 0; i < 10000; i++ { + for range 10000 { ds.Dial(context.Background(), pid) } wg.Done() } - for i := 0; i < 100; i++ { + for range 100 { wg.Add(1) go makeDials() } diff --git a/p2p/net/swarm/dial_test.go b/p2p/net/swarm/dial_test.go index 101ce9912..213512b1b 100644 --- a/p2p/net/swarm/dial_test.go +++ b/p2p/net/swarm/dial_test.go @@ -141,7 +141,7 @@ func TestSimultDials(t *testing.T) { } log.Info("Connecting swarms simultaneously.") - for i := 0; i < 10; i++ { // connect 10x for each. + for range 10 { // connect 10x for each. wg.Add(2) go connect(swarms[0], swarms[1].LocalPeer(), ifaceAddrs1[0]) go connect(swarms[1], swarms[0].LocalPeer(), ifaceAddrs0[0]) @@ -254,7 +254,7 @@ func TestDialBackoff(t *testing.T) { dialOnlineNode := func(dst peer.ID, times int) <-chan bool { ch := make(chan bool) - for i := 0; i < times; i++ { + for range times { go func() { if _, err := s1.DialPeer(ctx, dst); err != nil { t.Error("error dialing", dst, err) @@ -269,7 +269,7 @@ func TestDialBackoff(t *testing.T) { dialOfflineNode := func(dst peer.ID, times int) <-chan bool { ch := make(chan bool) - for i := 0; i < times; i++ { + for range times { go func() { if c, err := s1.DialPeer(ctx, dst); err != nil { ch <- false @@ -304,7 +304,7 @@ func TestDialBackoff(t *testing.T) { } // 3) s1->s2 should succeed. 
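Aside on filterAddrs in dial_ranker.go above: only its loop header is modernized; the body keeps the classic swap-to-front, in-place partition. A sketch of that idiom on a toy element type (values illustrative; note the "rest" half is not order-preserving):

    package main

    import "fmt"

    // partition moves elements satisfying f to the front, in place,
    // and returns the two halves of the original slice.
    func partition(xs []int, f func(int) bool) (kept, rest []int) {
        j := 0
        for i := range xs {
            if f(xs[i]) {
                xs[i], xs[j] = xs[j], xs[i]
                j++
            }
        }
        return xs[:j], xs[j:]
    }

    func main() {
        kept, rest := partition([]int{1, 2, 3, 4, 5}, func(x int) bool { return x%2 == 0 })
        fmt.Println(kept, rest) // [2 4] [3 1 5]
    }
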
- for i := 0; i < N; i++ { + for range N { select { case r := <-s2done: if !r { @@ -327,7 +327,7 @@ func TestDialBackoff(t *testing.T) { // 4) s1->s3 should not (and should place s3 on backoff) // N-1 should finish before dialTimeout1x * 2 - for i := 0; i < N; i++ { + for i := range N { select { case <-s2done: t.Error("s2 should have no more") @@ -391,7 +391,7 @@ func TestDialBackoff(t *testing.T) { } // 8) s2 dials should all hang, and succeed - for i := 0; i < N; i++ { + for range N { select { case r := <-s2done: if !r { @@ -471,7 +471,7 @@ func TestDialPeerFailed(t *testing.T) { testedSwarm, targetSwarm := swarms[0], swarms[1] const expectedErrorsCount = 5 - for i := 0; i < expectedErrorsCount; i++ { + for range expectedErrorsCount { _, silentPeerAddress, silentPeerListener := newSilentPeer(t) go acceptAndHang(silentPeerListener) defer silentPeerListener.Close() @@ -614,7 +614,7 @@ func TestDialSimultaneousJoin(t *testing.T) { c3 := <-connch // raise any errors from the previous goroutines - for i := 0; i < 3; i++ { + for range 3 { require.NoError(t, <-errs) } diff --git a/p2p/net/swarm/dial_worker_test.go b/p2p/net/swarm/dial_worker_test.go index bc422ee4a..ebde85983 100644 --- a/p2p/net/swarm/dial_worker_test.go +++ b/p2p/net/swarm/dial_worker_test.go @@ -202,7 +202,7 @@ func TestDialWorkerLoopConcurrent(t *testing.T) { const dials = 100 var wg sync.WaitGroup resch := make(chan dialResponse, dials) - for i := 0; i < dials; i++ { + for range dials { wg.Add(1) go func() { defer wg.Done() @@ -218,7 +218,7 @@ func TestDialWorkerLoopConcurrent(t *testing.T) { } wg.Wait() - for i := 0; i < dials; i++ { + for range dials { res := <-resch require.NoError(t, res.err) } @@ -270,7 +270,7 @@ func TestDialWorkerLoopConcurrentFailure(t *testing.T) { var errTimeout = errors.New("timed out!") var wg sync.WaitGroup resch := make(chan dialResponse, dials) - for i := 0; i < dials; i++ { + for range dials { wg.Add(1) go func() { defer wg.Done() @@ -287,7 +287,7 @@ func TestDialWorkerLoopConcurrentFailure(t *testing.T) { } wg.Wait() - for i := 0; i < dials; i++ { + for range dials { res := <-resch require.Error(t, res.err) if res.err == errTimeout { @@ -317,7 +317,7 @@ func TestDialWorkerLoopConcurrentMix(t *testing.T) { const dials = 100 var wg sync.WaitGroup resch := make(chan dialResponse, dials) - for i := 0; i < dials; i++ { + for range dials { wg.Add(1) go func() { defer wg.Done() @@ -333,7 +333,7 @@ func TestDialWorkerLoopConcurrentMix(t *testing.T) { } wg.Wait() - for i := 0; i < dials; i++ { + for range dials { res := <-resch require.NoError(t, res.err) } @@ -350,8 +350,8 @@ func TestDialWorkerLoopConcurrentFailureStress(t *testing.T) { _, p2 := newPeer(t) - var addrs []ma.Multiaddr - for i := 0; i < 16; i++ { + addrs := make([]ma.Multiaddr, 0, 16) + for i := range 16 { addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/11.0.0.%d/tcp/%d", i%256, 1234+i))) } s1.Peerstore().AddAddrs(p2, addrs, peerstore.PermanentAddrTTL) @@ -364,7 +364,7 @@ func TestDialWorkerLoopConcurrentFailureStress(t *testing.T) { var errTimeout = errors.New("timed out!") var wg sync.WaitGroup resch := make(chan dialResponse, dials) - for i := 0; i < dials; i++ { + for range dials { wg.Add(1) go func() { defer wg.Done() @@ -381,7 +381,7 @@ func TestDialWorkerLoopConcurrentFailureStress(t *testing.T) { } wg.Wait() - for i := 0; i < dials; i++ { + for range dials { res := <-resch require.Error(t, res.err) if res.err == errTimeout { @@ -397,7 +397,7 @@ func TestDialWorkerLoopConcurrentFailureStress(t *testing.T) { func 
TestDialQueueNextBatch(t *testing.T) { addrs := make([]ma.Multiaddr, 0) - for i := 0; i < 10; i++ { + for i := range 10 { addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/tcp/%d", i))) } testcase := []struct { @@ -485,7 +485,7 @@ func TestDialQueueNextBatch(t *testing.T) { } sort.Slice(b, func(i, j int) bool { return b[i].Addr.String() < b[j].Addr.String() }) sort.Slice(batch, func(i, j int) bool { return batch[i].String() < batch[j].String() }) - for i := 0; i < len(b); i++ { + for i := range b { if !b[i].Addr.Equal(batch[i]) { log.Error("expected address mismatch", "expected", batch[i], "got", b[i].Addr) } @@ -745,7 +745,7 @@ loop: func makeRanker(tc []timedDial) network.DialRanker { return func(_ []ma.Multiaddr) []network.AddrDelay { res := make([]network.AddrDelay, len(tc)) - for i := 0; i < len(tc); i++ { + for i := range tc { res[i] = network.AddrDelay{Addr: tc[i].addr, Delay: tc[i].delay} } return res @@ -755,7 +755,7 @@ func makeRanker(tc []timedDial) network.DialRanker { // TestCheckDialWorkerLoopScheduling will check the checker func TestCheckDialWorkerLoopScheduling(t *testing.T) { addrs := make([]ma.Multiaddr, 0) - for i := 0; i < 10; i++ { + for i := range 10 { for { p := 20000 + i addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p))) @@ -801,7 +801,7 @@ func TestCheckDialWorkerLoopScheduling(t *testing.T) { func TestDialWorkerLoopRanking(t *testing.T) { addrs := make([]ma.Multiaddr, 0) - for i := 0; i < 10; i++ { + for i := range 10 { for { p := 20000 + i addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p))) @@ -972,7 +972,7 @@ func TestDialWorkerLoopHolePunching(t *testing.T) { s1.dialRanker = func(addrs []ma.Multiaddr) (res []network.AddrDelay) { res = make([]network.AddrDelay, len(addrs)) - for i := 0; i < len(addrs); i++ { + for i := range addrs { delay := 10 * time.Second if addrs[i].Equal(t1) { // fire t1 immediately @@ -1163,14 +1163,14 @@ func BenchmarkDialRanker(b *testing.B) { } } addrs := make([]ma.Multiaddr, N) - for i := 0; i < N; i++ { + for i := range N { addrs[i] = ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", i)) } b.Run("equal delay", func(b *testing.B) { b.ReportAllocs() addrDelays := make([]network.AddrDelay, N) - for i := 0; i < N; i++ { + for i := range N { addrDelays[i] = network.AddrDelay{ Addr: addrs[i], Delay: 0, @@ -1183,7 +1183,7 @@ func BenchmarkDialRanker(b *testing.B) { b.Run("sorted delay", func(b *testing.B) { b.ReportAllocs() addrDelays := make([]network.AddrDelay, N) - for i := 0; i < N; i++ { + for i := range N { addrDelays[i] = network.AddrDelay{ Addr: addrs[i], Delay: time.Millisecond * time.Duration(i), diff --git a/p2p/net/swarm/limiter_test.go b/p2p/net/swarm/limiter_test.go index 82a2f5349..f7a994cb1 100644 --- a/p2p/net/swarm/limiter_test.go +++ b/p2p/net/swarm/limiter_test.go @@ -291,15 +291,15 @@ func TestStressLimiter(t *testing.T) { l := newDialLimiterWithParams(df, 20, 5) - var bads []ma.Multiaddr - for i := 0; i < 100; i++ { + bads := make([]ma.Multiaddr, 0, 101) + for i := range 100 { bads = append(bads, addrWithPort(i)) } addresses := append(bads, addrWithPort(2000)) success := make(chan struct{}) - for i := 0; i < 20; i++ { + for i := range 20 { go func(id peer.ID) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -324,7 +324,7 @@ func TestStressLimiter(t *testing.T) { }(peer.ID(fmt.Sprintf("testpeer%d", i))) } - for i := 0; i < 20; i++ { + for range 20 { select { case <-success: case <-time.After(time.Minute): @@ -354,7 +354,7 @@ 
func TestFDLimitUnderflow(t *testing.T) { const num = 3 * fdLimit wg.Add(num) errs := make(chan error, num) - for i := 0; i < num; i++ { + for i := range num { go func(id peer.ID, i int) { defer wg.Done() ctx, cancel := context.WithCancel(context.Background()) diff --git a/p2p/net/swarm/peers_test.go b/p2p/net/swarm/peers_test.go index 20b522fb9..bbb37b547 100644 --- a/p2p/net/swarm/peers_test.go +++ b/p2p/net/swarm/peers_test.go @@ -37,7 +37,7 @@ func TestPeers(t *testing.T) { require.Eventually(t, func() bool { return len(s2.Peers()) > 0 }, 3*time.Second, 50*time.Millisecond) connect(s2, s1.LocalPeer(), s1.ListenAddresses()[0]) - for i := 0; i < 100; i++ { + for range 100 { connect(s1, s2.LocalPeer(), s2.ListenAddresses()[0]) connect(s2, s1.LocalPeer(), s1.ListenAddresses()[0]) } diff --git a/p2p/net/swarm/resolve_test.go b/p2p/net/swarm/resolve_test.go index 1921e9433..b01bf3ffc 100644 --- a/p2p/net/swarm/resolve_test.go +++ b/p2p/net/swarm/resolve_test.go @@ -37,7 +37,7 @@ func TestSwarmResolver(t *testing.T) { t.Run("Test Limits", func(t *testing.T) { var ipaddrs []net.IPAddr var manyDNSAddrs []string - for i := 0; i < 255; i++ { + for i := range 255 { ip := "1.2.3." + strconv.Itoa(i) ipaddrs = append(ipaddrs, net.IPAddr{IP: net.ParseIP(ip)}) manyDNSAddrs = append(manyDNSAddrs, "dnsaddr=/ip4/"+ip) @@ -53,21 +53,21 @@ func TestSwarmResolver(t *testing.T) { res, err := swarmResolver.ResolveDNSComponent(ctx, multiaddr.StringCast("/dns/example.com"), 10) require.NoError(t, err) require.Equal(t, 10, len(res)) - for i := 0; i < 10; i++ { + for i := range 10 { require.Equal(t, "/ip4/1.2.3."+strconv.Itoa(i), res[i].String()) } res, err = swarmResolver.ResolveDNSAddr(ctx, "", multiaddr.StringCast("/dnsaddr/example.com"), 1, 10) require.NoError(t, err) require.Equal(t, 10, len(res)) - for i := 0; i < 10; i++ { + for i := range 10 { require.Equal(t, "/ip4/1.2.3."+strconv.Itoa(i), res[i].String()) } }) t.Run("Test Recursive Limits", func(t *testing.T) { recursiveDNSAddr := make(map[string][]string) - for i := 0; i < 255; i++ { + for i := range 255 { recursiveDNSAddr["_dnsaddr."+strconv.Itoa(i)+".example.com"] = []string{"dnsaddr=/dnsaddr/" + strconv.Itoa(i+1) + ".example.com"} } recursiveDNSAddr["_dnsaddr.255.example.com"] = []string{"dnsaddr=/ip4/127.0.0.1"} diff --git a/p2p/net/swarm/simul_test.go b/p2p/net/swarm/simul_test.go index 3fbe8f085..36ff3e96c 100644 --- a/p2p/net/swarm/simul_test.go +++ b/p2p/net/swarm/simul_test.go @@ -70,7 +70,7 @@ func TestSimultOpenFewStress(t *testing.T) { rounds := 10 // rounds := 100 - for i := 0; i < rounds; i++ { + for range rounds { subtestSwarm(t, swarms, msgs) <-time.After(10 * time.Millisecond) } diff --git a/p2p/net/swarm/swarm_dial.go b/p2p/net/swarm/swarm_dial.go index 19e835e82..b180bf489 100644 --- a/p2p/net/swarm/swarm_dial.go +++ b/p2p/net/swarm/swarm_dial.go @@ -179,10 +179,7 @@ func (db *DialBackoff) AddBackoff(p peer.ID, addr ma.Multiaddr) { return } - backoffTime := BackoffBase + BackoffCoef*time.Duration(ba.tries*ba.tries) - if backoffTime > BackoffMax { - backoffTime = BackoffMax - } + backoffTime := min(BackoffBase+BackoffCoef*time.Duration(ba.tries*ba.tries), BackoffMax) ba.until = time.Now().Add(backoffTime) ba.tries++ } @@ -202,10 +199,7 @@ func (db *DialBackoff) cleanup() { for p, e := range db.entries { good := false for _, backoff := range e { - backoffTime := BackoffBase + BackoffCoef*time.Duration(backoff.tries*backoff.tries) - if backoffTime > BackoffMax { - backoffTime = BackoffMax - } + backoffTime := 
min(BackoffBase+BackoffCoef*time.Duration(backoff.tries*backoff.tries), BackoffMax) if now.Before(backoff.until.Add(backoffTime)) { good = true break diff --git a/p2p/net/swarm/swarm_dial_test.go b/p2p/net/swarm/swarm_dial_test.go index 836ad7334..d8193874e 100644 --- a/p2p/net/swarm/swarm_dial_test.go +++ b/p2p/net/swarm/swarm_dial_test.go @@ -347,7 +347,7 @@ func TestAddrsForDialFiltering(t *testing.T) { if len(result) != len(tc.output) { t.Fatalf("output mismatch got: %s want: %s", result, tc.output) } - for i := 0; i < len(result); i++ { + for i := range result { if !result[i].Equal(tc.output[i]) { t.Fatalf("output mismatch got: %s want: %s", result, tc.output) } diff --git a/p2p/net/swarm/swarm_event_test.go b/p2p/net/swarm/swarm_event_test.go index 5010215fc..6438f11d1 100644 --- a/p2p/net/swarm/swarm_event_test.go +++ b/p2p/net/swarm/swarm_event_test.go @@ -106,7 +106,7 @@ func TestNoDeadlockWhenConsumingConnectednessEvents(t *testing.T) { } }() - for i := 0; i < 10; i++ { + for range 10 { // Connect and disconnect to trigger a bunch of events _, err := dialer.DialPeer(context.Background(), listener.LocalPeer()) require.NoError(t, err) @@ -120,7 +120,7 @@ func TestConnectednessEvents(t *testing.T) { s1, sub1 := newSwarmWithSubscription(t) const N = 100 peers := make([]*Swarm, N) - for i := 0; i < N; i++ { + for i := range N { peers[i] = swarmt.GenSwarm(t) } @@ -128,7 +128,7 @@ func TestConnectednessEvents(t *testing.T) { done := make(chan struct{}) go func() { defer close(done) - for i := 0; i < N; i++ { + for range N { e := <-sub1.Out() evt, ok := e.(event.EvtPeerConnectednessChanged) if !ok { @@ -141,7 +141,7 @@ func TestConnectednessEvents(t *testing.T) { } } }() - for i := 0; i < N; i++ { + for i := range N { s1.Peerstore().AddAddrs(peers[i].LocalPeer(), []ma.Multiaddr{peers[i].ListenAddresses()[0]}, time.Hour) _, err := s1.DialPeer(context.Background(), peers[i].LocalPeer()) require.NoError(t, err) @@ -156,7 +156,7 @@ func TestConnectednessEvents(t *testing.T) { done = make(chan struct{}) go func() { defer close(done) - for i := 0; i < N/2; i++ { + for range N / 2 { e := <-sub1.Out() evt, ok := e.(event.EvtPeerConnectednessChanged) if !ok { @@ -169,7 +169,7 @@ func TestConnectednessEvents(t *testing.T) { } } }() - for i := 0; i < N/2; i++ { + for i := range N / 2 { err := s1.ClosePeer(peers[i].LocalPeer()) require.NoError(t, err) } @@ -208,7 +208,7 @@ func TestConnectednessEventDeadlock(t *testing.T) { s1, sub1 := newSwarmWithSubscription(t) const N = 100 peers := make([]*Swarm, N) - for i := 0; i < N; i++ { + for i := range N { peers[i] = swarmt.GenSwarm(t) } @@ -232,7 +232,7 @@ func TestConnectednessEventDeadlock(t *testing.T) { s1.ClosePeer(evt.Peer) } }() - for i := 0; i < N; i++ { + for i := range N { s1.Peerstore().AddAddrs(peers[i].LocalPeer(), []ma.Multiaddr{peers[i].ListenAddresses()[0]}, time.Hour) go func(i int) { _, err := s1.DialPeer(context.Background(), peers[i].LocalPeer()) @@ -250,11 +250,11 @@ func TestConnectednessEventDeadlockWithDial(t *testing.T) { s1, sub1 := newSwarmWithSubscription(t) const N = 200 peers := make([]*Swarm, N) - for i := 0; i < N; i++ { + for i := range N { peers[i] = swarmt.GenSwarm(t) } peers2 := make([]*Swarm, N) - for i := 0; i < N; i++ { + for i := range N { peers2[i] = swarmt.GenSwarm(t) } @@ -266,7 +266,7 @@ func TestConnectednessEventDeadlockWithDial(t *testing.T) { defer subWG.Done() count := 0 for { - var e interface{} + var e any select { case e = <-sub1.Out(): case <-done: @@ -293,7 +293,7 @@ func 
TestConnectednessEventDeadlockWithDial(t *testing.T) { }() var wg sync.WaitGroup wg.Add(N) - for i := 0; i < N; i++ { + for i := range N { s1.Peerstore().AddAddrs(peers[i].LocalPeer(), []ma.Multiaddr{peers[i].ListenAddresses()[0]}, time.Hour) go func(i int) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) diff --git a/p2p/net/swarm/swarm_listen.go b/p2p/net/swarm/swarm_listen.go index 307c55580..bc09878f4 100644 --- a/p2p/net/swarm/swarm_listen.go +++ b/p2p/net/swarm/swarm_listen.go @@ -191,10 +191,5 @@ func (s *Swarm) AddListenAddr(a ma.Multiaddr) error { } func containsMultiaddr(addrs []ma.Multiaddr, addr ma.Multiaddr) bool { - for _, a := range addrs { - if addr.Equal(a) { - return true - } - } - return false + return slices.ContainsFunc(addrs, addr.Equal) } diff --git a/p2p/net/swarm/swarm_net_test.go b/p2p/net/swarm/swarm_net_test.go index 8b363c70c..48474a9d4 100644 --- a/p2p/net/swarm/swarm_net_test.go +++ b/p2p/net/swarm/swarm_net_test.go @@ -17,7 +17,7 @@ import ( // and tests Connectedness value is correct. func TestConnectednessCorrect(t *testing.T) { nets := make([]network.Network, 4) - for i := 0; i < 4; i++ { + for i := range 4 { nets[i] = GenSwarm(t) } @@ -93,7 +93,7 @@ func TestNetworkOpenStream(t *testing.T) { testString := "hello ipfs" nets := make([]network.Network, 4) - for i := 0; i < 4; i++ { + for i := range 4 { nets[i] = GenSwarm(t) } diff --git a/p2p/net/swarm/swarm_notif_test.go b/p2p/net/swarm/swarm_notif_test.go index e6fb1698d..4ef419ab2 100644 --- a/p2p/net/swarm/swarm_notif_test.go +++ b/p2p/net/swarm/swarm_notif_test.go @@ -2,6 +2,7 @@ package swarm_test import ( "context" + "slices" "testing" "time" @@ -80,11 +81,8 @@ func TestNotifications(t *testing.T) { for _, c := range cons { var found bool - for _, c2 := range expect { - if c == c2 { - found = true - break - } + if slices.Contains(expect, c) { + found = true } if !found { diff --git a/p2p/net/swarm/swarm_test.go b/p2p/net/swarm/swarm_test.go index eca721c2b..71eac5c31 100644 --- a/p2p/net/swarm/swarm_test.go +++ b/p2p/net/swarm/swarm_test.go @@ -70,7 +70,7 @@ func makeDialOnlySwarm(t *testing.T) *swarm.Swarm { func makeSwarms(t *testing.T, num int, opts ...Option) []*swarm.Swarm { swarms := make([]*swarm.Swarm, 0, num) - for i := 0; i < num; i++ { + for range num { swarm := GenSwarm(t, opts...) swarm.SetStreamHandler(EchoStreamHandler) swarms = append(swarms, swarm) @@ -135,7 +135,7 @@ func subtestSwarm(t *testing.T, SwarmNum int, MsgNum int) { } // send out ping! 
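Aside on containsMultiaddr in swarm_listen.go above (and the matching change in the peerstore test utils earlier): slices.ContainsFunc takes a predicate, and a method value like addr.Equal is already a func of the right shape, so the whole helper reduces to one call. A sketch with a stand-in type (Equal here is an illustrative case-insensitive comparison, standing in for Multiaddr.Equal):

    package main

    import (
        "fmt"
        "slices"
        "strings"
    )

    type addr string

    func (a addr) Equal(b addr) bool { return strings.EqualFold(string(a), string(b)) }

    func main() {
        addrs := []addr{"TCP", "udp"}
        target := addr("tcp")
        // target.Equal is a method value of type func(addr) bool,
        // exactly the predicate ContainsFunc expects.
        fmt.Println(slices.ContainsFunc(addrs, target.Equal)) // true
    }
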
- for k := 0; k < MsgNum; k++ { // with k messages + for k := range MsgNum { // with k messages msg := "ping" log.Debug("sending message", "local", s1.LocalPeer(), "msg", msg, "peer", p, "count", k) if _, err := stream.Write([]byte(msg)); err != nil { @@ -171,7 +171,7 @@ func subtestSwarm(t *testing.T, SwarmNum int, MsgNum int) { // receive pings msgCount := 0 msg := make([]byte, 4) - for k := 0; k < MsgNum; k++ { // with k messages + for k := range MsgNum { // with k messages // read from the stream if _, err := stream.Read(msg); err != nil { @@ -438,7 +438,7 @@ func TestStreamCount(t *testing.T) { streamAccepted <- struct{}{} }) - for i := 0; i < 10; i++ { + for range 10 { str, err := s2.NewStream(context.Background(), s1.LocalPeer()) require.NoError(t, err) str.Write([]byte("foobar")) diff --git a/p2p/net/upgrader/listener_test.go b/p2p/net/upgrader/listener_test.go index 95b16f123..e7185d0d5 100644 --- a/p2p/net/upgrader/listener_test.go +++ b/p2p/net/upgrader/listener_test.go @@ -56,14 +56,14 @@ func TestAcceptMultipleConns(t *testing.T) { ln := createListener(t, u) defer ln.Close() - var toClose []io.Closer + toClose := make([]io.Closer, 0, 20) defer func() { for _, c := range toClose { _ = c.Close() } }() - for i := 0; i < 10; i++ { + for range 10 { cconn, err := dial(t, u, ln.Multiaddr(), id, &network.NullScope{}) require.NoError(err) toClose = append(toClose, cconn) @@ -171,8 +171,8 @@ func TestListenerCloseClosesQueued(t *testing.T) { id, upgrader := createUpgrader(t) ln := createListener(t, upgrader) - var conns []transport.CapableConn - for i := 0; i < 10; i++ { + conns := make([]transport.CapableConn, 0, 10) + for range 10 { conn, err := dial(t, upgrader, ln.Multiaddr(), id, &network.NullScope{}) require.NoError(err) conns = append(conns, conn) @@ -228,7 +228,7 @@ func TestConcurrentAccept(t *testing.T) { // start num dials, which all block while setting up the muxer errCh := make(chan error, num) var wg sync.WaitGroup - for i := 0; i < num; i++ { + for range num { wg.Add(1) go func() { defer wg.Done() diff --git a/p2p/protocol/autonatv2/autonat.go b/p2p/protocol/autonatv2/autonat.go index 248830524..95f78c932 100644 --- a/p2p/protocol/autonatv2/autonat.go +++ b/p2p/protocol/autonatv2/autonat.go @@ -153,7 +153,7 @@ func (an *AutoNAT) Start(h host.Host) error { an.host = h // Listen on event.EvtPeerProtocolsUpdated, event.EvtPeerConnectednessChanged // event.EvtPeerIdentificationCompleted to maintain our set of autonat supporting peers. 
- sub, err := an.host.EventBus().Subscribe([]interface{}{ + sub, err := an.host.EventBus().Subscribe([]any{ new(event.EvtPeerProtocolsUpdated), new(event.EvtPeerConnectednessChanged), new(event.EvtPeerIdentificationCompleted), diff --git a/p2p/protocol/autonatv2/autonat_test.go b/p2p/protocol/autonatv2/autonat_test.go index 82b5014d3..a0d8e7814 100644 --- a/p2p/protocol/autonatv2/autonat_test.go +++ b/p2p/protocol/autonatv2/autonat_test.go @@ -109,7 +109,7 @@ func TestClientRequest(t *testing.T) { addrs := an.host.Addrs() addrbs := make([][]byte, len(addrs)) - for i := 0; i < len(addrs); i++ { + for i := range addrs { addrbs[i] = addrs[i].Bytes() } @@ -739,10 +739,7 @@ func FuzzClient(f *testing.F) { } ips = ips[1:] var x, y int64 - split := 128 / 8 - if len(ips) < split { - split = len(ips) - } + split := min(len(ips), 128/8) var b [8]byte copy(b[:], ips[:split]) x = int64(binary.LittleEndian.Uint64(b[:])) diff --git a/p2p/protocol/autonatv2/server_test.go b/p2p/protocol/autonatv2/server_test.go index fb50b3a55..a524b84a7 100644 --- a/p2p/protocol/autonatv2/server_test.go +++ b/p2p/protocol/autonatv2/server_test.go @@ -27,7 +27,7 @@ import ( func newTestRequests(addrs []ma.Multiaddr, sendDialData bool) (reqs []Request) { reqs = make([]Request, len(addrs)) - for i := 0; i < len(addrs); i++ { + for i := range addrs { reqs[i] = Request{Addr: addrs[i], SendDialData: sendDialData} } return @@ -106,7 +106,7 @@ func TestServerInvalidAddrsRejected(t *testing.T) { defer an.host.Close() var addrs []ma.Multiaddr - for i := 0; i < 100; i++ { + for i := range 100 { addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i))) } addrs = append(addrs, c.host.Addrs()...) @@ -125,7 +125,7 @@ func TestServerInvalidAddrsRejected(t *testing.T) { defer an.host.Close() var addrs []ma.Multiaddr - for i := 0; i < 10000; i++ { + for i := range 10000 { addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i))) } addrs = append(addrs, c.host.Addrs()...) 
@@ -215,7 +215,7 @@ func TestServerMaxConcurrentRequestsPerPeer(t *testing.T) { errChan := make(chan error) const n = 10 // num concurrentRequests will stall and n will fail - for i := 0; i < concurrentRequests+n; i++ { + for range concurrentRequests + n { go func() { _, err := c.GetReachability(context.Background(), []Request{{Addr: c.host.Addrs()[0], SendDialData: false}}) errChan <- err @@ -223,7 +223,7 @@ func TestServerMaxConcurrentRequestsPerPeer(t *testing.T) { } // check N failures - for i := 0; i < n; i++ { + for i := range n { select { case err := <-errChan: require.Error(t, err) @@ -237,7 +237,7 @@ func TestServerMaxConcurrentRequestsPerPeer(t *testing.T) { close(stallChan) // complete stalled requests // check concurrentRequests failures, as we won't send dial data - for i := 0; i < concurrentRequests; i++ { + for i := range concurrentRequests { select { case err := <-errChan: require.Error(t, err) @@ -284,7 +284,7 @@ func TestServerDataRequestJitter(t *testing.T) { } } - for i := 0; i < 10; i++ { + for range 10 { st := time.Now() res, err := c.GetReachability(context.Background(), []Request{{Addr: quicAddr, SendDialData: true}, {Addr: tcpAddr}}) took := time.Since(st) @@ -402,7 +402,7 @@ func TestRateLimiterConcurrentRequests(t *testing.T) { for concurrentRequests := 1; concurrentRequests <= N; concurrentRequests++ { cl := test.NewMockClock() r := rateLimiter{RPM: 10 * Peers * N, PerPeerRPM: 10 * Peers * N, DialDataRPM: 10 * Peers * N, now: cl.Now, MaxConcurrentRequestsPerPeer: concurrentRequests} - for p := 0; p < Peers; p++ { + for p := range Peers { for i := 0; i < concurrentRequests; i++ { require.True(t, r.Accept(peer.ID(fmt.Sprintf("peer-%d", p)))) } @@ -422,21 +422,21 @@ func TestRateLimiterConcurrentRequests(t *testing.T) { func TestRateLimiterStress(t *testing.T) { cl := test.NewMockClock() - for i := 0; i < 10; i++ { + for i := range 10 { r := rateLimiter{RPM: 20 + i, PerPeerRPM: 10 + i, DialDataRPM: i, MaxConcurrentRequestsPerPeer: 1, now: cl.Now} peers := make([]peer.ID, 10+i) - for i := 0; i < len(peers); i++ { + for i := range peers { peers[i] = peer.ID(fmt.Sprintf("peer-%d", i)) } peerSuccesses := make([]atomic.Int64, len(peers)) var success, dialDataSuccesses atomic.Int64 var wg sync.WaitGroup - for k := 0; k < 5; k++ { + for range 5 { wg.Add(1) go func() { defer wg.Done() - for i := 0; i < 2*60; i++ { + for range 2 * 60 { for j, p := range peers { if r.Accept(p) { success.Add(1) diff --git a/p2p/protocol/circuitv2/client/dial.go b/p2p/protocol/circuitv2/client/dial.go index 5175803e7..74511751f 100644 --- a/p2p/protocol/circuitv2/client/dial.go +++ b/p2p/protocol/circuitv2/client/dial.go @@ -29,7 +29,7 @@ func (e relayError) Error() string { return e.err } -func newRelayError(t string, args ...interface{}) error { +func newRelayError(t string, args ...any) error { return relayError{err: fmt.Sprintf(t, args...)} } @@ -182,7 +182,7 @@ func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) { var stat network.ConnStats if limit := msg.GetLimit(); limit != nil { stat.Limited = true - stat.Extra = make(map[interface{}]interface{}) + stat.Extra = make(map[any]any) stat.Extra[StatLimitDuration] = time.Duration(limit.GetDuration()) * time.Second stat.Extra[StatLimitData] = limit.GetData() } diff --git a/p2p/protocol/circuitv2/client/handlers.go b/p2p/protocol/circuitv2/client/handlers.go index 9859ec908..88659d57b 100644 --- a/p2p/protocol/circuitv2/client/handlers.go +++ b/p2p/protocol/circuitv2/client/handlers.go @@ -70,7 +70,7 @@ func (c 
*Client) handleStreamV2(s network.Stream) { var stat network.ConnStats if limit := msg.GetLimit(); limit != nil { stat.Limited = true - stat.Extra = make(map[interface{}]interface{}) + stat.Extra = make(map[any]any) stat.Extra[StatLimitDuration] = time.Duration(limit.GetDuration()) * time.Second stat.Extra[StatLimitData] = limit.GetData() } diff --git a/p2p/protocol/circuitv2/relay/constraints_test.go b/p2p/protocol/circuitv2/relay/constraints_test.go index bced8e409..93bce7ac0 100644 --- a/p2p/protocol/circuitv2/relay/constraints_test.go +++ b/p2p/protocol/circuitv2/relay/constraints_test.go @@ -40,7 +40,7 @@ func TestConstraints(t *testing.T) { res := infResources() res.MaxReservations = limit c := newConstraints(res) - for i := 0; i < limit; i++ { + for range limit { if err := c.Reserve(test.RandPeerIDFatal(t), randomIPv4Addr(t), expiry); err != nil { t.Fatal(err) } @@ -77,7 +77,7 @@ func TestConstraints(t *testing.T) { res := infResources() res.MaxReservationsPerIP = limit c := newConstraints(res) - for i := 0; i < limit; i++ { + for range limit { if err := c.Reserve(test.RandPeerIDFatal(t), ip, expiry); err != nil { t.Fatal(err) } @@ -104,7 +104,7 @@ func TestConstraints(t *testing.T) { res.MaxReservationsPerASN = limit c := newConstraints(res) const ipv6Prefix = "2a03:2880:f003:c07:face:b00c::" - for i := 0; i < limit; i++ { + for i := range limit { addr := getAddr(t, net.ParseIP(fmt.Sprintf("%s%d", ipv6Prefix, i+1))) if err := c.Reserve(test.RandPeerIDFatal(t), addr, expiry); err != nil { t.Fatal(err) @@ -130,7 +130,7 @@ func TestConstraintsCleanup(t *testing.T) { MaxReservationsPerASN: math.MaxInt32, } c := newConstraints(res) - for i := 0; i < limit; i++ { + for range limit { if err := c.Reserve(test.RandPeerIDFatal(t), randomIPv4Addr(t), expiry); err != nil { t.Fatal(err) } diff --git a/p2p/protocol/circuitv2/relay/relay_test.go b/p2p/protocol/circuitv2/relay/relay_test.go index 93390678d..8f7910e24 100644 --- a/p2p/protocol/circuitv2/relay/relay_test.go +++ b/p2p/protocol/circuitv2/relay/relay_test.go @@ -31,7 +31,7 @@ import ( ) func getNetHosts(t *testing.T, _ context.Context, n int) (hosts []host.Host, upgraders []transport.Upgrader) { - for i := 0; i < n; i++ { + for range n { privk, pubk, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) if err != nil { t.Fatal(err) @@ -156,7 +156,7 @@ func TestBasicRelay(t *testing.T) { t.Fatal(err) } for { - var e interface{} + var e any select { case e = <-sub.Out(): case <-time.After(2 * time.Second): @@ -289,7 +289,7 @@ func TestRelayLimitData(t *testing.T) { defer close(rch) buf := make([]byte, 1024) - for i := 0; i < 3; i++ { + for range 3 { n, err := s.Read(buf) if err != nil { t.Fatal(err) @@ -347,7 +347,7 @@ func TestRelayLimitData(t *testing.T) { } buf := make([]byte, 1024) - for i := 0; i < 3; i++ { + for range 3 { if _, err := rand.Read(buf); err != nil { t.Fatal(err) } diff --git a/p2p/protocol/holepunch/tracer.go b/p2p/protocol/holepunch/tracer.go index 3ba06f653..07afc5080 100644 --- a/p2p/protocol/holepunch/tracer.go +++ b/p2p/protocol/holepunch/tracer.go @@ -78,11 +78,11 @@ type EventTracer interface { } type Event struct { - Timestamp int64 // UNIX nanos - Peer peer.ID // local peer ID - Remote peer.ID // remote peer ID - Type string // event type - Evt interface{} // the actual event + Timestamp int64 // UNIX nanos + Peer peer.ID // local peer ID + Remote peer.ID // remote peer ID + Type string // event type + Evt any // the actual event } // Event Types diff --git a/p2p/protocol/identify/id.go 
b/p2p/protocol/identify/id.go index ca56c364f..97a00322a 100644 --- a/p2p/protocol/identify/id.go +++ b/p2p/protocol/identify/id.go @@ -563,7 +563,7 @@ func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) erro func readAllIDMessages(r pbio.Reader, finalMsg proto.Message) error { mes := &pb.Identify{} - for i := 0; i < maxMessages; i++ { + for range maxMessages { switch err := r.ReadMsg(mes); err { case io.EOF: return nil @@ -585,7 +585,7 @@ func (ids *idService) updateSnapshot() (updated bool) { slices.SortFunc(addrs, func(a, b ma.Multiaddr) int { return bytes.Compare(a.Bytes(), b.Bytes()) }) usedSpace := len(ids.ProtocolVersion) + len(ids.UserAgent) - for i := 0; i < len(protos); i++ { + for i := range protos { usedSpace += len(protos[i]) } addrs = trimHostAddrList(addrs, maxOwnIdentifyMsgSize-usedSpace-256) // 256 bytes of buffer @@ -702,11 +702,8 @@ func diff(a, b []protocol.ID) (added, removed []protocol.ID) { // This is O(n^2), but it's fine because the slices are small. for _, x := range b { var found bool - for _, y := range a { - if x == y { - found = true - break - } + if slices.Contains(a, x) { + found = true } if !found { added = append(added, x) @@ -714,11 +711,8 @@ func diff(a, b []protocol.ID) (added, removed []protocol.ID) { } for _, x := range a { var found bool - for _, y := range b { - if x == y { - found = true - break - } + if slices.Contains(b, x) { + found = true } if !found { removed = append(removed, x) diff --git a/p2p/protocol/identify/id_test.go b/p2p/protocol/identify/id_test.go index 66337c358..489b6e974 100644 --- a/p2p/protocol/identify/id_test.go +++ b/p2p/protocol/identify/id_test.go @@ -214,7 +214,7 @@ func TestIDService(t *testing.T) { } } -func assertCorrectEvtPeerIdentificationCompleted(t *testing.T, evtAny interface{}, other host.Host) { +func assertCorrectEvtPeerIdentificationCompleted(t *testing.T, evtAny any, other host.Host) { t.Helper() evt := evtAny.(event.EvtPeerIdentificationCompleted) require.NotNil(t, evt.Conn) @@ -620,7 +620,7 @@ func TestLargeIdentifyMessage(t *testing.T) { // add protocol strings to make the message larger // about 2K of protocol strings - for i := 0; i < 500; i++ { + for i := range 500 { r := protocol.ID(fmt.Sprintf("rand%d", i)) h1.SetStreamHandler(r, func(network.Stream) {}) h2.SetStreamHandler(r, func(network.Stream) {}) @@ -717,7 +717,7 @@ func TestLargeIdentifyMessage(t *testing.T) { func randString(n int) string { chars := "abcdefghijklmnopqrstuvwxyz" buf := make([]byte, n) - for i := 0; i < n; i++ { + for i := range n { buf[i] = chars[rand.Intn(len(chars))] } return string(buf) @@ -731,7 +731,7 @@ func TestLargePushMessage(t *testing.T) { // add protocol strings to make the message larger // about 3K of protocol strings - for i := 0; i < 100; i++ { + for i := range 100 { r := protocol.ID(fmt.Sprintf("%s-%d", randString(30), i)) h1.SetStreamHandler(r, func(network.Stream) {}) h2.SetStreamHandler(r, func(network.Stream) {}) diff --git a/p2p/protocol/ping/ping_test.go b/p2p/protocol/ping/ping_test.go index 355b3ad64..d78e2212e 100644 --- a/p2p/protocol/ping/ping_test.go +++ b/p2p/protocol/ping/ping_test.go @@ -43,7 +43,7 @@ func testPing(t *testing.T, ps *ping.PingService, p peer.ID) { defer cancel() ts := ps.Ping(pctx, p) - for i := 0; i < 5; i++ { + for range 5 { select { case res := <-ts: require.NoError(t, res.Error) diff --git a/p2p/security/noise/benchmark_test.go b/p2p/security/noise/benchmark_test.go index d59a1cb97..0e380bb7d 100644 --- a/p2p/security/noise/benchmark_test.go +++ 
b/p2p/security/noise/benchmark_test.go @@ -167,7 +167,7 @@ func benchDataTransfer(b *benchenv, dataSize int64, m testMode) { plainTextBufs := make([][]byte, 61) writeTos := make(map[int]io.Writer) - for i := 0; i < len(plainTextBufs); i++ { + for i := range plainTextBufs { var rbuf []byte // plaintext will be 2 KB to 62 KB plainTextBufs[i] = make([]byte, (i+2)*1024) diff --git a/p2p/security/noise/rw.go b/p2p/security/noise/rw.go index d52768f48..2bd8997e9 100644 --- a/p2p/security/noise/rw.go +++ b/p2p/security/noise/rw.go @@ -107,10 +107,7 @@ func (s *secureSession) Write(data []byte) (int, error) { defer pool.Put(cbuf) for written < total { - end := written + MaxPlaintextLength - if end > total { - end = total - } + end := min(written+MaxPlaintextLength, total) b, err := s.encrypt(cbuf[:LengthPrefixLength], data[written:end]) if err != nil { diff --git a/p2p/security/noise/transport.go b/p2p/security/noise/transport.go index c97b77159..492a5f370 100644 --- a/p2p/security/noise/transport.go +++ b/p2p/security/noise/transport.go @@ -3,6 +3,7 @@ package noise import ( "context" "net" + "slices" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" @@ -89,10 +90,8 @@ func (t *Transport) ID() protocol.ID { func matchMuxers(initiatorMuxers, responderMuxers []protocol.ID) protocol.ID { for _, initMuxer := range initiatorMuxers { - for _, respMuxer := range responderMuxers { - if initMuxer == respMuxer { - return initMuxer - } + if slices.Contains(responderMuxers, initMuxer) { + return initMuxer } } return "" diff --git a/p2p/security/tls/crypto.go b/p2p/security/tls/crypto.go index 70a594d06..99bf5e009 100644 --- a/p2p/security/tls/crypto.go +++ b/p2p/security/tls/crypto.go @@ -128,7 +128,7 @@ func (i *Identity) ConfigForPeer(remote peer.ID) (*tls.Config, <-chan ic.PubKey) defer close(keyCh) chain := make([]*x509.Certificate, len(rawCerts)) - for i := 0; i < len(rawCerts); i++ { + for i := range rawCerts { cert, err := x509.ParseCertificate(rawCerts[i]) if err != nil { return err diff --git a/p2p/test/backpressure/backpressure_test.go b/p2p/test/backpressure/backpressure_test.go index 0660dc848..73a57af58 100644 --- a/p2p/test/backpressure/backpressure_test.go +++ b/p2p/test/backpressure/backpressure_test.go @@ -48,7 +48,7 @@ func TestStBackpressureStreamWrite(t *testing.T) { // If nobody is reading, we should eventually time out. 
require.NoError(t, s.SetWriteDeadline(time.Now().Add(100*time.Millisecond))) data := make([]byte, 16*1024) - for i := 0; i < 5*1024; i++ { // write at most 100MiB + for range 5 * 1024 { // write at most 100MiB if _, err := s.Write(data); err != nil { require.True(t, os.IsTimeout(err), err) return diff --git a/p2p/test/basichost/basic_host_test.go b/p2p/test/basichost/basic_host_test.go index 0197387b1..69df9fce7 100644 --- a/p2p/test/basichost/basic_host_test.go +++ b/p2p/test/basichost/basic_host_test.go @@ -242,7 +242,7 @@ func TestWebRTCWithQUICManyConnections(t *testing.T) { const N = 200 // These N dialers have both /quic-v1 and /webrtc-direct transports var dialers [N]host.Host - for i := 0; i < N; i++ { + for i := range N { dialers[i], err = libp2p.New(libp2p.NoListenAddrs) require.NoError(t, err) defer dialers[i].Close() @@ -252,7 +252,7 @@ func TestWebRTCWithQUICManyConnections(t *testing.T) { require.NoError(t, err) defer d.Close() - for i := 0; i < N; i++ { + for i := range N { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() // With happy eyeballs these dialers will connect over only /quic-v1 diff --git a/p2p/test/negotiation/muxer_test.go b/p2p/test/negotiation/muxer_test.go index 0047c5ab6..ccd9256a0 100644 --- a/p2p/test/negotiation/muxer_test.go +++ b/p2p/test/negotiation/muxer_test.go @@ -84,10 +84,8 @@ func TestMuxerNegotiation(t *testing.T) { } for _, tc := range testcases { - tc := tc for _, sec := range securities { - sec := sec t.Run(fmt.Sprintf("%s: %s", sec.Name, tc.Name), func(t *testing.T) { server, err := libp2p.New( diff --git a/p2p/test/negotiation/security_test.go b/p2p/test/negotiation/security_test.go index b7324744b..cab8bbf56 100644 --- a/p2p/test/negotiation/security_test.go +++ b/p2p/test/negotiation/security_test.go @@ -54,7 +54,6 @@ func TestSecurityNegotiation(t *testing.T) { require.NoError(t, err) for _, tc := range testcases { - tc := tc t.Run(tc.Name, func(t *testing.T) { server, err := libp2p.New( diff --git a/p2p/test/reconnects/reconnect_test.go b/p2p/test/reconnects/reconnect_test.go index cf05c80f3..6e37ddf1a 100644 --- a/p2p/test/reconnects/reconnect_test.go +++ b/p2p/test/reconnects/reconnect_test.go @@ -33,7 +33,7 @@ func TestReconnect5(t *testing.T) { const num = 5 hosts := make([]host.Host, 0, num) - for i := 0; i < num; i++ { + for range num { h, err := bhost.NewHost(swarmt.GenSwarm(t, swarmOpt), nil) require.NoError(t, err) defer h.Close() @@ -42,7 +42,7 @@ func TestReconnect5(t *testing.T) { h.SetStreamHandler(protocol.TestingID, EchoStreamHandler) } - for i := 0; i < 4; i++ { + for range 4 { runRound(t, hosts) } } @@ -81,7 +81,7 @@ func runRound(t *testing.T, hosts []host.Host) { } var wg sync.WaitGroup wg.Add(numStreams) - for i := 0; i < numStreams; i++ { + for range numStreams { data := make([]byte, rand.Intn(maxDataLen)+1) rnd.Read(data) go func() { diff --git a/p2p/test/resource-manager/echo_test.go b/p2p/test/resource-manager/echo_test.go index d6896fbde..36d1d2b40 100644 --- a/p2p/test/resource-manager/echo_test.go +++ b/p2p/test/resource-manager/echo_test.go @@ -15,7 +15,7 @@ import ( func createEchos(t *testing.T, count int, makeOpts ...func(int) libp2p.Option) []*Echo { result := make([]*Echo, 0, count) - for i := 0; i < count; i++ { + for i := range count { opts := make([]libp2p.Option, 0, len(makeOpts)+2) // only use a single transport, otherwise we might end up with a TCP and a QUIC connection to the same host opts = append(opts, libp2p.Transport(tcp.NewTCPTransport), 
libp2p.DefaultListenAddrs) @@ -32,8 +32,8 @@ func createEchos(t *testing.T, count int, makeOpts ...func(int) libp2p.Option) [ result = append(result, e) } - for i := 0; i < count; i++ { - for j := 0; j < count; j++ { + for i := range count { + for j := range count { if i == j { continue } diff --git a/p2p/test/resource-manager/rcmgr_test.go b/p2p/test/resource-manager/rcmgr_test.go index 816c58da0..707354d2b 100644 --- a/p2p/test/resource-manager/rcmgr_test.go +++ b/p2p/test/resource-manager/rcmgr_test.go @@ -236,7 +236,7 @@ func TestResourceManagerServicePeerInbound(t *testing.T) { echos[0].BeforeDone(waitForChannel(ready, time.Minute)) var once sync.Once - for i := 0; i < 3; i++ { + for range 3 { eg.Add(1) wg.Add(1) go func() { diff --git a/p2p/test/transport/transport_test.go b/p2p/test/transport/transport_test.go index 9b92dc96c..4c26d987c 100644 --- a/p2p/test/transport/transport_test.go +++ b/p2p/test/transport/transport_test.go @@ -409,7 +409,7 @@ func TestBigPing(t *testing.T) { defer s.Close() go func() { - for i := 0; i < totalSends; i++ { + for range totalSends { _, err := io.ReadFull(s, recvBuf) if err != nil { errCh <- err @@ -424,7 +424,7 @@ func TestBigPing(t *testing.T) { errCh <- err }() - for i := 0; i < totalSends; i++ { + for range totalSends { s.Write(sendBuf) } s.CloseWrite() @@ -481,7 +481,7 @@ func TestLotsOfDataManyStreams(t *testing.T) { sem := make(chan struct{}, parallel) var wg sync.WaitGroup - for i := 0; i < totalStreams; i++ { + for range totalStreams { wg.Add(1) sem <- struct{}{} go func() { @@ -531,7 +531,7 @@ func TestManyStreams(t *testing.T) { }) streams := make([]network.Stream, streamCount) - for i := 0; i < streamCount; i++ { + for i := range streamCount { s, err := h2.NewStream(context.Background(), h1.ID(), "echo") require.NoError(t, err) streams[i] = s @@ -606,7 +606,7 @@ func TestMoreStreamsThanOurLimits(t *testing.T) { var sawFirstErr atomic.Bool workQueue := make(chan struct{}, streamCount) - for i := 0; i < streamCount; i++ { + for range streamCount { workQueue <- struct{}{} } close(workQueue) diff --git a/p2p/transport/quic/conn_test.go b/p2p/transport/quic/conn_test.go index 703255a8b..26829edb7 100644 --- a/p2p/transport/quic/conn_test.go +++ b/p2p/transport/quic/conn_test.go @@ -543,7 +543,7 @@ func testDialTwo(t *testing.T, tc *connTestCase) { }(c) } - for i := 0; i < 2; i++ { + for range 2 { require.Eventually(t, func() bool { select { case <-done: diff --git a/p2p/transport/quic/transport.go b/p2p/transport/quic/transport.go index 0176409e4..911c96569 100644 --- a/p2p/transport/quic/transport.go +++ b/p2p/transport/quic/transport.go @@ -237,10 +237,9 @@ loop: break } - maxSleep := 10 * (i + 1) * (i + 1) // in ms - if maxSleep > 200 { - maxSleep = 200 - } + maxSleep := min( + // in ms + 10*(i+1)*(i+1), 200) d := 10*time.Millisecond + time.Duration(rand.Intn(maxSleep))*time.Millisecond if timer == nil { timer = time.NewTimer(d) diff --git a/p2p/transport/quicreuse/connmgr_test.go b/p2p/transport/quicreuse/connmgr_test.go index f99646cdc..cb55c07cb 100644 --- a/p2p/transport/quicreuse/connmgr_test.go +++ b/p2p/transport/quicreuse/connmgr_test.go @@ -124,7 +124,7 @@ func TestAcceptErrorGetCleanedUp(t *testing.T) { // Now make sure we have less goroutines than before // Manually doing the same as require.Eventually, except avoiding adding a goroutine goRoutinesCleanedUp := false - for i := 0; i < 50; i++ { + for range 50 { t.Log("num goroutines:", runtime.NumGoroutine()) if runtime.NumGoroutine() <= originalNumberOfGoroutines { 
goRoutinesCleanedUp = true @@ -529,7 +529,7 @@ func TestAssociationCleanup(t *testing.T) { numTries := 100 - for i := 0; i < numTries; i++ { + for range numTries { tr, err := cm.TransportWithAssociationForDial(assoc1, "udp4", dialAddr) require.NoError(t, err) require.Equal(t, addr1, tr.LocalAddr().String(), "assoc1 should use addr1") @@ -540,7 +540,7 @@ func TestAssociationCleanup(t *testing.T) { // Call TransportWithAssociationForDial 10 times with assoc1 and check if we get at least one different address foundDifferentAddr := false - for i := 0; i < numTries; i++ { + for range numTries { tr, err := cm.TransportWithAssociationForDial(assoc1, "udp4", dialAddr) require.NoError(t, err) actualAddr := tr.LocalAddr().String() @@ -551,7 +551,7 @@ func TestAssociationCleanup(t *testing.T) { } require.True(t, foundDifferentAddr, "assoc1 should use a different address than addr1 at least once after ln1 is closed") - for i := 0; i < numTries; i++ { + for range numTries { // Test that dialing with assoc2 still uses the second listener's address tr2Still, err := cm.TransportWithAssociationForDial(assoc2, "udp4", dialAddr) require.NoError(t, err) @@ -563,7 +563,7 @@ func TestAssociationCleanup(t *testing.T) { // Call TransportWithAssociationForDial 10 times with assoc2 and check if we get at least one different address foundDifferentAddr2 := false - for i := 0; i < numTries; i++ { + for range numTries { tr, err := cm.TransportWithAssociationForDial(assoc2, "udp4", dialAddr) require.NoError(t, err) actualAddr := tr.LocalAddr().String() @@ -573,7 +573,7 @@ func TestAssociationCleanup(t *testing.T) { } require.True(t, foundDifferentAddr2, "assoc2 should use a different address than addr2 at least once after ln2 is closed") - for i := 0; i < numTries; i++ { + for range numTries { // Test that dialing with assoc3 still uses the third listener's address tr3Still, err := cm.TransportWithAssociationForDial(assoc3, "udp4", dialAddr) require.NoError(t, err) diff --git a/p2p/transport/tcp/tcp_test.go b/p2p/transport/tcp/tcp_test.go index 304bce251..1fd7700fb 100644 --- a/p2p/transport/tcp/tcp_test.go +++ b/p2p/transport/tcp/tcp_test.go @@ -27,7 +27,7 @@ import ( var muxers = []tptu.StreamMuxer{{ID: "/yamux", Muxer: yamux.DefaultTransport}} func TestTcpTransport(t *testing.T) { - for i := 0; i < 2; i++ { + for range 2 { peerA, ia := makeInsecureMuxer(t) _, ib := makeInsecureMuxer(t) @@ -116,7 +116,7 @@ func TestResourceManager(t *testing.T) { } func TestTcpTransportCantDialDNS(t *testing.T) { - for i := 0; i < 2; i++ { + for range 2 { dnsa, err := ma.NewMultiaddr("/dns4/example.com/tcp/1234") require.NoError(t, err) @@ -134,7 +134,7 @@ func TestTcpTransportCantDialDNS(t *testing.T) { } func TestTcpTransportCantListenUtp(t *testing.T) { - for i := 0; i < 2; i++ { + for range 2 { utpa, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/utp") require.NoError(t, err) diff --git a/p2p/transport/tcpreuse/internal/sampledconn/sampledconn_test.go b/p2p/transport/tcpreuse/internal/sampledconn/sampledconn_test.go index 6c4e989b1..af23d43e2 100644 --- a/p2p/transport/tcpreuse/internal/sampledconn/sampledconn_test.go +++ b/p2p/transport/tcpreuse/internal/sampledconn/sampledconn_test.go @@ -28,7 +28,7 @@ func TestSampledConn(t *testing.T) { // Server goroutine go func() { - for i := 0; i < len(testCases); i++ { + for range testCases { conn, err := listener.Accept() assert.NoError(t, err) defer conn.Close() diff --git a/p2p/transport/tcpreuse/listener_test.go b/p2p/transport/tcpreuse/listener_test.go index 0f91d4992..3095dd6c2 100644 
--- a/p2p/transport/tcpreuse/listener_test.go +++ b/p2p/transport/tcpreuse/listener_test.go @@ -92,7 +92,7 @@ func TestListenerSingle(t *testing.T) { require.NoError(t, err) go func() { d := net.Dialer{} - for i := 0; i < N; i++ { + for i := range N { go func() { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() @@ -116,7 +116,7 @@ func TestListenerSingle(t *testing.T) { }() var wg sync.WaitGroup - for i := 0; i < N; i++ { + for range N { c, _, err := l.Accept() require.NoError(t, err) wg.Add(1) @@ -147,7 +147,7 @@ func TestListenerSingle(t *testing.T) { }() go func() { d := websocket.Dialer{} - for i := 0; i < N; i++ { + for i := range N { go func() { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() @@ -168,7 +168,7 @@ func TestListenerSingle(t *testing.T) { } }() var wg sync.WaitGroup - for i := 0; i < N; i++ { + for range N { c := <-wh.conns wg.Add(1) go func() { @@ -201,7 +201,7 @@ func TestListenerSingle(t *testing.T) { }() go func() { d := websocket.Dialer{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} - for i := 0; i < N; i++ { + for i := range N { go func() { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() @@ -222,7 +222,7 @@ func TestListenerSingle(t *testing.T) { } }() var wg sync.WaitGroup - for i := 0; i < N; i++ { + for range N { c := <-wh.conns wg.Add(1) go func() { @@ -276,7 +276,7 @@ func TestListenerMultiplexed(t *testing.T) { // multistream connections go func() { d := net.Dialer{} - for i := 0; i < N; i++ { + for i := range N { go func() { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() @@ -302,7 +302,7 @@ func TestListenerMultiplexed(t *testing.T) { // ws connections go func() { d := websocket.Dialer{} - for i := 0; i < N; i++ { + for i := range N { go func() { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() @@ -326,7 +326,7 @@ func TestListenerMultiplexed(t *testing.T) { // wss connections go func() { d := websocket.Dialer{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} - for i := 0; i < N; i++ { + for i := range N { go func() { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() @@ -351,7 +351,7 @@ func TestListenerMultiplexed(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - for i := 0; i < N; i++ { + for range N { c, _, err := msl.Accept() if !assert.NoError(t, err) { return @@ -376,7 +376,7 @@ func TestListenerMultiplexed(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - for i := 0; i < N; i++ { + for range N { c := <-wh.conns wg.Add(1) go func() { @@ -399,7 +399,7 @@ func TestListenerMultiplexed(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - for i := 0; i < N; i++ { + for range N { c := <-whs.conns wg.Add(1) go func() { diff --git a/p2p/transport/testsuite/stream_suite.go b/p2p/transport/testsuite/stream_suite.go index b139976b9..0d207914a 100644 --- a/p2p/transport/testsuite/stream_suite.go +++ b/p2p/transport/testsuite/stream_suite.go @@ -125,7 +125,7 @@ func SubtestStress(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, rateLimitN := 5000 // max of 5k funcs, because -race has 8k max. rateLimitChan := make(chan struct{}, rateLimitN) - for i := 0; i < rateLimitN; i++ { + for range rateLimitN { rateLimitChan <- struct{}{} } @@ -281,7 +281,7 @@ func SubtestStreamOpenStress(t *testing.T, ta, tb transport.Transport, maddr ma. 
wg.Add(1) go func() { defer wg.Done() - for j := 0; j < workers; j++ { + for range workers { wg.Add(1) go func() { defer wg.Done() diff --git a/p2p/transport/testsuite/transport_suite.go b/p2p/transport/testsuite/transport_suite.go index a24b2c6a7..46436137b 100644 --- a/p2p/transport/testsuite/transport_suite.go +++ b/p2p/transport/testsuite/transport_suite.go @@ -193,7 +193,7 @@ func SubtestPingPong(t *testing.T, ta, tb transport.Transport, maddr ma.Multiadd } var sWg sync.WaitGroup - for i := 0; i < streams; i++ { + for range streams { s, err := connA.AcceptStream() if err != nil { t.Error(err) @@ -241,7 +241,7 @@ func SubtestPingPong(t *testing.T, ta, tb transport.Transport, maddr ma.Multiadd t.Fatal(err) } - for i := 0; i < streams; i++ { + for i := range streams { s, err := connB.OpenStream(context.Background()) if err != nil { t.Error(err) @@ -251,7 +251,7 @@ func SubtestPingPong(t *testing.T, ta, tb transport.Transport, maddr ma.Multiadd wg.Add(1) go func(i int) { defer wg.Done() - data := []byte(fmt.Sprintf("%s - %d", testData, i)) + data := fmt.Appendf(nil, "%s - %d", testData, i) n, err := s.Write(data) if err != nil { s.Reset() diff --git a/p2p/transport/testsuite/utils_suite.go b/p2p/transport/testsuite/utils_suite.go index 8b002f890..b4ea08aae 100644 --- a/p2p/transport/testsuite/utils_suite.go +++ b/p2p/transport/testsuite/utils_suite.go @@ -30,7 +30,7 @@ var Subtests = []TransportSubTestFn{ SubtestStreamReset, } -func getFunctionName(i interface{}) string { +func getFunctionName(i any) string { return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() } diff --git a/p2p/transport/webrtc/hex.go b/p2p/transport/webrtc/hex.go index 482036540..d33d34527 100644 --- a/p2p/transport/webrtc/hex.go +++ b/p2p/transport/webrtc/hex.go @@ -49,7 +49,7 @@ func decodeInterspersedHexFromASCIIString(s string) ([]byte, error) { n := len(s) buffer := make([]byte, n/3*2+n%3) j := 0 - for i := 0; i < n; i++ { + for i := range n { if i%3 == 2 { if s[i] != ':' { return nil, errUnexpectedIntersperseHexChar diff --git a/p2p/transport/webrtc/logger.go b/p2p/transport/webrtc/logger.go index d9a8f6f35..6fcd598f9 100644 --- a/p2p/transport/webrtc/logger.go +++ b/p2p/transport/webrtc/logger.go @@ -31,7 +31,7 @@ func (l pionLogger) Debug(s string) { l.Logger.Debug(s) } -func (l pionLogger) Debugf(s string, args ...interface{}) { +func (l pionLogger) Debugf(s string, args ...any) { if l.Logger.Enabled(context.Background(), slog.LevelDebug) { l.Logger.Debug(fmt.Sprintf(s, args...)) } @@ -41,7 +41,7 @@ func (l pionLogger) Error(s string) { l.Logger.Debug(s) } -func (l pionLogger) Errorf(s string, args ...interface{}) { +func (l pionLogger) Errorf(s string, args ...any) { if l.Logger.Enabled(context.Background(), slog.LevelDebug) { l.Logger.Debug(fmt.Sprintf(s, args...)) } @@ -51,7 +51,7 @@ func (l pionLogger) Info(s string) { l.Logger.Debug(s) } -func (l pionLogger) Infof(s string, args ...interface{}) { +func (l pionLogger) Infof(s string, args ...any) { if l.Logger.Enabled(context.Background(), slog.LevelDebug) { l.Logger.Debug(fmt.Sprintf(s, args...)) } @@ -61,7 +61,7 @@ func (l pionLogger) Warn(s string) { l.Logger.Debug(s) } -func (l pionLogger) Warnf(s string, args ...interface{}) { +func (l pionLogger) Warnf(s string, args ...any) { if l.Logger.Enabled(context.Background(), slog.LevelDebug) { l.Logger.Debug(fmt.Sprintf(s, args...)) } @@ -70,7 +70,7 @@ func (l pionLogger) Warnf(s string, args ...interface{}) { func (l pionLogger) Trace(s string) { l.Logger.Debug(s) } -func (l pionLogger) Tracef(s 
string, args ...interface{}) { +func (l pionLogger) Tracef(s string, args ...any) { if l.Logger.Enabled(context.Background(), slog.LevelDebug) { l.Logger.Debug(fmt.Sprintf(s, args...)) } diff --git a/p2p/transport/webrtc/stream_test.go b/p2p/transport/webrtc/stream_test.go index 461ed27ff..ea15c9439 100644 --- a/p2p/transport/webrtc/stream_test.go +++ b/p2p/transport/webrtc/stream_test.go @@ -112,7 +112,7 @@ func assertDataChannelOpen(t *testing.T, dc *datachannel.DataChannel) { if err != nil { t.Fatal("unexpected mashalling error", err) } - for i := 0; i < 3; i++ { + for range 3 { _, err := dc.Write(msg) if err != nil { t.Fatal("unexpected write err: ", err) @@ -132,7 +132,7 @@ func assertDataChannelClosed(t *testing.T, dc *datachannel.DataChannel) { if err != nil { t.Fatal("unexpected mashalling error", err) } - for i := 0; i < 5; i++ { + for range 5 { _, err := dc.Write(msg) if err != nil { if errors.Is(err, sctp.ErrStreamClosed) { @@ -221,22 +221,22 @@ func TestStreamSkipEmptyFrames(t *testing.T) { clientStr := newStream(client.dc, client.rwc, maxSendMessageSize, func() {}) serverStr := newStream(server.dc, server.rwc, maxSendMessageSize, func() {}) - for i := 0; i < 10; i++ { + for range 10 { require.NoError(t, serverStr.writer.WriteMsg(&pb.Message{})) } require.NoError(t, serverStr.writer.WriteMsg(&pb.Message{Message: []byte("foo")})) - for i := 0; i < 10; i++ { + for range 10 { require.NoError(t, serverStr.writer.WriteMsg(&pb.Message{})) } require.NoError(t, serverStr.writer.WriteMsg(&pb.Message{Message: []byte("bar")})) - for i := 0; i < 10; i++ { + for range 10 { require.NoError(t, serverStr.writer.WriteMsg(&pb.Message{})) } require.NoError(t, serverStr.writer.WriteMsg(&pb.Message{Flag: pb.Message_FIN.Enum()})) var read []byte var count int - for i := 0; i < 100; i++ { + for range 100 { b := make([]byte, 10) count++ n, err := clientStr.Read(b) @@ -356,7 +356,7 @@ func TestStreamWriteDeadlineAsync(t *testing.T) { } clientStr.SetWriteDeadline(start.Add(timeout)) var hitDeadline bool - for i := 0; i < 2000; i++ { + for i := range 2000 { if _, err := clientStr.Write(b); err != nil { t.Logf("wrote %d kB", i) require.ErrorIs(t, err, os.ErrDeadlineExceeded) diff --git a/p2p/transport/webrtc/stream_write.go b/p2p/transport/webrtc/stream_write.go index 4900f0fcd..9c6e19b75 100644 --- a/p2p/transport/webrtc/stream_write.go +++ b/p2p/transport/webrtc/stream_write.go @@ -84,10 +84,7 @@ func (s *stream) Write(b []byte) (int, error) { s.mx.Lock() continue } - end := s.maxSendMessageSize - if end > availableSpace { - end = availableSpace - } + end := min(s.maxSendMessageSize, availableSpace) end -= protoOverhead + varintOverhead if end > len(b) { end = len(b) } diff --git a/p2p/transport/webrtc/transport.go b/p2p/transport/webrtc/transport.go index b0365ff0f..b49888949 100644 --- a/p2p/transport/webrtc/transport.go +++ b/p2p/transport/webrtc/transport.go @@ -432,7 +432,7 @@ func genUfrag() string { rand.Read(seed[:]) r := mrand.New(mrand.NewChaCha8(seed)) b := make([]byte, uFragLength) - for i := 0; i < len(uFragPrefix); i++ { + for i := range len(uFragPrefix) { b[i] = uFragPrefix[i] } for i := len(uFragPrefix); i < uFragLength; i++ { diff --git a/p2p/transport/webrtc/transport_test.go b/p2p/transport/webrtc/transport_test.go index 4f38dc593..a5e969fd1 100644 --- a/p2p/transport/webrtc/transport_test.go +++ b/p2p/transport/webrtc/transport_test.go @@ -248,7 +248,7 @@ func TestTransportWebRTC_CanListenMultiple(t *testing.T) { var wg sync.WaitGroup go func() { - for i := 0; i < count; i++
{ + for range count { conn, err := listener.Accept() assert.NoError(t, err) assert.NotNil(t, conn) @@ -258,7 +258,7 @@ func TestTransportWebRTC_CanListenMultiple(t *testing.T) { cancel() }() - for i := 0; i < count; i++ { + for range count { wg.Add(1) go func() { defer wg.Done() @@ -293,7 +293,7 @@ func TestTransportWebRTC_CanCreateSuccessiveConnections(t *testing.T) { var wg sync.WaitGroup wg.Add(count) go func() { - for i := 0; i < count; i++ { + for range count { ctr, _ := getTransport(t) conn, err := ctr.Dial(context.Background(), listener.Multiaddr(), listeningPeer) require.NoError(t, err) @@ -303,7 +303,7 @@ func TestTransportWebRTC_CanCreateSuccessiveConnections(t *testing.T) { } }() - for i := 0; i < count; i++ { + for range count { conn, err := listener.Accept() require.NoError(t, err) defer conn.Close() @@ -427,7 +427,7 @@ func TestTransportWebRTC_DialerCanCreateStreamsMultiple(t *testing.T) { defer lconn.Close() var wg sync.WaitGroup var doneStreams atomic.Int32 - for i := 0; i < numListeners; i++ { + for range numListeners { wg.Add(1) go func() { defer wg.Done() @@ -456,7 +456,7 @@ func TestTransportWebRTC_DialerCanCreateStreamsMultiple(t *testing.T) { var writerWG sync.WaitGroup var cnt atomic.Int32 var streamsStarted atomic.Int32 - for i := 0; i < numWriters; i++ { + for range numWriters { writerWG.Add(1) go func() { defer writerWG.Done() @@ -574,7 +574,7 @@ func TestTransportWebRTC_StreamWriteBufferContention(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { lconn.Close() }) require.Equal(t, connectingPeer, lconn.RemotePeer()) - for i := 0; i < 2; i++ { + for range 2 { go func() { defer wg.Done() _, err := lconn.AcceptStream() @@ -589,7 +589,7 @@ func TestTransportWebRTC_StreamWriteBufferContention(t *testing.T) { errC := make(chan error) // writers - for i := 0; i < 2; i++ { + for range 2 { go func() { stream, err := conn.OpenStream(context.Background()) require.NoError(t, err) @@ -876,7 +876,7 @@ func TestMaxInFlightRequests(t *testing.T) { var wg sync.WaitGroup var success, fails atomic.Int32 - for i := 0; i < count+1; i++ { + for range count + 1 { wg.Add(1) go func() { defer wg.Done() @@ -898,23 +898,22 @@ func TestMaxInFlightRequests(t *testing.T) { } func TestGenUfrag(t *testing.T) { - for i := 0; i < 10; i++ { + for range 10 { s := genUfrag() require.True(t, strings.HasPrefix(s, "libp2p+webrtc+v1/")) } } func TestManyConnections(t *testing.T) { - var listeners []tpt.Listener - var listenerPeerIDs []peer.ID - const numListeners = 5 const dialersPerListener = 5 const connsPerDialer = 10 errCh := make(chan error, 10*numListeners*dialersPerListener*connsPerDialer) successCh := make(chan struct{}, 10*numListeners*dialersPerListener*connsPerDialer) + listeners := make([]tpt.Listener, 0, numListeners) + listenerPeerIDs := make([]peer.ID, 0, numListeners) - for i := 0; i < numListeners; i++ { + for range numListeners { tr, lp := getTransport(t) listenerPeerIDs = append(listenerPeerIDs, lp) ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")) @@ -971,7 +970,7 @@ func TestManyConnections(t *testing.T) { } runListener := func(ln tpt.Listener) { - for i := 0; i < dialersPerListener*connsPerDialer; i++ { + for range dialersPerListener * connsPerDialer { conn, err := ln.Accept() if err != nil { t.Errorf("listener failed to accept conneciton: %s", err) @@ -983,7 +982,7 @@ func TestManyConnections(t *testing.T) { runDialer := func(ln tpt.Listener, lp peer.ID) { tp, _ := getTransport(t) - for i := 0; i < connsPerDialer; i++ { + for range connsPerDialer { 
// We want to test for deadlocks, set a high timeout ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) conn, err := tp.Dial(ctx, ln.Multiaddr(), lp) @@ -998,16 +997,16 @@ func TestManyConnections(t *testing.T) { } } - for i := 0; i < numListeners; i++ { + for i := range numListeners { go runListener(listeners[i]) } - for i := 0; i < numListeners; i++ { - for j := 0; j < dialersPerListener; j++ { + for i := range numListeners { + for range dialersPerListener { go runDialer(listeners[i], listenerPeerIDs[i]) } } - for i := 0; i < numListeners*dialersPerListener*connsPerDialer; i++ { + for i := range numListeners * dialersPerListener * connsPerDialer { select { case <-successCh: t.Log("completed conn: ", i) diff --git a/p2p/transport/webrtc/udpmux/mux_test.go b/p2p/transport/webrtc/udpmux/mux_test.go index b75f3e830..f0603c2f8 100644 --- a/p2p/transport/webrtc/udpmux/mux_test.go +++ b/p2p/transport/webrtc/udpmux/mux_test.go @@ -17,7 +17,7 @@ func getSTUNBindingRequest(ufrag string) *stun.Message { msg.SetType(stun.BindingRequest) uattr := stun.RawAttribute{ Type: stun.AttrUsername, - Value: []byte(fmt.Sprintf("%s:%s", ufrag, ufrag)), // This is the format we expect in our connections + Value: fmt.Appendf(nil, "%s:%s", ufrag, ufrag), // This is the format we expect in our connections } uattr.AddTo(msg) msg.Encode() @@ -143,13 +143,13 @@ func TestRemoveConnByUfrag(t *testing.T) { ufrag := "a" count := 10 conns := make([]net.PacketConn, count) - for i := 0; i < 10; i++ { + for i := range 10 { conns[i] = newPacketConn(t) setupMapping(t, ufrag, conns[i], m) } mc, err := m.GetConn(ufrag, conns[0].LocalAddr()) require.NoError(t, err) - for i := 0; i < 10; i++ { + for i := range 10 { mc1, err := m.GetConn(ufrag, conns[i].LocalAddr()) require.NoError(t, err) if mc1 != mc { @@ -162,12 +162,12 @@ func TestRemoveConnByUfrag(t *testing.T) { // All connections should now be associated with b ufrag = "b" - for i := 0; i < 10; i++ { + for i := range 10 { setupMapping(t, ufrag, conns[i], m) } mc, err = m.GetConn(ufrag, conns[0].LocalAddr()) require.NoError(t, err) - for i := 0; i < 10; i++ { + for i := range 10 { mc1, err := m.GetConn(ufrag, conns[i].LocalAddr()) require.NoError(t, err) if mc1 != mc { @@ -196,7 +196,7 @@ func TestMuxedConnection(t *testing.T) { addrUfragMap := make(map[string]string) ufragConnsMap := make(map[string][]net.PacketConn) for _, ufrag := range ufrags { - for i := 0; i < connCount; i++ { + for range connCount { cc := newPacketConn(t) addrUfragMap[cc.LocalAddr().String()] = ufrag ufragConnsMap[ufrag] = append(ufragConnsMap[ufrag], cc) @@ -208,14 +208,14 @@ func TestMuxedConnection(t *testing.T) { go func(ufrag string) { for _, cc := range ufragConnsMap[ufrag] { setupMapping(t, ufrag, cc, m) - for j := 0; j < msgCount; j++ { + for range msgCount { cc.WriteTo([]byte(ufrag), c.LocalAddr()) } } done <- true }(ufrag) } - for i := 0; i < len(ufrags); i++ { + for range ufrags { <-done } @@ -226,7 +226,7 @@ func TestMuxedConnection(t *testing.T) { stunRequests := 0 msg := make([]byte, 1500) addrPacketCount := make(map[string]int) - for i := 0; i < connCount; i++ { + for range connCount { for j := 0; j < msgCount+1; j++ { n, addr1, err := mc.ReadFrom(msg) require.NoError(t, err) diff --git a/p2p/transport/websocket/websocket_test.go b/p2p/transport/websocket/websocket_test.go index 7dda6c6bd..4f4864df4 100644 --- a/p2p/transport/websocket/websocket_test.go +++ b/p2p/transport/websocket/websocket_test.go @@ -472,7 +472,7 @@ func TestConcurrentClose(t *testing.T) { msg 
:= []byte("HELLO WORLD") go func() { - for i := 0; i < 100; i++ { + for range 100 { c, err := tpt.maDial(context.Background(), l.Multiaddr(), &network.NullScope{}) if err != nil { t.Error(err) @@ -488,7 +488,7 @@ func TestConcurrentClose(t *testing.T) { } }() - for i := 0; i < 100; i++ { + for range 100 { c, _, err := l.Accept() if err != nil { t.Fatal(err) @@ -519,7 +519,7 @@ func TestWriteZero(t *testing.T) { } defer c.Close() - for i := 0; i < 100; i++ { + for range 100 { n, err := c.Write(msg) if n != 0 { t.Errorf("expected to write 0 bytes, wrote %d", n) diff --git a/p2p/transport/webtransport/cert_manager_test.go b/p2p/transport/webtransport/cert_manager_test.go index 942d47174..9549a07dd 100644 --- a/p2p/transport/webtransport/cert_manager_test.go +++ b/p2p/transport/webtransport/cert_manager_test.go @@ -115,7 +115,7 @@ func TestCertRenewal(t *testing.T) { func TestDeterministicCertsAcrossReboots(t *testing.T) { // Run this test 100 times to make sure it's deterministic runs := 100 - for i := 0; i < runs; i++ { + for i := range runs { t.Run(fmt.Sprintf("Run=%d", i), func(t *testing.T) { cl := clock.NewMock() priv, _, err := test.SeededTestKeyPair(crypto.Ed25519, 256, 0) diff --git a/p2p/transport/webtransport/crypto_test.go b/p2p/transport/webtransport/crypto_test.go index ba439c28a..b4f806ca4 100644 --- a/p2p/transport/webtransport/crypto_test.go +++ b/p2p/transport/webtransport/crypto_test.go @@ -93,7 +93,6 @@ func TestCertificateVerification(t *testing.T) { errStr: "cert not valid", }, } { - tc := tc t.Run(fmt.Sprintf("rejecting invalid certificates: %s", tc.name), func(t *testing.T) { err := verifyRawCerts([][]byte{tc.cert.Raw}, []multihash.DecodedMultihash{sha256Multihash(t, tc.cert.Raw)}) require.Error(t, err) @@ -125,7 +124,6 @@ func TestCertificateVerification(t *testing.T) { errStr: "cert hash not found", }, } { - tc := tc t.Run(fmt.Sprintf("rejecting invalid certificates: %s", tc.name), func(t *testing.T) { err := verifyRawCerts(tc.certs, tc.hashes) require.Error(t, err) @@ -137,7 +135,7 @@ func TestCertificateVerification(t *testing.T) { func TestDeterministicCertHashes(t *testing.T) { // Run this test 1000 times since we want to make sure the signatures are deterministic runs := 1000 - for i := 0; i < runs; i++ { + for range runs { zeroSeed := [32]byte{} priv, _, err := ic.GenerateEd25519Key(bytes.NewReader(zeroSeed[:])) require.NoError(t, err) @@ -165,7 +163,7 @@ func TestDeterministicCertHashes(t *testing.T) { func TestDeterministicSig(t *testing.T) { // Run this test 1000 times since we want to make sure the signatures are deterministic runs := 1000 - for i := 0; i < runs; i++ { + for range runs { zeroSeed := [32]byte{} deterministicHKDFReader := newDeterministicReader(zeroSeed[:], nil, deterministicCertInfo) b := [1024]byte{} diff --git a/p2p/transport/webtransport/transport_test.go b/p2p/transport/webtransport/transport_test.go index 266e01d18..91a9cb429 100644 --- a/p2p/transport/webtransport/transport_test.go +++ b/p2p/transport/webtransport/transport_test.go @@ -14,6 +14,7 @@ import ( "net/http" "os" "runtime" + "slices" "sync/atomic" "testing" "testing/quick" @@ -478,7 +479,7 @@ func TestAcceptQueueFilledUp(t *testing.T) { const num = 16 + 1 // one more than the accept queue capacity // Dial one more connection than the accept queue can hold. 
errChan := make(chan error, num) - for i := 0; i < num; i++ { + for range num { go func() { conn, err := newConn() if err != nil { @@ -508,7 +509,7 @@ func TestAcceptQueueFilledUp(t *testing.T) { var count int timer := time.NewTimer(time.Second) defer timer.Stop() - for i := 0; i < 16; i++ { + for range 16 { select { case <-errChan: count++ @@ -779,11 +780,9 @@ func TestServerRotatesCertCorrectly(t *testing.T) { var found bool ma.ForEach(l.Multiaddr(), func(c ma.Component) bool { if c.Protocol().Code == ma.P_CERTHASH { - for _, prevCerthash := range certhashes { - if c.Value() == prevCerthash { - found = true - return false - } + if slices.Contains(certhashes, c.Value()) { + found = true + return false } } return true @@ -812,7 +811,7 @@ func TestServerRotatesCertCorrectlyAfterSteps(t *testing.T) { // Traverse various time boundaries and make sure we always keep a common certhash. // e.g. certhash/A/certhash/B ... -> ... certhash/B/certhash/C ... -> ... certhash/C/certhash/D - for i := 0; i < 200; i++ { + for i := range 200 { cl.Add(24 * time.Hour) tr, err := libp2pwebtransport.New(priv, nil, newConnManager(t), nil, &network.NullResourceManager{}, libp2pwebtransport.WithClock(cl)) require.NoError(t, err) @@ -822,11 +821,9 @@ func TestServerRotatesCertCorrectlyAfterSteps(t *testing.T) { var found bool ma.ForEach(l.Multiaddr(), func(c ma.Component) bool { if c.Protocol().Code == ma.P_CERTHASH { - for _, prevCerthash := range certhashes { - if prevCerthash == c.Value() { - found = true - return false - } + if slices.Contains(certhashes, c.Value()) { + found = true + return false } } return true diff --git a/x/rate/limiter_test.go b/x/rate/limiter_test.go index b3c730283..3fa8ad4f2 100644 --- a/x/rate/limiter_test.go +++ b/x/rate/limiter_test.go @@ -26,10 +26,10 @@ func getSleepDurationAndRequestCount(rps float64) (time.Duration, int) { func assertLimiter(t *testing.T, rl *Limiter, ipAddr netip.Addr, allowed, errorMargin int) { t.Helper() - for i := 0; i < allowed; i++ { + for range allowed { require.True(t, rl.Allow(ipAddr)) } - for i := 0; i < errorMargin; i++ { + for range errorMargin { rl.Allow(ipAddr) } require.False(t, rl.Allow(ipAddr)) @@ -51,7 +51,7 @@ func TestLimiterGlobal(t *testing.T) { } if limit.RPS == 0 { // 0 implies no rate limiting, any large number would do - for i := 0; i < 1000; i++ { + for range 1000 { require.True(t, rl.Allow(addr)) } return