Mirror of https://github.com/libp2p/go-libp2p.git (synced 2026-04-22 16:17:19 +08:00)
refactor: apply go fix modernizers from Go 1.26 (#3463)
Co-authored-by: sukun <sukunrt@gmail.com>
@@ -6,6 +6,10 @@ run:
issues:
max-issues-per-linter: 0
max-same-issues: 0
exclude-rules:
- path: _test\.go
linters:
- prealloc

linters:
enable:
+1 -1

@@ -74,7 +74,7 @@ type AutoNATConfig struct {

type Security struct {
ID protocol.ID
Constructor interface{}
Constructor any
}

// Config describes a set of settings for a libp2p node

@@ -11,7 +11,7 @@ type Options struct {
Limit int

// Other (implementation-specific) options
Other map[interface{}]interface{}
Other map[any]any
}

// Apply applies the given options to this DiscoveryOpts
+7 -7

@@ -6,17 +6,17 @@ import (
)

// SubscriptionOpt represents a subscriber option. Use the options exposed by the implementation of choice.
type SubscriptionOpt = func(interface{}) error
type SubscriptionOpt = func(any) error

// EmitterOpt represents an emitter option. Use the options exposed by the implementation of choice.
type EmitterOpt = func(interface{}) error
type EmitterOpt = func(any) error

// CancelFunc closes a subscriber.
type CancelFunc = func()

// wildcardSubscriptionType is a virtual type to represent wildcard
// subscriptions.
type wildcardSubscriptionType interface{}
type wildcardSubscriptionType any

// WildcardSubscription is the type to subscribe to receive all events
// emitted in the eventbus.

@@ -30,7 +30,7 @@ type Emitter interface {
// calls to Emit will block.
//
// Calling this function with wrong event type will cause a panic.
Emit(evt interface{}) error
Emit(evt any) error
}

// Subscription represents a subscription to one or multiple event types.

@@ -38,7 +38,7 @@ type Subscription interface {
io.Closer

// Out returns the channel from which to consume events.
Out() <-chan interface{}
Out() <-chan any

// Name returns the name for the subscription
Name() string

@@ -79,7 +79,7 @@ type Bus interface {
// [...]
// }
// }
Subscribe(eventType interface{}, opts ...SubscriptionOpt) (Subscription, error)
Subscribe(eventType any, opts ...SubscriptionOpt) (Subscription, error)

// Emitter creates a new event emitter.
//

@@ -89,7 +89,7 @@ type Bus interface {
// em, err := eventbus.Emitter(new(EventT))
// defer em.Close() // MUST call this after being done with the emitter
// em.Emit(EventT{})
Emitter(eventType interface{}, opts ...EmitterOpt) (Emitter, error)
Emitter(eventType any, opts ...EmitterOpt) (Emitter, error)

// GetAllEventTypes returns all the event types that this bus knows about
// (having emitters and subscribers). It omits the WildcardSubscription.

@@ -10,7 +10,7 @@ import (
var panicWriter io.Writer = os.Stderr

// HandlePanic handles and logs panics.
func HandlePanic(rerr interface{}, err *error, where string) {
func HandlePanic(rerr any, err *error, where string) {
if rerr != nil {
fmt.Fprintf(panicWriter, "caught panic: %s\n%s\n", rerr, debug.Stack())
*err = fmt.Errorf("panic in %s: %s", where, rerr)
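The bulk of this commit is the mechanical interface{} -> any rewrite shown above. Since Go 1.18, any is a predeclared alias for interface{}, so the two spellings are interchangeable and the change is purely cosmetic. A minimal, self-contained sketch (not taken from this repository) illustrating that equivalence:

package main

import "fmt"

// printAll accepts values of any type; `any` is an alias for `interface{}`,
// so this signature is identical to printAll(vals ...interface{}).
func printAll(vals ...any) {
	for _, v := range vals {
		fmt.Printf("%T: %v\n", v, v)
	}
}

func main() {
	var x interface{} = 42 // still legal; same type as `any`
	printAll(x, "hello", 3.14)
}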
@@ -35,15 +35,15 @@ func round(bwc *BandwidthCounter, b *testing.B) {
start := make(chan struct{})
var wg sync.WaitGroup
wg.Add(10000)
for i := 0; i < 1000; i++ {
for i := range 1000 {
p := peer.ID(fmt.Sprintf("peer-%d", i))
for j := 0; j < 10; j++ {
for j := range 10 {
proto := protocol.ID(fmt.Sprintf("bitswap-%d", j))
go func() {
defer wg.Done()
<-start

for i := 0; i < 1000; i++ {
for range 1000 {
bwc.LogSentMessage(100)
bwc.LogSentMessageStream(100, proto, p)
time.Sleep(1 * time.Millisecond)

@@ -60,10 +60,10 @@ func round(bwc *BandwidthCounter, b *testing.B) {

func TestBandwidthCounter(t *testing.T) {
bwc := NewBandwidthCounter()
for i := 0; i < 40; i++ {
for i := 0; i < 100; i++ {
for range 40 {
for i := range 100 {
p := peer.ID(fmt.Sprintf("peer-%d", i))
for j := 0; j < 2; j++ {
for j := range 2 {
proto := protocol.ID(fmt.Sprintf("proto-%d", j))

// make sure the bandwidth counters are active

@@ -81,7 +81,7 @@ func TestBandwidthCounter(t *testing.T) {
assertProtocols := func(check func(Stats)) {
byProtocol := bwc.GetBandwidthByProtocol()
require.Len(t, byProtocol, 2, "expected 2 protocols")
for i := 0; i < 2; i++ {
for i := range 2 {
p := protocol.ID(fmt.Sprintf("proto-%d", i))
for _, stats := range [...]Stats{bwc.GetBandwidthForProtocol(p), byProtocol[p]} {
check(stats)

@@ -92,7 +92,7 @@ func TestBandwidthCounter(t *testing.T) {
assertPeers := func(check func(Stats)) {
byPeer := bwc.GetBandwidthByPeer()
require.Len(t, byPeer, 100, "expected 100 peers")
for i := 0; i < 100; i++ {
for i := range 100 {
p := peer.ID(fmt.Sprintf("peer-%d", i))
for _, stats := range [...]Stats{bwc.GetBandwidthForPeer(p), byPeer[p]} {
check(stats)
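The loop rewrites in this and the following test files rely on range over an integer, added in Go 1.22: `for i := range n` iterates i = 0..n-1, and the index can be dropped entirely when it is unused. A small sketch, independent of the diff:

package main

import "fmt"

func main() {
	// Equivalent forms: the classic counted loop and range over an int.
	for i := 0; i < 3; i++ {
		fmt.Println("classic", i)
	}
	for i := range 3 { // i takes the values 0, 1, 2
		fmt.Println("range", i)
	}
	for range 3 { // index unused: runs the body exactly 3 times
		fmt.Println("tick")
	}
}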
@@ -123,7 +123,7 @@ type Stats struct {
// relay.
Limited bool
// Extra stores additional metadata about this connection.
Extra map[interface{}]interface{}
Extra map[any]any
}

// StreamHandler is the type of function used to listen for

@@ -118,8 +118,8 @@ func AddrInfoToP2pAddrs(pi *AddrInfo) ([]ma.Multiaddr, error) {
return addrs, nil
}

func (pi *AddrInfo) Loggable() map[string]interface{} {
return map[string]interface{}{
func (pi *AddrInfo) Loggable() map[string]any {
return map[string]any{
"peerID": pi.ID.String(),
"addrs": pi.Addrs,
}

+2 -2

@@ -42,8 +42,8 @@ const maxInlineKeyLength = 42
type ID string

// Loggable returns a pretty peer ID string in loggable JSON format.
func (id ID) Loggable() map[string]interface{} {
return map[string]interface{}{
func (id ID) Loggable() map[string]any {
return map[string]any{
"peerID": id.String(),
}
}

@@ -57,7 +57,7 @@ func TestSignedPeerRecordFromEnvelope(t *testing.T) {
// low clock precision. This makes sure we never get a duplicate.
func TestTimestampSeq(t *testing.T) {
var last uint64
for i := 0; i < 1000; i++ {
for range 1000 {
next := TimestampSeq()
if next <= last {
t.Errorf("non-increasing timestamp found: %d <= %d", next, last)

@@ -83,8 +83,8 @@ type PeerMetadata interface {
// Get / Put is a simple registry for other peer-related key/value pairs.
// If we find something we use often, it should become its own set of
// methods. This is a last resort.
Get(p peer.ID, key string) (interface{}, error)
Put(p peer.ID, key string, val interface{}) error
Get(p peer.ID, key string) (any, error)
Put(p peer.ID, key string, val any) error

// RemovePeer removes all values stored for a peer.
RemovePeer(peer.ID)
@@ -43,7 +43,7 @@ func testDecodeBad(t *testing.T, windows bool) {

func testDecodeHex(t *testing.T, windows bool) {
b := bufWithBase("/base16/", windows)
for i := 0; i < 32; i++ {
for range 32 {
b.WriteString("FF")
}

@@ -67,7 +67,7 @@ func TestDecodeB64(t *testing.T) {
func testDecodeB64(t *testing.T, windows bool) {
b := bufWithBase("/base64/", windows)
key := make([]byte, 32)
for i := 0; i < 32; i++ {
for i := range 32 {
key[i] = byte(i)
}

@@ -102,7 +102,7 @@ func TestDecodeBin(t *testing.T) {
func testDecodeBin(t *testing.T, windows bool) {
b := bufWithBase("/bin/", windows)
key := make([]byte, 32)
for i := 0; i < 32; i++ {
for i := range 32 {
key[i] = byte(i)
}

@@ -96,9 +96,9 @@ func blankRecordForPayloadType(payloadType []byte) (Record, error) {
return asRecord, nil
}

func getValueType(i interface{}) reflect.Type {
func getValueType(i any) reflect.Type {
valueType := reflect.TypeOf(i)
if valueType.Kind() == reflect.Ptr {
if valueType.Kind() == reflect.Pointer {
valueType = valueType.Elem()
}
return valueType
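reflect.Pointer is the newer name for reflect.Ptr (the old constant is kept as an equal alias), so swapping the Kind check does not change behavior. A hypothetical helper mirroring the shape of getValueType above:

package main

import (
	"fmt"
	"reflect"
)

// elemType dereferences pointer types, similar to the getValueType helper in the diff.
func elemType(i any) reflect.Type {
	t := reflect.TypeOf(i)
	if t.Kind() == reflect.Pointer { // reflect.Pointer == reflect.Ptr
		t = t.Elem()
	}
	return t
}

func main() {
	n := 7
	fmt.Println(elemType(&n))  // int
	fmt.Println(elemType("s")) // string
}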
@@ -1,5 +1,7 @@
package routing

import "maps"

// Option is a single routing option.
type Option func(opts *Options) error

@@ -9,7 +11,7 @@ type Options struct {
Expired bool
Offline bool
// Other (ValueStore implementation specific) options.
Other map[interface{}]interface{}
Other map[any]any
}

// Apply applies the given options to this Options

@@ -27,10 +29,8 @@ func (opts *Options) ToOption() Option {
return func(nopts *Options) error {
*nopts = *opts
if opts.Other != nil {
nopts.Other = make(map[interface{}]interface{}, len(opts.Other))
for k, v := range opts.Other {
nopts.Other[k] = v
}
nopts.Other = make(map[any]any, len(opts.Other))
maps.Copy(nopts.Other, opts.Other)
}
return nil
}
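maps.Copy from the standard library maps package (Go 1.21) replaces the hand-written key/value copy loop removed above. A sketch with made-up option keys:

package main

import (
	"fmt"
	"maps"
)

func main() {
	src := map[any]any{"quorum": 3, "local": true} // hypothetical option keys
	dst := make(map[any]any, len(src))

	// Equivalent to: for k, v := range src { dst[k] = v }
	maps.Copy(dst, src)

	fmt.Println(dst)
}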
@@ -7,7 +7,7 @@ import (
)

func (qe *QueryEvent) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]interface{}{
return json.Marshal(map[string]any{
"ID": qe.ID.String(),
"Type": int(qe.Type),
"Responses": qe.Responses,

@@ -15,7 +15,7 @@ func TestEventsCancel(t *testing.T) {
wg.Add(2)
go func() {
defer wg.Done()
for i := 0; i < 100; i++ {
for i := range 100 {
PublishQueryEvent(ctx, &QueryEvent{Extra: fmt.Sprint(i)})
}
close(goch)
+3 -9

@@ -2,6 +2,7 @@ package test

import (
"fmt"
"slices"
"testing"

ma "github.com/multiformats/go-multiaddr"

@@ -9,7 +10,7 @@ import (

func GenerateTestAddrs(n int) []ma.Multiaddr {
out := make([]ma.Multiaddr, n)
for i := 0; i < n; i++ {
for i := range n {
a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/1.2.3.4/tcp/%d", i))
if err != nil {
continue

@@ -26,14 +27,7 @@ func AssertAddressesEqual(t *testing.T, exp, act []ma.Multiaddr) {
}

for _, a := range exp {
found := false

for _, b := range act {
if a.Equal(b) {
found = true
break
}
}
found := slices.ContainsFunc(act, a.Equal)

if !found {
t.Fatalf("expected address %s not found", a)
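slices.Contains and slices.ContainsFunc (Go 1.21) collapse the found-flag loops removed in this and later hunks; in the hunk above, a.Equal is a method value whose func(ma.Multiaddr) bool shape is exactly the predicate ContainsFunc expects. A self-contained sketch using plain strings instead of multiaddrs:

package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	act := []string{"/ip4/1.2.3.4/tcp/1", "/ip4/1.2.3.4/tcp/2"}

	// Direct equality check.
	fmt.Println(slices.Contains(act, "/ip4/1.2.3.4/tcp/2")) // true

	// Predicate form, mirroring slices.ContainsFunc(act, a.Equal) in the diff.
	target := "/IP4/1.2.3.4/TCP/1"
	found := slices.ContainsFunc(act, func(s string) bool {
		return strings.EqualFold(s, target)
	})
	fmt.Println(found) // true
}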
@@ -220,7 +220,7 @@ func parseIPFSGoLogEnv(loggingLevelEnvStr string) (slog.Level, map[string]slog.L
fallbackLvl := slog.LevelError
var systemToLevel map[string]slog.Level
if loggingLevelEnvStr != "" {
for _, kvs := range strings.Split(loggingLevelEnvStr, ",") {
for kvs := range strings.SplitSeq(loggingLevelEnvStr, ",") {
kv := strings.SplitN(kvs, "=", 2)
var lvl slog.Level
err := lvl.UnmarshalText([]byte(kv[len(kv)-1]))
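strings.SplitSeq (Go 1.24) returns an iterator over the substrings instead of allocating a slice, so the two-variable `for _, kvs := range strings.Split(...)` becomes the single-variable `for kvs := range strings.SplitSeq(...)`. A sketch parsing a value shaped like the logging environment string (the example value is made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	env := "dht=debug,pubsub=warn,error" // hypothetical example value

	// SplitSeq yields each piece lazily; note the single-value range variable.
	for kvs := range strings.SplitSeq(env, ",") {
		kv := strings.SplitN(kvs, "=", 2)
		fmt.Printf("system=%q level=%q\n", kv[0], kv[len(kv)-1])
	}
}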
+3 -3

@@ -70,7 +70,7 @@ func ListenAddrs(addrs ...ma.Multiaddr) Option {
// * Host
// * Network
// * Peerstore
func Security(name string, constructor interface{}) Option {
func Security(name string, constructor any) Option {
return func(cfg *Config) error {
if cfg.Insecure {
return fmt.Errorf("cannot use security transports with an insecure libp2p configuration")

@@ -99,7 +99,7 @@ func Muxer(name string, muxer network.Multiplexer) Option {
}
}

func QUICReuse(constructor interface{}, opts ...quicreuse.Option) Option {
func QUICReuse(constructor any, opts ...quicreuse.Option) Option {
return func(cfg *Config) error {
tag := `group:"quicreuseopts"`
typ := reflect.ValueOf(constructor).Type()

@@ -141,7 +141,7 @@ func QUICReuse(constructor interface{}, opts ...quicreuse.Option) Option {
// * Public Key
// * Address filter (filter.Filter)
// * Peerstore
func Transport(constructor interface{}, opts ...interface{}) Option {
func Transport(constructor any, opts ...any) Option {
return func(cfg *Config) error {
// generate a random identifier, so that fx can associate the constructor with its options
b := make([]byte, 8)
@@ -89,7 +89,7 @@ func minMaxJitterTest(jitter Jitter, t *testing.T) {

func TestNoJitter(t *testing.T) {
minMaxJitterTest(NoJitter, t)
for i := 0; i < 10; i++ {
for i := range 10 {
expected := time.Second * time.Duration(i)
if calculated := NoJitter(expected, time.Duration(0), time.Second*100, nil); calculated != expected {
t.Fatalf("expected %v, got %v", expected, calculated)

@@ -106,7 +106,7 @@ func TestFullJitter(t *testing.T) {

histogram := make([]int, numBuckets)

for i := 0; i < (numBuckets-1)*multiplier; i++ {
for range (numBuckets - 1) * multiplier {
started := time.Nanosecond * 50
calculated := FullJitter(started, 0, 100, rng)
histogram[calculated]++

@@ -148,7 +148,7 @@ func testManyBackoffFactoryHelper(concurrent int, bkf BackoffFactory) {
backoffCh := make(chan BackoffStrategy, concurrent)

errGrp := errgroup.Group{}
for i := 0; i < concurrent; i++ {
for range concurrent {
errGrp.Go(func() (err error) {
defer func() {
if r := recover(); r != nil {

@@ -174,8 +174,8 @@ func testManyBackoffFactoryHelper(concurrent int, bkf BackoffFactory) {
}
}()

for i := 0; i < 5; i++ {
for j := 0; j < 10; j++ {
for range 5 {
for range 10 {
backoff.Delay()
}
backoff.Reset()

@@ -202,7 +202,7 @@ func TestBackoffDiscoverySimultaneousQuery(t *testing.T) {
n := 40
advertisers := make([]discovery.Discovery, n)

for i := 0; i < n; i++ {
for i := range n {
h := bhost.NewBlankHost(swarmt.GenSwarm(t))
defer h.Close()
advertisers[i] = mocks.NewDiscoveryClient(h, discServer)

@@ -263,7 +263,7 @@ func TestBackoffDiscoveryCacheCapacity(t *testing.T) {
n := 40
advertisers := make([]discovery.Discovery, n)

for i := 0; i < n; i++ {
for i := range n {
h := bhost.NewBlankHost(swarmt.GenSwarm(t))
defer h.Close()
advertisers[i] = mocks.NewDiscoveryClient(h, discServer)

@@ -283,7 +283,7 @@ func TestBackoffDiscoveryCacheCapacity(t *testing.T) {
const ns = "test"

// add speers
for i := 0; i < n; i++ {
for i := range n {
advertisers[i].Advertise(ctx, ns, discovery.TTL(time.Hour))
}
// Advance clock by one step
@@ -40,9 +40,9 @@ func (h *maxDialHost) Connect(ctx context.Context, ai peer.AddrInfo) error {
}

func getNetHosts(t *testing.T, n int) []host.Host {
var out []host.Host
out := make([]host.Host, 0, n)

for i := 0; i < n; i++ {
for range n {
netw := swarmt.GenSwarm(t)
h := bhost.NewBlankHost(netw)
t.Cleanup(func() { h.Close() })

@@ -257,7 +257,7 @@ func (s *mdnsService) startResolver(ctx context.Context) {
func randomString(l int) string {
const alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
s := make([]byte, 0, l)
for i := 0; i < l; i++ {
for range l {
s = append(s, alphabet[rand.Intn(len(alphabet))])
}
return string(s)
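Several hunks, like getNetHosts above, also swap a nil slice plus append for make([]T, 0, n), reserving capacity up front (the prealloc pattern the linter config mentions). A small before/after sketch, not taken from the diff:

package main

import "fmt"

func main() {
	const n = 4

	// Before: zero-capacity slice that reallocates as it grows.
	var grown []int
	for i := range n {
		grown = append(grown, i*i)
	}

	// After: capacity reserved up front; these appends never reallocate.
	squares := make([]int, 0, n)
	for i := range n {
		squares = append(squares, i*i)
	}

	fmt.Println(grown, squares, cap(squares)) // [0 1 4 9] [0 1 4 9] 4
}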
@@ -3,6 +3,7 @@ package mdns
import (
"os"
"runtime"
"slices"
"sync"
"testing"
"time"

@@ -58,7 +59,7 @@ func TestOtherDiscovery(t *testing.T) {

notifs := make([]*notif, n)
hostIDs := make([]peer.ID, n)
for i := 0; i < n; i++ {
for i := range n {
notif := &notif{}
notifs[i] = notif
hostIDs[i] = setupMDNS(t, notif)

@@ -70,11 +71,8 @@ func TestOtherDiscovery(t *testing.T) {
if currentHostID == id {
continue
}
for _, i := range ids {
if id == i {
found = true
break
}
if slices.Contains(ids, id) {
found = true
}
if !found {
return false
@@ -140,7 +140,7 @@ func TestAutoNATServiceGlobalLimiter(t *testing.T) {

hs := c.host

for i := 0; i < 5; i++ {
for range 5 {
hc, ac := makeAutoNATClient(t)
connect(t, hs, hc)
@@ -110,12 +110,7 @@ func newRelay(t *testing.T) host.Host {
)
require.NoError(t, err)
require.Eventually(t, func() bool {
for _, p := range h.Mux().Protocols() {
if p == protoIDv2 {
return true
}
}
return false
return slices.Contains(h.Mux().Protocols(), protoIDv2)
}, time.Second, 10*time.Millisecond)
return h
}

@@ -150,7 +145,7 @@ func TestSingleRelay(t *testing.T) {
const numCandidates = 3
var called bool
peerChan := make(chan peer.AddrInfo, numCandidates)
for i := 0; i < numCandidates; i++ {
for range numCandidates {
r := newRelay(t)
t.Cleanup(func() { r.Close() })
peerChan <- peer.AddrInfo{ID: r.ID(), Addrs: r.Addrs()}

@@ -260,7 +255,7 @@ func TestBackoff(t *testing.T) {
}, 2*time.Second, 100*time.Millisecond, "counter load should be 2")

// make sure we don't add any relays yet
for i := 0; i < 2; i++ {
for range 2 {
cl.AdvanceBy(backoff / 3)
require.Equal(t, 1, int(reservations.Load()))
}

@@ -274,8 +269,8 @@ func TestBackoff(t *testing.T) {

func TestStaticRelays(t *testing.T) {
const numStaticRelays = 3
var staticRelays []peer.AddrInfo
for i := 0; i < numStaticRelays; i++ {
staticRelays := make([]peer.AddrInfo, 0, numStaticRelays)
for range numStaticRelays {
r := newRelay(t)
t.Cleanup(func() { r.Close() })
staticRelays = append(staticRelays, peer.AddrInfo{ID: r.ID(), Addrs: r.Addrs()})
@@ -294,7 +289,7 @@ func TestConnectOnDisconnect(t *testing.T) {
const num = 3
peerChan := make(chan peer.AddrInfo, num)
relays := make([]host.Host, 0, num)
for i := 0; i < 3; i++ {
for range 3 {
r := newRelay(t)
t.Cleanup(func() { r.Close() })
peerChan <- peer.AddrInfo{ID: r.ID(), Addrs: r.Addrs()}

@@ -322,7 +317,7 @@ func TestConnectOnDisconnect(t *testing.T) {
}
require.EventuallyWithT(t, func(collect *assert.CollectT) {
relaysInUse = usedRelays(h)
assert.Len(collect, relaysInUse, 1)
require.Len(collect, relaysInUse, 1)
assert.NotEqualf(collect, oldRelay, relaysInUse[0], "old relay should not be used again")
}, 10*time.Second, 100*time.Millisecond)
}

@@ -335,7 +330,7 @@ func TestMaxAge(t *testing.T) {
peerChan2 := make(chan peer.AddrInfo, num)
relays1 := make([]host.Host, 0, num)
relays2 := make([]host.Host, 0, num)
for i := 0; i < num; i++ {
for range num {
r1 := newRelay(t)
t.Cleanup(func() { r1.Close() })
peerChan1 <- peer.AddrInfo{ID: r1.ID(), Addrs: r1.Addrs()}

@@ -415,10 +410,8 @@ func TestMaxAge(t *testing.T) {
}

require.Eventually(t, func() bool {
for _, id := range ids {
if id == relays[0] {
return true
}
if slices.Contains(ids, relays[0]) {
return true
}
fmt.Println("waiting for", ids, "to contain", relays[0])
return false

@@ -428,10 +421,10 @@ func TestMaxAge(t *testing.T) {

func TestReconnectToStaticRelays(t *testing.T) {
cl := newMockClock()
var staticRelays []peer.AddrInfo
const numStaticRelays = 1
staticRelays := make([]peer.AddrInfo, 0, numStaticRelays)
relays := make([]host.Host, 0, numStaticRelays)
for i := 0; i < numStaticRelays; i++ {
for range numStaticRelays {
r := newRelay(t)
t.Cleanup(func() { r.Close() })
relays = append(relays, r)
@@ -489,8 +489,8 @@ func TestAddrsManagerPeerstoreUpdated(t *testing.T) {
}

func TestRemoveIfNotInSource(t *testing.T) {
var addrs []ma.Multiaddr
for i := 0; i < 10; i++ {
addrs := make([]ma.Multiaddr, 0, 10)
for i := range 10 {
addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/tcp/%d", i)))
}
slices.SortFunc(addrs, func(a, b ma.Multiaddr) int { return a.Compare(b) })

@@ -517,7 +517,7 @@ func TestRemoveIfNotInSource(t *testing.T) {

func BenchmarkAreAddrsDifferent(b *testing.B) {
var addrs [10]ma.Multiaddr
for i := 0; i < len(addrs); i++ {
for i := range len(addrs) {
addrs[i] = ma.StringCast(fmt.Sprintf("/ip4/1.1.1.%d/tcp/1", i))
}
b.Run("areAddrsDifferent", func(b *testing.B) {

@@ -531,7 +531,7 @@ func BenchmarkAreAddrsDifferent(b *testing.B) {

func BenchmarkRemoveIfNotInSource(b *testing.B) {
var addrs [10]ma.Multiaddr
for i := 0; i < len(addrs); i++ {
for i := range len(addrs) {
addrs[i] = ma.StringCast(fmt.Sprintf("/ip4/1.1.1.%d/tcp/1", i))
}
b.ReportAllocs()
@@ -116,8 +116,8 @@ func TestProbeManager(t *testing.T) {

t.Run("successes", func(t *testing.T) {
pm := makeNewProbeManager([]ma.Multiaddr{pub1, pub2})
for j := 0; j < 2; j++ {
for i := 0; i < targetConfidence; i++ {
for range 2 {
for range targetConfidence {
reqs := nextProbe(pm)
pm.CompleteProbe(reqs, autonatv2.Result{Addr: reqs[0].Addr, Idx: 0, Reachability: network.ReachabilityPublic}, nil)
}

@@ -396,7 +396,7 @@ func TestAddrsReachabilityTracker(t *testing.T) {
}
tr := newTracker(mockClient, nil)
var addrs []ma.Multiaddr
for i := 0; i < 10; i++ {
for i := range 10 {
addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/1.1.1.1/tcp/%d", i)))
}
slices.SortFunc(addrs, func(a, b ma.Multiaddr) int { return -a.Compare(b) }) // sort in reverse order

@@ -460,7 +460,7 @@ func TestAddrsReachabilityTracker(t *testing.T) {
require.True(t, drainNotify()) // check that we did receive probes

backoffInterval := backoffStartInterval
for i := 0; i < 4; i++ {
for range 4 {
drainNotify()
cl.Add(backoffInterval / 2)
select {

@@ -512,7 +512,7 @@ func TestAddrsReachabilityTracker(t *testing.T) {
tr.UpdateAddrs([]ma.Multiaddr{pub1})
assertFirstEvent(t, tr, []ma.Multiaddr{pub1})

for i := 0; i < minConfidence; i++ {
for range minConfidence {
select {
case <-notify:
case <-time.After(1 * time.Second):

@@ -677,7 +677,7 @@ func TestRefreshReachability(t *testing.T) {
time.Sleep(50 * time.Millisecond) // wait for the cancellation to be processed

outer:
for i := 0; i < defaultMaxConcurrency; i++ {
for range defaultMaxConcurrency {
select {
case <-block:
default:

@@ -991,10 +991,7 @@ func FuzzAddrsReachabilityTracker(f *testing.F) {
}
ips = ips[1:]
var x, y int64
split := 128 / 8
if len(ips) < split {
split = len(ips)
}
split := min(len(ips), 128/8)
var b [8]byte
copy(b[:], ips[:split])
x = int64(binary.LittleEndian.Uint64(b[:]))
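The built-in min (and max), available since Go 1.21, replaces the if-capping pattern deleted above; the same rewrite appears again later in getTopExternalAddrs. A sketch with a made-up input:

package main

import "fmt"

func main() {
	ips := []byte{1, 2, 3} // hypothetical short input

	// Before: cap a value with an explicit if.
	split := 128 / 8
	if len(ips) < split {
		split = len(ips)
	}

	// After: one expression with the built-in min.
	split2 := min(len(ips), 128/8)

	fmt.Println(split, split2) // 3 3
}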
@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"reflect"
"slices"
"strings"
"sync"
"testing"

@@ -442,12 +443,7 @@ func TestHostProtoPreknowledge(t *testing.T) {
require.Never(t, func() bool {
protos, err := h1.Peerstore().GetProtocols(h2.ID())
require.NoError(t, err)
for _, p := range protos {
if p == "/foo" {
return true
}
}
return false
return slices.Contains(protos, "/foo")
}, time.Second, 100*time.Millisecond)

s, err := h1.NewStream(context.Background(), h2.ID(), "/foo", "/bar", "/super")
+19 -19

@@ -38,7 +38,7 @@ type emitter struct {
metricsTracer MetricsTracer
}

func (e *emitter) Emit(evt interface{}) error {
func (e *emitter) Emit(evt any) error {
if e.closed.Load() {
return fmt.Errorf("emitter is closed")
}

@@ -118,14 +118,14 @@ func (b *basicBus) tryDropNode(typ reflect.Type) {
}

type wildcardSub struct {
ch chan interface{}
ch chan any
w *wildcardNode
metricsTracer MetricsTracer
name string
closeOnce sync.Once
}

func (w *wildcardSub) Out() <-chan interface{} {
func (w *wildcardSub) Out() <-chan any {
return w.ch
}

@@ -146,11 +146,11 @@ func (w *wildcardSub) Name() string {

type namedSink struct {
name string
ch chan interface{}
ch chan any
}

type sub struct {
ch chan interface{}
ch chan any
nodes []*node
dropper func(reflect.Type)
metricsTracer MetricsTracer

@@ -162,7 +162,7 @@ func (s *sub) Name() string {
return s.name
}

func (s *sub) Out() <-chan interface{} {
func (s *sub) Out() <-chan any {
return s.ch
}

@@ -207,7 +207,7 @@ var _ event.Subscription = (*sub)(nil)
// Subscribe creates new subscription. Failing to drain the channel will cause
// publishers to get blocked. CancelFunc is guaranteed to return after last send
// to the channel
func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt) (_ event.Subscription, err error) {
func (b *basicBus) Subscribe(evtTypes any, opts ...event.SubscriptionOpt) (_ event.Subscription, err error) {
settings := newSubSettings()
for _, opt := range opts {
if err := opt(&settings); err != nil {

@@ -217,7 +217,7 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt

if evtTypes == event.WildcardSubscription {
out := &wildcardSub{
ch: make(chan interface{}, settings.buffer),
ch: make(chan any, settings.buffer),
w: b.wildcard,
metricsTracer: b.metricsTracer,
name: settings.name,

@@ -226,9 +226,9 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt
return out, nil
}

types, ok := evtTypes.([]interface{})
types, ok := evtTypes.([]any)
if !ok {
types = []interface{}{evtTypes}
types = []any{evtTypes}
}

if len(types) > 1 {
@@ -240,7 +240,7 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt
}

out := &sub{
ch: make(chan interface{}, settings.buffer),
ch: make(chan any, settings.buffer),
nodes: make([]*node, len(types)),

dropper: b.tryDropNode,

@@ -249,7 +249,7 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt
}

for _, etyp := range types {
if reflect.TypeOf(etyp).Kind() != reflect.Ptr {
if reflect.TypeOf(etyp).Kind() != reflect.Pointer {
return nil, errors.New("subscribe called with non-pointer type")
}
}

@@ -287,7 +287,7 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt
// defer emit.Close() // MUST call this after being done with the emitter
//
// emit(EventT{})
func (b *basicBus) Emitter(evtType interface{}, opts ...event.EmitterOpt) (e event.Emitter, err error) {
func (b *basicBus) Emitter(evtType any, opts ...event.EmitterOpt) (e event.Emitter, err error) {
if evtType == event.WildcardSubscription {
return nil, fmt.Errorf("illegal emitter for wildcard subscription")
}

@@ -300,7 +300,7 @@ func (b *basicBus) Emitter(evtType interface{}, opts ...event.EmitterOpt) (e eve
}

typ := reflect.TypeOf(evtType)
if typ.Kind() != reflect.Ptr {
if typ.Kind() != reflect.Pointer {
return nil, errors.New("emitter called with non-pointer type")
}
typ = typ.Elem()

@@ -349,7 +349,7 @@ func (n *wildcardNode) addSink(sink *namedSink) {
}
}

func (n *wildcardNode) removeSink(ch chan interface{}) {
func (n *wildcardNode) removeSink(ch chan any) {
go func() {
// drain the event channel, will return when closed and drained.
// this is necessary to unblock publishes to this channel.

@@ -370,7 +370,7 @@ func (n *wildcardNode) removeSink(ch chan interface{}) {

var wildcardType = reflect.TypeOf(event.WildcardSubscription)

func (n *wildcardNode) emit(evt interface{}) {
func (n *wildcardNode) emit(evt any) {
if n.nSinks.Load() == 0 {
return
}

@@ -406,7 +406,7 @@ type node struct {
nEmitters atomic.Int32

keepLast bool
last interface{}
last any

sinks []*namedSink
metricsTracer MetricsTracer

@@ -421,7 +421,7 @@ func newNode(typ reflect.Type, metricsTracer MetricsTracer) *node {
}
}

func (n *node) emit(evt interface{}) {
func (n *node) emit(evt any) {
typ := reflect.TypeOf(evt)
if typ != n.typ {
panic(fmt.Sprintf("Emit called with wrong type. expected: %s, got: %s", n.typ, typ))

@@ -446,7 +446,7 @@ func (n *node) emit(evt interface{}) {
n.lk.Unlock()
}

func emitAndLogError(timer *time.Timer, typ reflect.Type, evt interface{}, sink *namedSink) *time.Timer {
func emitAndLogError(timer *time.Timer, typ reflect.Type, evt any, sink *namedSink) *time.Timer {
// Slow consumer. Log a warning if stalled for the timeout
if timer == nil {
timer = time.NewTimer(slowConsumerWarningTimeout)
@@ -245,7 +245,7 @@ func TestClosingRaces(t *testing.T) {

b := NewBus()

for i := 0; i < subs; i++ {
for range subs {
go func() {
lk.RLock()
defer lk.RUnlock()

@@ -257,7 +257,7 @@ func TestClosingRaces(t *testing.T) {
wg.Done()
}()
}
for i := 0; i < emits; i++ {
for range emits {
go func() {
lk.RLock()
defer lk.RUnlock()

@@ -291,7 +291,7 @@ func TestSubMany(t *testing.T) {
wait.Add(n)
ready.Add(n)

for i := 0; i < n; i++ {
for range n {
go func() {
sub, err := bus.Subscribe(new(EventB))
if err != nil {

@@ -340,7 +340,7 @@ func TestWildcardSubscription(t *testing.T) {

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
var evts []interface{}
var evts []any

LOOP:
for {

@@ -366,8 +366,8 @@ LOOP:

func TestManyWildcardSubscriptions(t *testing.T) {
bus := NewBus()
var subs []event.Subscription
for i := 0; i < 10; i++ {
subs := make([]event.Subscription, 0, 10)
for range 10 {
sub, err := bus.Subscribe(event.WildcardSubscription)
require.NoError(t, err)
subs = append(subs, sub)

@@ -423,7 +423,7 @@ func TestManyWildcardSubscriptions(t *testing.T) {
func TestWildcardValidations(t *testing.T) {
bus := NewBus()

_, err := bus.Subscribe([]interface{}{event.WildcardSubscription, new(EventA), new(EventB)})
_, err := bus.Subscribe([]any{event.WildcardSubscription, new(EventA), new(EventB)})
require.Error(t, err)

_, err = bus.Emitter(event.WildcardSubscription)

@@ -432,7 +432,7 @@ func TestWildcardValidations(t *testing.T) {

func TestSubType(t *testing.T) {
bus := NewBus()
sub, err := bus.Subscribe([]interface{}{new(EventA), new(EventB)})
sub, err := bus.Subscribe([]any{new(EventA), new(EventB)})
if err != nil {
t.Fatal(err)
}

@@ -555,7 +555,7 @@ func TestSubFailFully(t *testing.T) {
t.Fatal(err)
}

_, err = bus.Subscribe([]interface{}{new(EventB), 5})
_, err = bus.Subscribe([]any{new(EventB), 5})
if err == nil || err.Error() != "subscribe called with non-pointer type" {
t.Fatal(err)
}

@@ -576,7 +576,7 @@ func TestSubFailFully(t *testing.T) {
func TestSubCloseMultiple(t *testing.T) {
bus := NewBus()

sub, err := bus.Subscribe([]interface{}{new(EventB)})
sub, err := bus.Subscribe([]any{new(EventB)})
require.NoError(t, err)
err = sub.Close()
require.NoError(t, err)
@@ -598,7 +598,7 @@ func testMany(t testing.TB, subs, emits, msgs int, stateful bool) {
wait.Add(subs + emits)
ready.Add(subs)

for i := 0; i < subs; i++ {
for range subs {
go func() {
sub, err := bus.Subscribe(new(EventB))
if err != nil {

@@ -618,9 +618,9 @@ func testMany(t testing.TB, subs, emits, msgs int, stateful bool) {
}()
}

for i := 0; i < emits; i++ {
for range emits {
go func() {
em, err := bus.Emitter(new(EventB), func(settings interface{}) error {
em, err := bus.Emitter(new(EventB), func(settings any) error {
settings.(*emitterSettings).makeStateful = stateful
return nil
})

@@ -631,7 +631,7 @@ func testMany(t testing.TB, subs, emits, msgs int, stateful bool) {

ready.Wait()

for i := 0; i < msgs; i++ {
for range msgs {
em.Emit(EventB(97))
}

@@ -662,7 +662,7 @@ func (bc benchCase) name() string {

func genTestCases() []benchCase {
ret := make([]benchCase, 0, 200)
for stateful := 0; stateful < 2; stateful++ {
for stateful := range 2 {
for subs := uint(0); subs <= 8; subs = subs + 4 {
for emits := uint(0); emits <= 8; emits = emits + 4 {
ret = append(ret, benchCase{1 << subs, 1 << emits, stateful == 1})

@@ -690,7 +690,7 @@ func benchMany(bc benchCase) func(*testing.B) {
wait.Add(subs + emits)
ready.Add(subs + emits)

for i := 0; i < subs; i++ {
for range subs {
go func() {
sub, err := bus.Subscribe(new(EventB))
if err != nil {

@@ -710,9 +710,9 @@ func benchMany(bc benchCase) func(*testing.B) {
}()
}

for i := 0; i < emits; i++ {
for range emits {
go func() {
em, err := bus.Emitter(new(EventB), func(settings interface{}) error {
em, err := bus.Emitter(new(EventB), func(settings any) error {
settings.(*emitterSettings).makeStateful = stateful
return nil
})

@@ -743,7 +743,7 @@ func BenchmarkSubscribe(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N/div; i++ {
bus := NewBus()
for j := 0; j < div; j++ {
for range div {
bus.Subscribe(new(EventA))
}
}

@@ -753,7 +753,7 @@ func BenchmarkEmitter(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N/div; i++ {
bus := NewBus()
for j := 0; j < div; j++ {
for range div {
bus.Emitter(new(EventA))
}
}

@@ -763,7 +763,7 @@ func BenchmarkSubscribeAndEmitter(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N/div; i++ {
bus := NewBus()
for j := 0; j < div; j++ {
for range div {
bus.Subscribe(new(EventA))
bus.Emitter(new(EventA))
}
@@ -39,15 +39,15 @@ func newSubSettings() subSettings {
return settings
}

func BufSize(n int) func(interface{}) error {
return func(s interface{}) error {
func BufSize(n int) func(any) error {
return func(s any) error {
s.(*subSettings).buffer = n
return nil
}
}

func Name(name string) func(interface{}) error {
return func(s interface{}) error {
func Name(name string) func(any) error {
return func(s any) error {
s.(*subSettings).name = name
return nil
}

@@ -64,7 +64,7 @@ type emitterSettings struct {
//
// This allows to provide state tracking for dynamic systems, and/or
// allows new subscribers to verify that there are Emitters on the channel
func Stateful(s interface{}) error {
func Stateful(s any) error {
s.(*emitterSettings).makeStateful = true
return nil
}
@@ -304,10 +304,7 @@ func (o *Manager) getTopExternalAddrs(localTWStr string, minObservers int) []*ob
})
// TODO(sukunrt): Improve this logic. Return only if the addresses have a
// threshold fraction of the maximum observations
n := len(observerSets)
if n > maxExternalThinWaistAddrsPerLocalAddr {
n = maxExternalThinWaistAddrsPerLocalAddr
}
n := min(len(observerSets), maxExternalThinWaistAddrsPerLocalAddr)
return observerSets[:n]
}
@@ -93,7 +93,7 @@ func TestObservedAddrsManager(t *testing.T) {
}

conns := make([]*mockConn, 0, n)
for i := 0; i < n; i++ {
for i := range n {
ipPart := ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d", i))
conns = append(conns, newConn(localAddr, ma.Join(ipPart, protoPart)))
}

@@ -129,7 +129,7 @@ func TestObservedAddrsManager(t *testing.T) {
defer o.Close()
conns := getConns(t, 40, ma.P_TCP)
observedAddrs := make([]ma.Multiaddr, maxExternalThinWaistAddrsPerLocalAddr*2)
for i := 0; i < len(observedAddrs); i++ {
for i := range observedAddrs {
observedAddrs[i] = ma.StringCast(fmt.Sprintf("/ip4/2.2.2.%d/tcp/2", i))
}
for i, c := range conns {

@@ -207,11 +207,11 @@ func TestObservedAddrsManager(t *testing.T) {

const N = 4 // ActivationThresh
var ob1, ob2 [N]connMultiaddrs
for i := 0; i < N; i++ {
for i := range N {
ob1[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i)))
ob2[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/2/quic-v1", i)))
}
for i := 0; i < N-1; i++ {
for i := range N - 1 {
o.maybeRecordObservation(ob1[i], observedQuic)
o.maybeRecordObservation(ob2[i], observedQuic)
}

@@ -226,7 +226,7 @@ func TestObservedAddrsManager(t *testing.T) {
}, 2*time.Second, 100*time.Millisecond)

// Now disconnect first observer group
for i := 0; i < N; i++ {
for i := range N {
o.removeConn(ob1[i])
}
time.Sleep(100 * time.Millisecond)

@@ -235,7 +235,7 @@ func TestObservedAddrsManager(t *testing.T) {
}

// Now disconnect the second group to check cleanup
for i := 0; i < N; i++ {
for i := range N {
o.removeConn(ob2[i])
}
require.Eventually(t, func() bool {

@@ -254,11 +254,11 @@ func TestObservedAddrsManager(t *testing.T) {

const N = 4 // ActivationThresh
var ob1, ob2 [N]connMultiaddrs
for i := 0; i < N; i++ {
for i := range N {
ob1[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i)))
ob2[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/2/quic-v1", i)))
}
for i := 0; i < N-1; i++ {
for i := range N - 1 {
o.maybeRecordObservation(ob1[i], observedQuic1)
o.maybeRecordObservation(ob2[i], observedQuic2)
}

@@ -273,7 +273,7 @@ func TestObservedAddrsManager(t *testing.T) {
}, 2*time.Second, 100*time.Millisecond)

// Now disconnect first observer group
for i := 0; i < N; i++ {
for i := range N {
o.removeConn(ob1[i])
}
time.Sleep(100 * time.Millisecond)

@@ -282,7 +282,7 @@ func TestObservedAddrsManager(t *testing.T) {
}

// Now disconnect the second group to check cleanup
for i := 0; i < N; i++ {
for i := range N {
o.removeConn(ob2[i])
}
require.Eventually(t, func() bool {

@@ -300,7 +300,7 @@ func TestObservedAddrsManager(t *testing.T) {
c5 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.5/udp/1/quic-v1"))
c6 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.6/udp/1/quic-v1"))
var observedQuic, observedWebTransport, observedWebTransportWithCertHash ma.Multiaddr
for i := 0; i < 10; i++ {
for i := range 10 {
// Change the IP address in each observation
observedQuic = ma.StringCast(fmt.Sprintf("/ip4/2.2.2.%d/udp/2/quic-v1", i))
observedWebTransport = ma.StringCast(fmt.Sprintf("/ip4/2.2.2.%d/udp/2/quic-v1/webtransport", i))

@@ -325,7 +325,7 @@ func TestObservedAddrsManager(t *testing.T) {
requireEqualAddrs(t, []ma.Multiaddr{observedQuic}, o.AddrsFor(quic4ListenAddr))
requireAddrsMatch(t, []ma.Multiaddr{observedQuic, observedWebTransportWithCertHash}, o.Addrs(0))

for i := 0; i < 3; i++ {
for range 3 {
// remove non-recorded connection
o.removeConn(c6)
}
@@ -374,7 +374,7 @@ func TestObservedAddrsManager(t *testing.T) {

observedWebTransport := ma.StringCast("/ip4/2.2.2.2/udp/1/quic-v1/webtransport")
var udpConns [5 * maxExternalThinWaistAddrsPerLocalAddr]connMultiaddrs
for i := 0; i < len(udpConns); i++ {
for i := range len(udpConns) {
udpConns[i] = newConn(webTransport4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1/webtransport", i)))
o.maybeRecordObservation(udpConns[i], observedWebTransport)
time.Sleep(10 * time.Millisecond)

@@ -391,12 +391,12 @@ func TestObservedAddrsManager(t *testing.T) {
defer o.Close()
const N = 100
var tcpConns, quicConns [N]*mockConn
for i := 0; i < N; i++ {
for i := range N {
tcpConns[i] = newConn(tcp4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/tcp/1", i)))
quicConns[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i)))
}
var observedQuic, observedTCP ma.Multiaddr
for i := 0; i < N; i++ {
for i := range N {
// ip addr has the form 2.2.<conn-num>.2
observedQuic = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.2/udp/2/quic-v1", i%20))
observedTCP = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.2/tcp/2", i%20))

@@ -410,7 +410,7 @@ func TestObservedAddrsManager(t *testing.T) {
require.Equal(t, len(o.Addrs(0)), 3*maxExternalThinWaistAddrsPerLocalAddr)
}, 1*time.Second, 100*time.Millisecond)
addrs := o.Addrs(0)
for i := 0; i < 10; i++ {
for range 10 {
require.ElementsMatch(t, o.Addrs(0), addrs, "%s %s", o.Addrs(0), addrs)
time.Sleep(50 * time.Millisecond)
}

@@ -419,7 +419,7 @@ func TestObservedAddrsManager(t *testing.T) {
require.Equal(t, tcpNAT, network.NATDeviceTypeEndpointDependent)
require.Equal(t, udpNAT, network.NATDeviceTypeEndpointDependent)

for i := 0; i < N; i++ {
for i := range N {
o.removeConn(tcpConns[i])
o.removeConn(quicConns[i])
}

@@ -444,7 +444,7 @@ func TestObservedAddrsManager(t *testing.T) {
const N = 100
var tcp4Conns, quic4Conns, webTransport4Conns [N]*mockConn
var tcp6Conns, quic6Conns, webTransport6Conns [N]*mockConn
for i := 0; i < N; i++ {
for i := range N {
tcp4Conns[i] = newConn(tcp4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/tcp/1", i)))
quic4Conns[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i)))
webTransport4Conns[i] = newConn(webTransport4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1/webtransport", i)))

@@ -455,8 +455,8 @@ func TestObservedAddrsManager(t *testing.T) {
}
var observedQUIC4, observedWebTransport4, observedTCP4 ma.Multiaddr
var observedQUIC6, observedWebTransport6, observedTCP6 ma.Multiaddr
for i := 0; i < N; i++ {
for j := 0; j < 5; j++ {
for i := range N {
for j := range 5 {
// ip addr has the form 2.2.<conn-num>.<obs-num>
observedQUIC4 = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.%d/udp/2/quic-v1", i/10, j))
observedWebTransport4 = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.%d/udp/2/quic-v1/webtransport", i/10, j))

@@ -482,7 +482,7 @@ func TestObservedAddrsManager(t *testing.T) {
return len(o.Addrs(0)) == 2*3*maxExternalThinWaistAddrsPerLocalAddr
}, 1*time.Second, 100*time.Millisecond)
addrs := o.Addrs(0)
for i := 0; i < 10; i++ {
for range 10 {
require.ElementsMatch(t, o.Addrs(0), addrs, "%s %s", o.Addrs(0), addrs)
time.Sleep(10 * time.Millisecond)
}

@@ -519,7 +519,7 @@ func TestObservedAddrsManager(t *testing.T) {
matest.AssertMultiaddrsMatch(t, o.Addrs(0), allAddrs)
}, 1*time.Second, 100*time.Millisecond)

for i := 0; i < N; i++ {
for i := range N {
o.removeConn(tcp4Conns[i])
o.removeConn(quic4Conns[i])
o.removeConn(webTransport4Conns[i])

@@ -584,7 +584,7 @@ func FuzzObservedAddrsManager(f *testing.F) {
}
n = len(addrs)
for i := 0; i < n; i++ {
for j := 0; j < len(protos); j++ {
for j := range protos {
protoAddr := ma.StringCast(protos[j])
addrs = append(addrs, addrs[i].Encapsulate(protoAddr))
addrs = append(addrs, protoAddr)

@@ -48,7 +48,7 @@ func TestLatencyEWMA(t *testing.T) {
const sig = 10
next := func() time.Duration { return time.Duration(rand.Intn(20) - 10 + mu) }

for i := 0; i < 10; i++ {
for range 10 {
m.RecordLatency(id, next())
}
@@ -41,7 +41,7 @@ func NewPeerMetadata(_ context.Context, store ds.Datastore, _ Options) (*dsPeerM
return &dsPeerMetadata{store}, nil
}

func (pm *dsPeerMetadata) Get(p peer.ID, key string) (interface{}, error) {
func (pm *dsPeerMetadata) Get(p peer.ID, key string) (any, error) {
k := pmBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).ChildString(key)
value, err := pm.ds.Get(context.TODO(), k)
if err != nil {

@@ -51,14 +51,14 @@ func (pm *dsPeerMetadata) Get(p peer.ID, key string) (interface{}, error) {
return nil, err
}

var res interface{}
var res any
if err := gob.NewDecoder(bytes.NewReader(value)).Decode(&res); err != nil {
return nil, err
}
return res, nil
}

func (pm *dsPeerMetadata) Put(p peer.ID, key string, val interface{}) error {
func (pm *dsPeerMetadata) Put(p peer.ID, key string, val any) error {
k := pmBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).ChildString(key)
var buf pool.Buffer
if err := gob.NewEncoder(&buf).Encode(&val); err != nil {

@@ -136,7 +136,7 @@ func uniquePeerIds(ds ds.Datastore, prefix ds.Key, extractor func(result query.R

func (ps *pstoreds) Close() (err error) {
var errs []error
weakClose := func(name string, c interface{}) {
weakClose := func(name string, c any) {
if cl, ok := c.(io.Closer); ok {
if err = cl.Close(); err != nil {
errs = append(errs, fmt.Errorf("%s error: %s", name, err))
@@ -32,7 +32,7 @@ func TestPeerAddrsNextExpiry(t *testing.T) {

func peerAddrsInput(n int) []*expiringAddr {
expiringAddrs := make([]*expiringAddr, n)
for i := 0; i < n; i++ {
for i := range n {
port := i % 65535
a := ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/udp/%d/quic-v1", port))
e := time.Time{}.Add(time.Duration(i) * time.Second)

@@ -48,11 +48,11 @@ func TestPeerAddrsHeapProperty(t *testing.T) {

const N = 10000
expiringAddrs := peerAddrsInput(N)
for i := 0; i < N; i++ {
for i := range N {
paa.Insert(expiringAddrs[i])
}

for i := 0; i < N; i++ {
for i := range N {
ea, ok := pa.PopIfExpired(expiringAddrs[i].Expiry)
require.True(t, ok, "pos: %d", i)
require.Equal(t, ea.Addr, expiringAddrs[i].Addr)

@@ -69,7 +69,7 @@ func TestPeerAddrsHeapPropertyDeletions(t *testing.T) {

const N = 10000
expiringAddrs := peerAddrsInput(N)
for i := 0; i < N; i++ {
for i := range N {
paa.Insert(expiringAddrs[i])
}

@@ -78,7 +78,7 @@ func TestPeerAddrsHeapPropertyDeletions(t *testing.T) {
paa.Delete(expiringAddrs[i])
}

for i := 0; i < N; i++ {
for i := range N {
ea, ok := pa.PopIfExpired(expiringAddrs[i].Expiry)
if i%3 == 0 {
require.False(t, ok)

@@ -100,7 +100,7 @@ func TestPeerAddrsHeapPropertyUpdates(t *testing.T) {

const N = 10000
expiringAddrs := peerAddrsInput(N)
for i := 0; i < N; i++ {
for i := range N {
heap.Push(pa, expiringAddrs[i])
}

@@ -112,7 +112,7 @@ func TestPeerAddrsHeapPropertyUpdates(t *testing.T) {
endElements = append(endElements, expiringAddrs[i].Addr)
}

for i := 0; i < N; i++ {
for i := range N {
if i%3 == 0 {
continue // skip the elements at the end
}

@@ -136,7 +136,7 @@ func TestPeerAddrsHeapPropertyUpdates(t *testing.T) {
// TestPeerAddrsExpiry tests for multiple element expiry with PopIfExpired.
func TestPeerAddrsExpiry(t *testing.T) {
const T = 100_000
for x := 0; x < T; x++ {
for range T {
paa := newPeerAddrs()
pa := &paa
// Try a lot of random inputs.

@@ -144,16 +144,16 @@ func TestPeerAddrsExpiry(t *testing.T) {
// So this should test for all possible 5 element inputs.
const N = 5
expiringAddrs := peerAddrsInput(N)
for i := 0; i < N; i++ {
for i := range N {
expiringAddrs[i].Expiry = time.Time{}.Add(time.Duration(1+rand.Intn(N)) * time.Second)
}
for i := 0; i < N; i++ {
for i := range N {
pa.Insert(expiringAddrs[i])
}

expiry := time.Time{}.Add(time.Duration(1+rand.Intn(N)) * time.Second)
expected := []ma.Multiaddr{}
for i := 0; i < N; i++ {
for i := range N {
if !expiry.Before(expiringAddrs[i].Expiry) {
expected = append(expected, expiringAddrs[i].Addr)
}

@@ -167,7 +167,7 @@ func TestPeerAddrsExpiry(t *testing.T) {
got = append(got, ea.Addr)
}
expiries := []int{}
for i := 0; i < N; i++ {
for i := range N {
expiries = append(expiries, expiringAddrs[i].Expiry.Second())
}
require.ElementsMatch(t, expected, got, "failed for input: element expiries: %v, expiry: %v", expiries, expiry.Second())

@@ -195,7 +195,7 @@ func BenchmarkPeerAddrs(b *testing.B) {
paa := newPeerAddrs()
pa := &paa
expiringAddrs := peerAddrsInput(sz)
for i := 0; i < sz; i++ {
for i := range sz {
pa.Insert(expiringAddrs[i])
}
b.StartTimer()
@@ -23,7 +23,7 @@ func TestInvalidOption(t *testing.T) {
func TestFuzzInMemoryPeerstore(t *testing.T) {
// Just create and close a bunch of peerstores. If this leaks, we'll
// catch it in the leak check below.
for i := 0; i < 100; i++ {
for range 100 {
ps, err := NewPeerstore()
require.NoError(t, err)
ps.Close()

@@ -99,10 +99,10 @@ func BenchmarkGC(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
for i := 0; i < peerCount; i++ {
for i := range peerCount {
id := peer.ID(strconv.Itoa(i))
addrs := make([]multiaddr.Multiaddr, addrsPerPeer)
for j := 0; j < addrsPerPeer; j++ {
for j := range addrsPerPeer {
addrs[j] = multiaddr.StringCast("/ip4/1.2.3.4/tcp/" + strconv.Itoa(j))
}
ps.AddAddrs(id, addrs, 24*time.Hour)
@@ -9,7 +9,7 @@ import (

type memoryPeerMetadata struct {
// store other data, like versions
ds map[peer.ID]map[string]interface{}
ds map[peer.ID]map[string]any
dslock sync.RWMutex
}

@@ -17,23 +17,23 @@ var _ pstore.PeerMetadata = (*memoryPeerMetadata)(nil)

func NewPeerMetadata() *memoryPeerMetadata {
return &memoryPeerMetadata{
ds: make(map[peer.ID]map[string]interface{}),
ds: make(map[peer.ID]map[string]any),
}
}

func (ps *memoryPeerMetadata) Put(p peer.ID, key string, val interface{}) error {
func (ps *memoryPeerMetadata) Put(p peer.ID, key string, val any) error {
ps.dslock.Lock()
defer ps.dslock.Unlock()
m, ok := ps.ds[p]
if !ok {
m = make(map[string]interface{})
m = make(map[string]any)
ps.ds[p] = m
}
m[key] = val
return nil
}

func (ps *memoryPeerMetadata) Get(p peer.ID, key string) (interface{}, error) {
func (ps *memoryPeerMetadata) Get(p peer.ID, key string) (any, error) {
ps.dslock.RLock()
defer ps.dslock.RUnlock()
m, ok := ps.ds[p]

@@ -20,7 +20,7 @@ type pstoremem struct {

var _ peerstore.Peerstore = &pstoremem{}

type Option interface{}
type Option any

// NewPeerstore creates an in-memory thread-safe collection of peers.
// It's the caller's responsibility to call RemovePeer to ensure

@@ -57,7 +57,7 @@ func NewPeerstore(opts ...Option) (ps *pstoremem, err error) {

func (ps *pstoremem) Close() (err error) {
var errs []error
weakClose := func(name string, c interface{}) {
weakClose := func(name string, c any) {
if cl, ok := c.(io.Closer); ok {
if err = cl.Close(); err != nil {
errs = append(errs, fmt.Errorf("%s error: %s", name, err))
@@ -217,7 +217,7 @@ func testSetNegativeTTLClears(m pstore.AddrBook, _ *mockClock.Mock) func(t *test
// try to remove the same addr multiple times
m.SetAddrs(id, addrs[:5], time.Hour)
repeated := make([]multiaddr.Multiaddr, 10)
for i := 0; i < len(repeated); i++ {
for i := range repeated {
repeated[i] = addrs[0]
}
m.SetAddrs(id, repeated, -1)

@@ -38,15 +38,15 @@ func BenchmarkPeerstore(b *testing.B, factory PeerstoreFactory, _ string) {
b.ResetTimer()
itersPerBM := 10
for i := 0; i < b.N; i++ {
for j := 0; j < itersPerBM; j++ {
for j := range itersPerBM {
pp := peers[(i+j)%N]
ps.AddAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
}
for j := 0; j < itersPerBM; j++ {
for j := range itersPerBM {
pp := peers[(i+j)%N]
ps.Addrs(pp.ID)
}
for j := 0; j < itersPerBM; j++ {
for j := range itersPerBM {
pp := peers[(i+j)%N]
ps.ClearAddrs(pp.ID)
}

@@ -114,7 +114,7 @@ func testKeyBookPeers(kb pstore.KeyBook) func(t *testing.T) {
}

var peers peer.IDSlice
for i := 0; i < 10; i++ {
for range 10 {
// Add a public key.
_, pub, err := pt.RandTestKeyPair(ic.RSA, 2048)
if err != nil {

@@ -309,7 +309,7 @@ func benchmarkAddPrivKey(kb pstore.KeyBook) func(*testing.B) {

func benchmarkPeersWithKeys(kb pstore.KeyBook) func(*testing.B) {
return func(b *testing.B) {
for i := 0; i < 10; i++ {
for range 10 {
priv, pub, err := pt.RandTestKeyPair(ic.RSA, 2048)
if err != nil {
b.Fatal(err)
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"sort"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -46,7 +46,7 @@ func TestPeerstore(t *testing.T, factory PeerstoreFactory) {
|
||||
}
|
||||
|
||||
func sortProtos(protos []protocol.ID) {
|
||||
sort.Slice(protos, func(i, j int) bool { return protos[i] < protos[j] })
|
||||
slices.Sort(protos)
|
||||
}
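`slices.Sort` (Go 1.21) sorts any slice of an ordered element type and replaces the `sort.Slice` closure; `protocol.ID` qualifies because it is a string type. A hedged sketch using plain strings rather than the repository's types:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	protos := []string{"/ipfs/id/1.0.0", "/ipfs/ping/1.0.0", "/ipfs/bitswap/1.2.0"}
	// Works for any element type satisfying cmp.Ordered,
	// so the sort.Slice less-func is no longer needed.
	slices.Sort(protos)
	fmt.Println(protos)
}
```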
|
||||
|
||||
func testAddrStream(ps pstore.Peerstore) func(t *testing.T) {
|
||||
@@ -65,7 +65,7 @@ func testAddrStream(ps pstore.Peerstore) func(t *testing.T) {
|
||||
|
||||
// now receive them (without hanging)
|
||||
timeout := time.After(time.Second * 10)
|
||||
for i := 0; i < 20; i++ {
|
||||
for range 20 {
|
||||
select {
|
||||
case <-addrch:
|
||||
case <-timeout:
|
||||
@@ -88,7 +88,7 @@ func testAddrStream(ps pstore.Peerstore) func(t *testing.T) {
|
||||
|
||||
// receive some concurrently with the goroutine
|
||||
timeout = time.After(time.Second * 10)
|
||||
for i := 0; i < 40; i++ {
|
||||
for range 40 {
|
||||
select {
|
||||
case <-addrch:
|
||||
case <-timeout:
|
||||
@@ -99,7 +99,7 @@ func testAddrStream(ps pstore.Peerstore) func(t *testing.T) {
|
||||
|
||||
// receive some more after waiting for that goroutine to complete
|
||||
timeout = time.After(time.Second * 10)
|
||||
for i := 0; i < 20; i++ {
|
||||
for range 20 {
|
||||
select {
|
||||
case <-addrch:
|
||||
case <-timeout:
|
||||
@@ -110,7 +110,7 @@ func testAddrStream(ps pstore.Peerstore) func(t *testing.T) {
|
||||
cancel()
|
||||
|
||||
// now check the *second* subscription. We should see 80 addresses.
|
||||
for i := 0; i < 80; i++ {
|
||||
for range 80 {
|
||||
<-addrch2
|
||||
}
|
||||
|
||||
@@ -131,14 +131,14 @@ func testGetStreamBeforePeerAdded(ps pstore.Peerstore) func(t *testing.T) {
|
||||
defer cancel()
|
||||
|
||||
ach := ps.AddrStream(ctx, pid)
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
ps.AddAddr(pid, addrs[i], time.Hour)
|
||||
}
|
||||
|
||||
received := make(map[string]bool)
|
||||
var count int
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
a, ok := <-ach
|
||||
if !ok {
|
||||
t.Fatal("channel shouldnt be closed yet")
|
||||
@@ -181,7 +181,7 @@ func testAddrStreamDuplicates(ps pstore.Peerstore) func(t *testing.T) {
|
||||
|
||||
ach := ps.AddrStream(ctx, pid)
|
||||
go func() {
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
ps.AddAddr(pid, addrs[i], time.Hour)
|
||||
ps.AddAddr(pid, addrs[rand.Intn(10)], time.Hour)
|
||||
}
|
||||
@@ -374,8 +374,8 @@ func testCertifiedAddrBook(ps pstore.Peerstore) func(*testing.T) {
|
||||
}
|
||||
|
||||
func getAddrs(t *testing.T, n int) []ma.Multiaddr {
|
||||
var addrs []ma.Multiaddr
|
||||
for i := 0; i < n; i++ {
|
||||
addrs := make([]ma.Multiaddr, 0, n)
|
||||
for i := range n {
|
||||
a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", i))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -389,7 +389,7 @@ func getAddrs(t *testing.T, n int) []ma.Multiaddr {
|
||||
func TestPeerstoreProtoStoreLimits(t *testing.T, ps pstore.Peerstore, limit int) {
|
||||
p := peer.ID("foobar")
|
||||
protocols := make([]protocol.ID, limit)
|
||||
for i := 0; i < limit; i++ {
|
||||
for i := range limit {
|
||||
protocols[i] = protocol.ID(fmt.Sprintf("protocol %d", i))
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@@ -36,7 +37,7 @@ func RandomPeer(b *testing.B, addrCount int) *peerpair {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < addrCount; i++ {
|
||||
for i := range addrCount {
|
||||
if addrs[i], err = ma.NewMultiaddr(fmt.Sprintf(aFmt, i, pid)); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -46,7 +47,7 @@ func RandomPeer(b *testing.B, addrCount int) *peerpair {
|
||||
|
||||
func getPeerPairs(b *testing.B, n int, addrsPerPeer int) []*peerpair {
|
||||
pps := make([]*peerpair, n)
|
||||
for i := 0; i < n; i++ {
|
||||
for i := range n {
|
||||
pps[i] = RandomPeer(b, addrsPerPeer)
|
||||
}
|
||||
return pps
|
||||
@@ -54,7 +55,7 @@ func getPeerPairs(b *testing.B, n int, addrsPerPeer int) []*peerpair {
|
||||
|
||||
func GenerateAddrs(count int) []ma.Multiaddr {
|
||||
var addrs = make([]ma.Multiaddr, count)
|
||||
for i := 0; i < count; i++ {
|
||||
for i := range count {
|
||||
addrs[i] = Multiaddr(fmt.Sprintf("/ip4/1.1.1.%d/tcp/1111", i))
|
||||
}
|
||||
return addrs
|
||||
@@ -62,7 +63,7 @@ func GenerateAddrs(count int) []ma.Multiaddr {
|
||||
|
||||
func GeneratePeerIDs(count int) []peer.ID {
|
||||
var ids = make([]peer.ID, count)
|
||||
for i := 0; i < count; i++ {
|
||||
for i := range count {
|
||||
ids[i], _ = pt.RandPeerID()
|
||||
}
|
||||
return ids
|
||||
@@ -75,14 +76,7 @@ func AssertAddressesEqual(t *testing.T, exp, act []ma.Multiaddr) {
|
||||
}
|
||||
|
||||
for _, a := range exp {
|
||||
found := false
|
||||
|
||||
for _, b := range act {
|
||||
if a.Equal(b) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
found := slices.ContainsFunc(act, a.Equal)
|
||||
|
||||
if !found {
|
||||
t.Fatalf("expected address %s not found", a)
|
||||
|
||||
@@ -234,7 +234,7 @@ func BenchmarkAllowlistCheck(b *testing.B) {
|
||||
countOfTotalPeersForTest := 100_000
|
||||
|
||||
mas := make([]multiaddr.Multiaddr, countOfTotalPeersForTest)
|
||||
for i := 0; i < countOfTotalPeersForTest; i++ {
|
||||
for i := range countOfTotalPeersForTest {
|
||||
|
||||
ip := make([]byte, 16)
|
||||
n, err := rand.Reader.Read(ip)
|
||||
|
||||
@@ -64,7 +64,7 @@ func TestItLimits(t *testing.T) {
|
||||
|
||||
t.Run("IPv6 with multiple limits", func(t *testing.T) {
|
||||
cl := newConnLimiter()
|
||||
for i := 0; i < defaultMaxConcurrentConns; i++ {
|
||||
for i := range defaultMaxConcurrentConns {
|
||||
ip := net.ParseIP("ff:2:3:4::1")
|
||||
binary.BigEndian.PutUint16(ip[14:], uint16(i))
|
||||
ipAddr := netip.MustParseAddr(ip.String())
|
||||
|
||||
@@ -15,8 +15,8 @@ func (e *ErrStreamOrConnLimitExceeded) Error() string { return e.err.Error() }
|
||||
func (e *ErrStreamOrConnLimitExceeded) Unwrap() error { return e.err }
|
||||
|
||||
// edge may be "" if this is not an edge error
|
||||
func logValuesStreamLimit(scope, edge string, dir network.Direction, stat network.ScopeStat, err error) []interface{} {
|
||||
logValues := make([]interface{}, 0, 2*8)
|
||||
func logValuesStreamLimit(scope, edge string, dir network.Direction, stat network.ScopeStat, err error) []any {
|
||||
logValues := make([]any, 0, 2*8)
|
||||
logValues = append(logValues, "scope", scope)
|
||||
if edge != "" {
|
||||
logValues = append(logValues, "edge", edge)
|
||||
@@ -34,8 +34,8 @@ func logValuesStreamLimit(scope, edge string, dir network.Direction, stat networ
|
||||
}
|
||||
|
||||
// edge may be "" if this is not an edge error
|
||||
func logValuesConnLimit(scope, edge string, dir network.Direction, usefd bool, stat network.ScopeStat, err error) []interface{} {
|
||||
logValues := make([]interface{}, 0, 2*9)
|
||||
func logValuesConnLimit(scope, edge string, dir network.Direction, usefd bool, stat network.ScopeStat, err error) []any {
|
||||
logValues := make([]any, 0, 2*9)
|
||||
logValues = append(logValues, "scope", scope)
|
||||
if edge != "" {
|
||||
logValues = append(logValues, "edge", edge)
|
||||
@@ -62,8 +62,8 @@ func (e *ErrMemoryLimitExceeded) Error() string { return e.err.Error() }
|
||||
func (e *ErrMemoryLimitExceeded) Unwrap() error { return e.err }
|
||||
|
||||
// edge may be "" if this is not an edge error
|
||||
func logValuesMemoryLimit(scope, edge string, stat network.ScopeStat, err error) []interface{} {
|
||||
logValues := make([]interface{}, 0, 2*8)
|
||||
func logValuesMemoryLimit(scope, edge string, stat network.ScopeStat, err error) []any {
|
||||
logValues := make([]any, 0, 2*8)
|
||||
logValues = append(logValues, "scope", scope)
|
||||
if edge != "" {
|
||||
logValues = append(logValues, "edge", edge)
|
||||
|
||||
@@ -2,6 +2,7 @@ package rcmgr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
@@ -86,9 +87,7 @@ func (r *resourceManager) ListProtocols() []protocol.ID {
|
||||
result = append(result, p)
|
||||
}
|
||||
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
return result[i] < result[j]
|
||||
})
|
||||
slices.Sort(result)
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package rcmgr
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"maps"
|
||||
"math"
|
||||
"strconv"
|
||||
|
||||
@@ -347,32 +348,32 @@ func (l *ResourceLimits) Build(defaults Limit) BaseLimit {
|
||||
}
|
||||
|
||||
type PartialLimitConfig struct {
|
||||
System ResourceLimits `json:",omitempty"`
|
||||
Transient ResourceLimits `json:",omitempty"`
|
||||
System ResourceLimits
|
||||
Transient ResourceLimits
|
||||
|
||||
// Limits that are applied to resources with an allowlisted multiaddr.
|
||||
// These will only be used if the normal System & Transient limits are
|
||||
// reached.
|
||||
AllowlistedSystem ResourceLimits `json:",omitempty"`
|
||||
AllowlistedTransient ResourceLimits `json:",omitempty"`
|
||||
AllowlistedSystem ResourceLimits
|
||||
AllowlistedTransient ResourceLimits
|
||||
|
||||
ServiceDefault ResourceLimits `json:",omitempty"`
|
||||
ServiceDefault ResourceLimits
|
||||
Service map[string]ResourceLimits `json:",omitempty"`
|
||||
|
||||
ServicePeerDefault ResourceLimits `json:",omitempty"`
|
||||
ServicePeerDefault ResourceLimits
|
||||
ServicePeer map[string]ResourceLimits `json:",omitempty"`
|
||||
|
||||
ProtocolDefault ResourceLimits `json:",omitempty"`
|
||||
ProtocolDefault ResourceLimits
|
||||
Protocol map[protocol.ID]ResourceLimits `json:",omitempty"`
|
||||
|
||||
ProtocolPeerDefault ResourceLimits `json:",omitempty"`
|
||||
ProtocolPeerDefault ResourceLimits
|
||||
ProtocolPeer map[protocol.ID]ResourceLimits `json:",omitempty"`
|
||||
|
||||
PeerDefault ResourceLimits `json:",omitempty"`
|
||||
PeerDefault ResourceLimits
|
||||
Peer map[peer.ID]ResourceLimits `json:",omitempty"`
|
||||
|
||||
Conn ResourceLimits `json:",omitempty"`
|
||||
Stream ResourceLimits `json:",omitempty"`
|
||||
Conn ResourceLimits
|
||||
Stream ResourceLimits
|
||||
}
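Dropping the `json:",omitempty"` tags on these struct-typed fields is most likely a no-op cleanup: encoding/json's `omitempty` only omits false, zero numbers, empty strings, and nil or empty pointers, interfaces, maps, slices, and arrays, so it never fires for a plain struct value like `ResourceLimits` (and this type also has a custom `MarshalJSON`, shown below). A hedged sketch with invented types demonstrating the behavior:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Limits struct{ Conns int }

type Config struct {
	// omitempty has no effect on a non-pointer struct field:
	// the zero Limits{} is still emitted.
	A Limits `json:",omitempty"`
	// Pointer fields are omitted when nil.
	B *Limits `json:",omitempty"`
}

func main() {
	out, _ := json.Marshal(Config{})
	fmt.Println(string(out)) // {"A":{"Conns":0}}
}
```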
|
||||
|
||||
func (cfg *PartialLimitConfig) MarshalJSON() ([]byte, error) {
|
||||
@@ -493,9 +494,7 @@ func buildMapWithDefault[K comparable](definedLimits map[K]ResourceLimits, defau
|
||||
}
|
||||
|
||||
out := make(map[K]BaseLimit)
|
||||
for k, l := range defaults {
|
||||
out[k] = l
|
||||
}
|
||||
maps.Copy(out, defaults)
|
||||
|
||||
for k, l := range definedLimits {
|
||||
if defaultForKey, ok := out[k]; ok {
|
||||
@@ -653,11 +652,9 @@ func scale(base BaseLimit, inc BaseLimitIncrease, memory int64, numFD int) BaseL
|
||||
FD: base.FD,
}
if inc.FDFraction > 0 && numFD > 0 {
l.FD = int(inc.FDFraction * float64(numFD))
if l.FD < base.FD {
// Use at least the base amount
l.FD = base.FD
}
l.FD = max(int(inc.FDFraction*float64(numFD)),
// Use at least the base amount
base.FD)
}
return l
}
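The built-in `min` and `max` functions (Go 1.21) collapse the compute-then-clamp pattern above into a single expression. A small sketch of the same clamping idea, detached from the resource manager types:

```go
package main

import "fmt"

func main() {
	const base = 16

	scaled := 3 // e.g. some fraction of a resource count
	// Old pattern: if scaled < base { scaled = base }
	// New pattern: take whichever is larger in one expression.
	fd := max(scaled, base)
	fmt.Println(fd) // 16

	// min works the same way for upper bounds.
	fmt.Println(min(fd, 64)) // 16
}
```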
|
||||
|
||||
@@ -629,11 +629,11 @@ func PeerStrInScopeName(name string) string {
|
||||
return ""
|
||||
}
|
||||
// Index to avoid allocating a new string
peerSplitIdx := strings.Index(name, "peer:")
if peerSplitIdx == -1 {
return ""
}
p := (name[peerSplitIdx+len("peer:"):])
_, after, ok := strings.Cut(name, "peer:")
if !ok {
return ""
}
p := (after)
return p
}
|
||||
|
||||
@@ -647,11 +647,11 @@ func ParseProtocolScopeName(name string) string {
|
||||
}
|
||||
|
||||
// Index to avoid allocating a new string
separatorIdx := strings.Index(name, ":")
if separatorIdx == -1 {
return ""
}
return name[separatorIdx+1:]
_, after, ok := strings.Cut(name, ":")
if !ok {
return ""
}
return after
|
||||
}
|
||||
return ""
|
||||
}
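`strings.Cut` (Go 1.18) splits around the first occurrence of a separator and reports whether it was found, replacing the Index-then-slice idiom; like the manual slicing, it returns substrings of the original and does not allocate. Its `bytes.Cut` counterpart is used the same way in the auth header parsing further down. A minimal sketch with a made-up scope name:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	name := "service:peer:12D3KooWExample"
	// Cut returns the text before and after the first "peer:" plus a found flag.
	_, after, ok := strings.Cut(name, "peer:")
	if !ok {
		fmt.Println("no peer component")
		return
	}
	fmt.Println(after) // 12D3KooWExample
}
```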
|
||||
|
||||
@@ -68,11 +68,9 @@ func TestCheckMemory(t *testing.T) {
|
||||
})
|
||||
|
||||
f := func(limit uint64, res uint64, currentMem uint64, priShift uint8) bool {
limit = (limit % math.MaxInt64) + 1
if limit < 1024 {
// We set the min to 1KiB
limit = 1024
}
limit = max((limit%math.MaxInt64)+1,
// We set the min to 1KiB
1024)
currentMem = (currentMem % limit) // We can't have reserved more than our limit
res = (res >> 14) // We won't reasonably ever have a reservation > 2^50
|
||||
rc := resources{limit: &BaseLimit{
|
||||
|
||||
@@ -23,7 +23,7 @@ type trace struct {
|
||||
|
||||
mx sync.Mutex
|
||||
done bool
|
||||
pendingWrites []interface{}
|
||||
pendingWrites []any
|
||||
reporters []TraceReporter
|
||||
}
|
||||
|
||||
@@ -191,7 +191,7 @@ type TraceEvt struct {
|
||||
Scope *scopeClass `json:",omitempty"`
|
||||
Name string `json:",omitempty"`
|
||||
|
||||
Limit interface{} `json:",omitempty"`
|
||||
Limit any `json:",omitempty"`
|
||||
|
||||
Priority uint8 `json:",omitempty"`
|
||||
|
||||
@@ -243,7 +243,7 @@ func (t *trace) backgroundWriter(out io.WriteCloser) {
|
||||
ticker := time.NewTicker(time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
var pend []interface{}
|
||||
var pend []any
|
||||
|
||||
getEvents := func() {
|
||||
t.mx.Lock()
|
||||
@@ -299,7 +299,7 @@ func (t *trace) backgroundWriter(out io.WriteCloser) {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *trace) writeEvents(pend []interface{}, jout *json.Encoder) error {
|
||||
func (t *trace) writeEvents(pend []any, jout *json.Encoder) error {
|
||||
for _, e := range pend {
|
||||
if err := jout.Encode(e); err != nil {
|
||||
return err
|
||||
|
||||
@@ -270,7 +270,7 @@ func TestConcurrentAuth(t *testing.T) {
|
||||
t.Cleanup(ts.Close)
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
@@ -278,7 +278,7 @@ func TestConcurrentAuth(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
clientAuth := ClientPeerIDAuth{PrivKey: clientKey}
|
||||
reqBody := []byte(fmt.Sprintf("echo %d", i))
|
||||
reqBody := fmt.Appendf(nil, "echo %d", i)
|
||||
req, err := http.NewRequest("POST", ts.URL, bytes.NewReader(reqBody))
|
||||
require.NoError(t, err)
|
||||
req.Host = "example.com"
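`fmt.Appendf` (Go 1.19) formats directly into a byte slice; passing `nil` as the destination produces a fresh `[]byte`, which avoids the string-to-bytes round trip of `[]byte(fmt.Sprintf(...))`. A standalone sketch:

```go
package main

import "fmt"

func main() {
	i := 7
	// Equivalent to []byte(fmt.Sprintf("echo %d", i)) without the intermediate string.
	reqBody := fmt.Appendf(nil, "echo %d", i)
	fmt.Println(string(reqBody)) // echo 7

	// It can also extend an existing buffer.
	buf := []byte("prefix: ")
	buf = fmt.Appendf(buf, "echo %d", i)
	fmt.Println(string(buf)) // prefix: echo 7
}
```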
|
||||
|
||||
@@ -56,12 +56,12 @@ func (p *params) parsePeerIDAuthSchemeParams(headerVal []byte) error {
|
||||
for ; err == nil; advance, token, err = splitAuthHeaderParams(headerVal, true) {
|
||||
headerVal = headerVal[advance:]
|
||||
bs := token
|
||||
splitAt := bytes.Index(bs, []byte("="))
if splitAt == -1 {
return errInvalid
}
kB := bs[:splitAt]
v := bs[splitAt+1:]
before, after, ok := bytes.Cut(bs, []byte("="))
if !ok {
return errInvalid
}
kB := before
v := after
|
||||
if len(v) < 2 || v[0] != '"' || v[len(v)-1] != '"' {
|
||||
return errInvalid
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -1192,9 +1193,7 @@ func (h *Host) AddPeerMetadata(server peer.ID, meta PeerMeta) {
|
||||
h.peerMetadata.Add(server, meta)
|
||||
return
|
||||
}
|
||||
for proto, m := range meta {
|
||||
origMeta[proto] = m
|
||||
}
|
||||
maps.Copy(origMeta, meta)
|
||||
h.peerMetadata.Add(server, origMeta)
|
||||
}
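`maps.Copy` (Go 1.21) copies every key/value pair from one map into another, replacing the manual range-and-assign loop; like the loop, it overwrites keys that already exist in the destination. A small sketch with invented metadata maps:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	origMeta := map[string]string{"/ipfs/id/1.0.0": "old"}
	meta := map[string]string{"/ipfs/id/1.0.0": "new", "/ipfs/ping/1.0.0": "pong"}

	// Copies every entry of meta into origMeta, overwriting duplicates.
	maps.Copy(origMeta, meta)
	fmt.Println(origMeta) // map[/ipfs/id/1.0.0:new /ipfs/ping/1.0.0:pong]
}
```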
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
)
|
||||
|
||||
func TestStringSlicePool(t *testing.T) {
|
||||
for i := 0; i < 1e5; i++ {
|
||||
for range int(1e5) {
|
||||
s := GetStringSlice()
|
||||
require.Empty(t, *s)
|
||||
require.Equal(t, 8, cap(*s))
|
||||
|
||||
@@ -40,7 +40,7 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
func getFunctionName(i interface{}) string {
|
||||
func getFunctionName(i any) string {
|
||||
return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
|
||||
}
|
||||
|
||||
@@ -196,7 +196,7 @@ func SubtestStress(t *testing.T, opt Options) {
|
||||
|
||||
rateLimitN := 5000 // max of 5k funcs, because -race has 8k max.
|
||||
rateLimitChan := make(chan struct{}, rateLimitN)
|
||||
for i := 0; i < rateLimitN; i++ {
|
||||
for range rateLimitN {
|
||||
rateLimitChan <- struct{}{}
|
||||
}
|
||||
|
||||
@@ -356,7 +356,7 @@ func SubtestStreamOpenStress(t *testing.T, tr network.Multiplexer) {
|
||||
}
|
||||
stress := func() {
|
||||
defer wg.Done()
|
||||
for i := 0; i < count; i++ {
|
||||
for range count {
|
||||
s, err := muxa.OpenStream(context.Background())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
@@ -376,7 +376,7 @@ func SubtestStreamOpenStress(t *testing.T, tr network.Multiplexer) {
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < workers; i++ {
|
||||
for range workers {
|
||||
wg.Add(1)
|
||||
go stress()
|
||||
}
|
||||
@@ -530,7 +530,7 @@ func SubtestStreamLeftOpen(t *testing.T, tr network.Multiplexer) {
|
||||
wg.Add(1 + numStreams)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for i := 0; i < numStreams; i++ {
|
||||
for range numStreams {
|
||||
stra, err := muxa.OpenStream(context.Background())
|
||||
checkErr(t, err)
|
||||
go func() {
|
||||
@@ -545,7 +545,7 @@ func SubtestStreamLeftOpen(t *testing.T, tr network.Multiplexer) {
|
||||
wg.Add(1 + numStreams)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for i := 0; i < numStreams; i++ {
|
||||
for range numStreams {
|
||||
str, err := muxb.AcceptStream()
|
||||
checkErr(t, err)
|
||||
go func() {
|
||||
|
||||
@@ -26,7 +26,7 @@ func BenchmarkLockContention(b *testing.B) {
|
||||
kill := make(chan struct{})
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for i := 0; i < 16; i++ {
|
||||
for range 16 {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
@@ -3,6 +3,7 @@ package connmgr
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"maps"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@@ -554,9 +555,7 @@ func (cm *BasicConnMgr) GetTagInfo(p peer.ID) *connmgr.TagInfo {
|
||||
Conns: make(map[string]time.Time),
|
||||
}
|
||||
|
||||
for t, v := range pi.tags {
|
||||
out.Tags[t] = v
|
||||
}
|
||||
maps.Copy(out.Tags, pi.tags)
|
||||
for t, v := range pi.decaying {
|
||||
out.Tags[t.name] = v.Value
|
||||
}
|
||||
|
||||
@@ -144,8 +144,8 @@ func TestConnTrimming(t *testing.T) {
|
||||
defer cm.Close()
|
||||
not := cm.Notifee()
|
||||
|
||||
var conns []network.Conn
|
||||
for i := 0; i < 300; i++ {
|
||||
conns := make([]network.Conn, 0, 300)
|
||||
for range 300 {
|
||||
rc := randConn(t, nil)
|
||||
conns = append(conns, rc)
|
||||
not.Connected(nil, rc)
|
||||
@@ -157,7 +157,7 @@ func TestConnTrimming(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
for i := range 100 {
|
||||
cm.TagPeer(conns[i].RemotePeer(), "foo", 10)
|
||||
}
|
||||
|
||||
@@ -165,7 +165,7 @@ func TestConnTrimming(t *testing.T) {
|
||||
|
||||
cm.TrimOpenConns(context.Background())
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
for i := range 100 {
|
||||
c := conns[i]
|
||||
if c.(*tconn).isClosed() {
|
||||
t.Fatal("these shouldnt be closed")
|
||||
@@ -180,7 +180,7 @@ func TestConnTrimming(t *testing.T) {
|
||||
func TestConnsToClose(t *testing.T) {
|
||||
addConns := func(cm *BasicConnMgr, n int) {
|
||||
not := cm.Notifee()
|
||||
for i := 0; i < n; i++ {
|
||||
for range n {
|
||||
conn := randConn(t, nil)
|
||||
not.Connected(nil, conn)
|
||||
}
|
||||
@@ -430,7 +430,7 @@ func TestGracePeriod(t *testing.T) {
|
||||
|
||||
not := cm.Notifee()
|
||||
|
||||
var conns []network.Conn
|
||||
conns := make([]network.Conn, 0, 31)
|
||||
|
||||
// Add a connection and wait the grace period.
|
||||
{
|
||||
@@ -446,7 +446,7 @@ func TestGracePeriod(t *testing.T) {
|
||||
}
|
||||
|
||||
// quickly add 30 connections (sending us above the high watermark)
|
||||
for i := 0; i < 30; i++ {
|
||||
for range 30 {
|
||||
rc := randConn(t, not.Disconnected)
|
||||
conns = append(conns, rc)
|
||||
not.Connected(nil, rc)
|
||||
@@ -484,10 +484,10 @@ func TestQuickBurstRespectsSilencePeriod(t *testing.T) {
|
||||
defer cm.Close()
|
||||
not := cm.Notifee()
|
||||
|
||||
var conns []network.Conn
|
||||
conns := make([]network.Conn, 0, 30)
|
||||
|
||||
// quickly produce 30 connections (sending us above the high watermark)
|
||||
for i := 0; i < 30; i++ {
|
||||
for range 30 {
|
||||
rc := randConn(t, not.Disconnected)
|
||||
conns = append(conns, rc)
|
||||
not.Connected(nil, rc)
|
||||
@@ -526,7 +526,7 @@ func TestPeerProtectionSingleTag(t *testing.T) {
|
||||
}
|
||||
|
||||
// produce 20 connections with unique peers.
|
||||
for i := 0; i < 20; i++ {
|
||||
for range 20 {
|
||||
addConn(20)
|
||||
}
|
||||
|
||||
@@ -552,7 +552,7 @@ func TestPeerProtectionSingleTag(t *testing.T) {
|
||||
}
|
||||
|
||||
// add 5 more connection, sending the connection manager overboard.
|
||||
for i := 0; i < 5; i++ {
|
||||
for range 5 {
|
||||
addConn(20)
|
||||
}
|
||||
|
||||
@@ -578,7 +578,7 @@ func TestPeerProtectionSingleTag(t *testing.T) {
|
||||
cm.Unprotect(protected[0].RemotePeer(), "global")
|
||||
|
||||
// add 2 more connections, sending the connection manager overboard again.
|
||||
for i := 0; i < 2; i++ {
|
||||
for range 2 {
|
||||
addConn(20)
|
||||
}
|
||||
|
||||
@@ -601,8 +601,8 @@ func TestPeerProtectionMultipleTags(t *testing.T) {
|
||||
not := cm.Notifee()
|
||||
|
||||
// produce 20 connections with unique peers.
|
||||
var conns []network.Conn
|
||||
for i := 0; i < 20; i++ {
|
||||
conns := make([]network.Conn, 0, 20)
|
||||
for range 20 {
|
||||
rc := randConn(t, not.Disconnected)
|
||||
conns = append(conns, rc)
|
||||
not.Connected(nil, rc)
|
||||
@@ -638,7 +638,7 @@ func TestPeerProtectionMultipleTags(t *testing.T) {
|
||||
}
|
||||
|
||||
// add 2 more connections, sending the connection manager overboard again.
|
||||
for i := 0; i < 2; i++ {
|
||||
for range 2 {
|
||||
rc := randConn(t, not.Disconnected)
|
||||
not.Connected(nil, rc)
|
||||
cm.TagPeer(rc.RemotePeer(), "test", 20)
|
||||
@@ -657,7 +657,7 @@ func TestPeerProtectionMultipleTags(t *testing.T) {
|
||||
cm.Unprotect(protected[0].RemotePeer(), "tag2")
|
||||
|
||||
// add 2 more connections, sending the connection manager overboard again.
|
||||
for i := 0; i < 2; i++ {
|
||||
for range 2 {
|
||||
rc := randConn(t, not.Disconnected)
|
||||
not.Connected(nil, rc)
|
||||
cm.TagPeer(rc.RemotePeer(), "test", 20)
|
||||
@@ -794,7 +794,7 @@ func TestConcurrentCleanupAndTagging(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
defer cm.Close()
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
conn := randConn(t, nil)
|
||||
cm.TagPeer(conn.RemotePeer(), "test", 20)
|
||||
}
|
||||
@@ -950,12 +950,12 @@ func TestSafeConcurrency(t *testing.T) {
|
||||
const runs = 10
|
||||
const concurrency = 10
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < concurrency; i++ {
|
||||
for range concurrency {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
// add conns. This mimics new connection events
|
||||
pis := peerInfos{p1, p2}
|
||||
for i := 0; i < runs; i++ {
|
||||
for i := range runs {
|
||||
pi := pis[i%len(pis)]
|
||||
s := ss.get(pi.id)
|
||||
s.Lock()
|
||||
@@ -968,7 +968,7 @@ func TestSafeConcurrency(t *testing.T) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
pis := peerInfos{p1, p2}
|
||||
for i := 0; i < runs; i++ {
|
||||
for range runs {
|
||||
pis.SortByValueAndStreams(ss, false)
|
||||
}
|
||||
wg.Done()
|
||||
|
||||
@@ -9,7 +9,7 @@ var log = logging.Logger("mocknet")
|
||||
// WithNPeers constructs a Mocknet with N peers.
|
||||
func WithNPeers(n int) (Mocknet, error) {
|
||||
m := New()
|
||||
for i := 0; i < n; i++ {
|
||||
for range n {
|
||||
if _, err := m.GenPeer(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -2,6 +2,8 @@ package mocknet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"maps"
|
||||
"slices"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -68,11 +70,8 @@ func TestNotifications(t *testing.T) {
|
||||
|
||||
for _, c := range cons {
|
||||
var found bool
for _, c2 := range expect {
if c == c2 {
found = true
break
}
}
if slices.Contains(expect, c) {
found = true
}
|
||||
|
||||
if !found {
|
||||
@@ -138,9 +137,7 @@ func TestNotifications(t *testing.T) {
|
||||
// Avoid holding this lock while waiting, otherwise we can deadlock.
|
||||
streamStateCopy := map[network.Stream]chan struct{}{}
|
||||
n1.streamState.Lock()
|
||||
for str, ch := range n1.streamState.m {
|
||||
streamStateCopy[str] = ch
|
||||
}
|
||||
maps.Copy(streamStateCopy, n1.streamState.m)
|
||||
n1.streamState.Unlock()
|
||||
|
||||
for str1, ch1 := range streamStateCopy {
|
||||
|
||||
@@ -354,8 +354,8 @@ func TestAdding(t *testing.T) {
|
||||
mn := New()
|
||||
defer mn.Close()
|
||||
|
||||
var peers []peer.ID
|
||||
for i := 0; i < 3; i++ {
|
||||
peers := make([]peer.ID, 0, 3)
|
||||
for range 3 {
|
||||
priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -488,7 +488,7 @@ func TestLimitedStreams(t *testing.T) {
|
||||
messageSize := 500
|
||||
handler := func(s network.Stream) {
|
||||
b := make([]byte, messageSize)
|
||||
for i := 0; i < messages; i++ {
|
||||
for range messages {
|
||||
if _, err := io.ReadFull(s, b); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -524,7 +524,7 @@ func TestLimitedStreams(t *testing.T) {
|
||||
filler := make([]byte, messageSize-4)
|
||||
data := append([]byte("ping"), filler...)
|
||||
before := time.Now()
|
||||
for i := 0; i < messages; i++ {
|
||||
for range messages {
|
||||
wg.Add(1)
|
||||
if _, err := s.Write(data); err != nil {
|
||||
panic(err)
|
||||
|
||||
@@ -116,7 +116,7 @@ func (n *natpmpNAT) AddPortMapping(_ context.Context, protocol string, internalP
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
for range 3 {
|
||||
externalPort := randomPort()
|
||||
_, err = n.c.AddPortMapping(protocol, internalPort, externalPort, timeoutInSeconds)
|
||||
if err == nil {
|
||||
|
||||
@@ -193,7 +193,7 @@ func (u *upnp_NAT) AddPortMapping(ctx context.Context, protocol string, internal
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
for range 3 {
|
||||
externalPort := randomPort()
|
||||
err = u.c.AddPortMappingCtx(ctx, "", uint16(externalPort), mapProtocol(protocol), uint16(internalPort), ip.String(), true, description, timeoutInSeconds)
|
||||
if err == nil {
|
||||
|
||||
+10
-10
@@ -195,7 +195,7 @@ func TestNATRediscoveryOnConnectionError(t *testing.T) {
|
||||
errConnectionRefused := errors.New("goupnp: error performing SOAP HTTP request: Post \"http://192.168.1.1:1234/ctl/IPConn\": dial tcp 192.168.1.1:1234: connect: connection refused")
|
||||
|
||||
// Set up expectations for the failures that will trigger rediscovery
|
||||
for i := 0; i < 3; i++ {
|
||||
for i := range 3 {
|
||||
expectPortMappingFailure(mockNAT, "tcp", 10000+i, errConnectionRefused)
|
||||
}
|
||||
|
||||
@@ -204,7 +204,7 @@ func TestNATRediscoveryOnConnectionError(t *testing.T) {
|
||||
expectPortMappingSuccess(newMockNAT, "udp", 4002, 4002)
|
||||
|
||||
// Now trigger the failures
|
||||
for i := 0; i < 3; i++ {
|
||||
for i := range 3 {
|
||||
externalPort := n.establishMapping(context.Background(), "tcp", 10000+i)
|
||||
require.Equal(t, 0, externalPort)
|
||||
}
|
||||
@@ -270,7 +270,7 @@ func TestNATRediscoveryOldRouterReturns(t *testing.T) {
|
||||
errConnectionRefused := errors.New("goupnp: error performing SOAP HTTP request: dial tcp 192.168.1.1:1234: connect: connection refused")
|
||||
|
||||
// Set up expectations for the first two failures
|
||||
for i := 0; i < 2; i++ {
|
||||
for i := range 2 {
|
||||
mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000+i, gomock.Any(), MappingDuration).Return(0, errConnectionRefused).Times(1)
|
||||
mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000+i, gomock.Any(), time.Duration(0)).Return(0, errConnectionRefused).Times(1)
|
||||
}
|
||||
@@ -283,7 +283,7 @@ func TestNATRediscoveryOldRouterReturns(t *testing.T) {
|
||||
mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 4001, gomock.Any(), MappingDuration).Return(4001, nil).Times(1)
|
||||
|
||||
// Trigger the failures
|
||||
for i := 0; i < 2; i++ {
|
||||
for i := range 2 {
|
||||
n.establishMapping(context.Background(), "tcp", 10000+i)
|
||||
}
|
||||
n.establishMapping(context.Background(), "tcp", 10002)
|
||||
@@ -330,7 +330,7 @@ func TestNATRediscoveryFailureThreshold(t *testing.T) {
|
||||
errOther := errors.New("some other error")
|
||||
|
||||
// Test 1: Only 2 failures - should NOT trigger rediscovery
|
||||
for i := 0; i < 2; i++ {
|
||||
for i := range 2 {
|
||||
mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000+i, gomock.Any(), MappingDuration).Return(0, errConnectionRefused).Times(1)
|
||||
mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000+i, gomock.Any(), time.Duration(0)).Return(0, errConnectionRefused).Times(1)
|
||||
n.establishMapping(context.Background(), "tcp", 10000+i)
|
||||
@@ -345,7 +345,7 @@ func TestNATRediscoveryFailureThreshold(t *testing.T) {
|
||||
n.establishMapping(context.Background(), "tcp", 10002)
|
||||
|
||||
// Now even 2 more connection failures shouldn't trigger (counter was reset)
|
||||
for i := 0; i < 2; i++ {
|
||||
for i := range 2 {
|
||||
mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10003+i, gomock.Any(), MappingDuration).Return(0, errConnectionRefused).Times(1)
|
||||
mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10003+i, gomock.Any(), time.Duration(0)).Return(0, errConnectionRefused).Times(1)
|
||||
n.establishMapping(context.Background(), "tcp", 10003+i)
|
||||
@@ -359,7 +359,7 @@ func TestNATRediscoveryFailureThreshold(t *testing.T) {
|
||||
n.establishMapping(context.Background(), "tcp", 10005)
|
||||
|
||||
// Again, 2 failures shouldn't trigger
|
||||
for i := 0; i < 2; i++ {
|
||||
for i := range 2 {
|
||||
mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10006+i, gomock.Any(), MappingDuration).Return(0, errConnectionRefused).Times(1)
|
||||
mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10006+i, gomock.Any(), time.Duration(0)).Return(0, errConnectionRefused).Times(1)
|
||||
n.establishMapping(context.Background(), "tcp", 10006+i)
|
||||
@@ -410,14 +410,14 @@ func TestNATRediscoveryConcurrency(t *testing.T) {
|
||||
|
||||
// Simulate multiple goroutines hitting failures after threshold
|
||||
// First get to threshold
|
||||
for i := 0; i < 3; i++ {
|
||||
for i := range 3 {
|
||||
mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000+i, gomock.Any(), MappingDuration).Return(0, errConnectionRefused).Times(1)
|
||||
mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000+i, gomock.Any(), time.Duration(0)).Return(0, errConnectionRefused).Times(1)
|
||||
n.establishMapping(context.Background(), "tcp", 10000+i)
|
||||
}
|
||||
|
||||
// Set up expectations for concurrent failure attempts
|
||||
for i := 0; i < 5; i++ {
|
||||
for i := range 5 {
|
||||
port := 10003 + i
|
||||
mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", port, gomock.Any(), MappingDuration).Return(0, errConnectionRefused).AnyTimes()
|
||||
mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", port, gomock.Any(), time.Duration(0)).Return(0, errConnectionRefused).AnyTimes()
|
||||
@@ -425,7 +425,7 @@ func TestNATRediscoveryConcurrency(t *testing.T) {
|
||||
|
||||
// Now launch multiple goroutines that would all try to trigger rediscovery
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < 5; i++ {
|
||||
for i := range 5 {
|
||||
wg.Add(1)
|
||||
go func(port int) {
|
||||
defer wg.Done()
|
||||
|
||||
@@ -72,7 +72,7 @@ func TestPSKFragmentation(t *testing.T) {
|
||||
wch <- err
|
||||
}()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
if _, err := psk2.Read(out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"net"
|
||||
"runtime"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -82,10 +83,8 @@ func dialOne(t *testing.T, tr *Transport, listener manet.Listener, expected ...i
|
||||
if len(expected) == 0 {
|
||||
return port
|
||||
}
|
||||
for _, p := range expected {
if p == port {
return port
}
}
if slices.Contains(expected, port) {
return port
}
|
||||
t.Errorf("dialed %s from %v. expected to dial from port %v", listener.Multiaddr(), c.LocalAddr(), expected)
|
||||
return 0
|
||||
@@ -271,7 +270,7 @@ func TestDuplicateGlobal(t *testing.T) {
|
||||
port := dialOne(t, &trB, listenerA)
|
||||
|
||||
// Check consistency
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
dialOne(t, &trB, listenerA, port)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,7 +35,7 @@ func TestBlackHoleSuccessCounterReset(t *testing.T) {
|
||||
|
||||
bhf.RecordResult(true)
|
||||
// check if calls up to n are probes again
|
||||
for i := 0; i < n; i++ {
|
||||
for range n {
|
||||
if bhf.HandleRequest() != blackHoleStateProbing {
|
||||
t.Fatalf("expected black hole detector state to reset after success")
|
||||
}
|
||||
@@ -95,10 +95,10 @@ func TestBlackHoleDetectorInApplicableAddress(t *testing.T) {
|
||||
ma.StringCast("/ip6/::1/udp/1234/quic-v1"),
|
||||
ma.StringCast("/ip4/192.168.1.5/udp/1234/quic-v1"),
|
||||
}
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
filteredAddrs, _ := bhd.FilterAddrs(addrs)
|
||||
require.ElementsMatch(t, addrs, filteredAddrs)
|
||||
for j := 0; j < len(addrs); j++ {
|
||||
for j := range addrs {
|
||||
bhd.RecordResult(addrs[j], false)
|
||||
}
|
||||
}
|
||||
@@ -109,7 +109,7 @@ func TestBlackHoleDetectorUDPDisabled(t *testing.T) {
|
||||
bhd := &blackHoleDetector{ipv6: ipv6F}
|
||||
publicAddr := ma.StringCast("/ip4/1.2.3.4/udp/1234/quic-v1")
|
||||
privAddr := ma.StringCast("/ip4/192.168.1.5/udp/1234/quic-v1")
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
bhd.RecordResult(publicAddr, false)
|
||||
}
|
||||
wantAddrs := []ma.Multiaddr{publicAddr, privAddr}
|
||||
@@ -125,7 +125,7 @@ func TestBlackHoleDetectorIPv6Disabled(t *testing.T) {
|
||||
bhd := &blackHoleDetector{udp: udpF}
|
||||
publicAddr := ma.StringCast("/ip6/2001::1/tcp/1234")
|
||||
privAddr := ma.StringCast("/ip6/::1/tcp/1234")
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
bhd.RecordResult(publicAddr, false)
|
||||
}
|
||||
|
||||
@@ -144,7 +144,7 @@ func TestBlackHoleDetectorProbes(t *testing.T) {
|
||||
}
|
||||
udp6Addr := ma.StringCast("/ip6/2001::1/udp/1234/quic-v1")
|
||||
addrs := []ma.Multiaddr{udp6Addr}
|
||||
for i := 0; i < 3; i++ {
|
||||
for range 3 {
|
||||
bhd.RecordResult(udp6Addr, false)
|
||||
}
|
||||
for i := 1; i < 100; i++ {
|
||||
@@ -177,10 +177,10 @@ func TestBlackHoleDetectorAddrFiltering(t *testing.T) {
|
||||
udp: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 10, Name: "udp"},
|
||||
ipv6: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 10, Name: "ipv6"},
|
||||
}
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
bhd.RecordResult(udp4Pub, !udpBlocked)
|
||||
}
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
bhd.RecordResult(tcp6Pub, !ipv6Blocked)
|
||||
}
|
||||
return bhd
|
||||
@@ -217,7 +217,7 @@ func TestBlackHoleDetectorReadOnlyMode(t *testing.T) {
|
||||
bhd := &blackHoleDetector{udp: udpF, ipv6: ipv6F, readOnly: true}
|
||||
publicAddr := ma.StringCast("/ip4/1.2.3.4/udp/1234/quic-v1")
|
||||
privAddr := ma.StringCast("/ip6/::1/tcp/1234")
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
bhd.RecordResult(publicAddr, true)
|
||||
}
|
||||
allAddr := []ma.Multiaddr{privAddr, publicAddr}
|
||||
@@ -231,7 +231,7 @@ func TestBlackHoleDetectorReadOnlyMode(t *testing.T) {
|
||||
|
||||
// a non readonly shared state black hole detector
|
||||
nbhd := &blackHoleDetector{udp: bhd.udp, ipv6: bhd.ipv6, readOnly: false}
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
nbhd.RecordResult(publicAddr, true)
|
||||
}
|
||||
// no addresses filtered because state is allowed
|
||||
|
||||
@@ -98,7 +98,7 @@ func DefaultDialRanker(addrs []ma.Multiaddr) []network.AddrDelay {
|
||||
maxDelay = res[len(res)-1].Delay
|
||||
}
|
||||
|
||||
for i := 0; i < len(addrs); i++ {
|
||||
for i := range addrs {
|
||||
res = append(res, network.AddrDelay{Addr: addrs[i], Delay: maxDelay + PublicOtherDelay})
|
||||
}
|
||||
|
||||
@@ -273,7 +273,7 @@ func isQUICAddr(a ma.Multiaddr) bool {
|
||||
// filterAddrs filters an address slice in place
|
||||
func filterAddrs(addrs []ma.Multiaddr, f func(a ma.Multiaddr) bool) (filtered, rest []ma.Multiaddr) {
|
||||
j := 0
|
||||
for i := 0; i < len(addrs); i++ {
|
||||
for i := range addrs {
|
||||
if f(addrs[i]) {
|
||||
addrs[i], addrs[j] = addrs[j], addrs[i]
|
||||
j++
|
||||
|
||||
@@ -139,7 +139,7 @@ func TestDialSyncAllCancel(t *testing.T) {
|
||||
}()
|
||||
|
||||
cancel()
|
||||
for i := 0; i < 2; i++ {
|
||||
for range 2 {
|
||||
select {
|
||||
case <-finished:
|
||||
case <-time.After(time.Second):
|
||||
@@ -213,13 +213,13 @@ func TestStressActiveDial(_ *testing.T) {
|
||||
pid := peer.ID("foo")
|
||||
|
||||
makeDials := func() {
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
ds.Dial(context.Background(), pid)
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
wg.Add(1)
|
||||
go makeDials()
|
||||
}
|
||||
|
||||
@@ -141,7 +141,7 @@ func TestSimultDials(t *testing.T) {
|
||||
}
|
||||
|
||||
log.Info("Connecting swarms simultaneously.")
|
||||
for i := 0; i < 10; i++ { // connect 10x for each.
|
||||
for range 10 { // connect 10x for each.
|
||||
wg.Add(2)
|
||||
go connect(swarms[0], swarms[1].LocalPeer(), ifaceAddrs1[0])
|
||||
go connect(swarms[1], swarms[0].LocalPeer(), ifaceAddrs0[0])
|
||||
@@ -254,7 +254,7 @@ func TestDialBackoff(t *testing.T) {
|
||||
|
||||
dialOnlineNode := func(dst peer.ID, times int) <-chan bool {
|
||||
ch := make(chan bool)
|
||||
for i := 0; i < times; i++ {
|
||||
for range times {
|
||||
go func() {
|
||||
if _, err := s1.DialPeer(ctx, dst); err != nil {
|
||||
t.Error("error dialing", dst, err)
|
||||
@@ -269,7 +269,7 @@ func TestDialBackoff(t *testing.T) {
|
||||
|
||||
dialOfflineNode := func(dst peer.ID, times int) <-chan bool {
|
||||
ch := make(chan bool)
|
||||
for i := 0; i < times; i++ {
|
||||
for range times {
|
||||
go func() {
|
||||
if c, err := s1.DialPeer(ctx, dst); err != nil {
|
||||
ch <- false
|
||||
@@ -304,7 +304,7 @@ func TestDialBackoff(t *testing.T) {
|
||||
}
|
||||
|
||||
// 3) s1->s2 should succeed.
|
||||
for i := 0; i < N; i++ {
|
||||
for range N {
|
||||
select {
|
||||
case r := <-s2done:
|
||||
if !r {
|
||||
@@ -327,7 +327,7 @@ func TestDialBackoff(t *testing.T) {
|
||||
|
||||
// 4) s1->s3 should not (and should place s3 on backoff)
|
||||
// N-1 should finish before dialTimeout1x * 2
|
||||
for i := 0; i < N; i++ {
|
||||
for i := range N {
|
||||
select {
|
||||
case <-s2done:
|
||||
t.Error("s2 should have no more")
|
||||
@@ -391,7 +391,7 @@ func TestDialBackoff(t *testing.T) {
|
||||
}
|
||||
|
||||
// 8) s2 dials should all hang, and succeed
|
||||
for i := 0; i < N; i++ {
|
||||
for range N {
|
||||
select {
|
||||
case r := <-s2done:
|
||||
if !r {
|
||||
@@ -471,7 +471,7 @@ func TestDialPeerFailed(t *testing.T) {
|
||||
testedSwarm, targetSwarm := swarms[0], swarms[1]
|
||||
|
||||
const expectedErrorsCount = 5
|
||||
for i := 0; i < expectedErrorsCount; i++ {
|
||||
for range expectedErrorsCount {
|
||||
_, silentPeerAddress, silentPeerListener := newSilentPeer(t)
|
||||
go acceptAndHang(silentPeerListener)
|
||||
defer silentPeerListener.Close()
|
||||
@@ -614,7 +614,7 @@ func TestDialSimultaneousJoin(t *testing.T) {
|
||||
c3 := <-connch
|
||||
|
||||
// raise any errors from the previous goroutines
|
||||
for i := 0; i < 3; i++ {
|
||||
for range 3 {
|
||||
require.NoError(t, <-errs)
|
||||
}
|
||||
|
||||
|
||||
@@ -202,7 +202,7 @@ func TestDialWorkerLoopConcurrent(t *testing.T) {
|
||||
const dials = 100
|
||||
var wg sync.WaitGroup
|
||||
resch := make(chan dialResponse, dials)
|
||||
for i := 0; i < dials; i++ {
|
||||
for range dials {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
@@ -218,7 +218,7 @@ func TestDialWorkerLoopConcurrent(t *testing.T) {
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
for i := 0; i < dials; i++ {
|
||||
for range dials {
|
||||
res := <-resch
|
||||
require.NoError(t, res.err)
|
||||
}
|
||||
@@ -270,7 +270,7 @@ func TestDialWorkerLoopConcurrentFailure(t *testing.T) {
|
||||
var errTimeout = errors.New("timed out!")
|
||||
var wg sync.WaitGroup
|
||||
resch := make(chan dialResponse, dials)
|
||||
for i := 0; i < dials; i++ {
|
||||
for range dials {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
@@ -287,7 +287,7 @@ func TestDialWorkerLoopConcurrentFailure(t *testing.T) {
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
for i := 0; i < dials; i++ {
|
||||
for range dials {
|
||||
res := <-resch
|
||||
require.Error(t, res.err)
|
||||
if res.err == errTimeout {
|
||||
@@ -317,7 +317,7 @@ func TestDialWorkerLoopConcurrentMix(t *testing.T) {
|
||||
const dials = 100
|
||||
var wg sync.WaitGroup
|
||||
resch := make(chan dialResponse, dials)
|
||||
for i := 0; i < dials; i++ {
|
||||
for range dials {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
@@ -333,7 +333,7 @@ func TestDialWorkerLoopConcurrentMix(t *testing.T) {
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
for i := 0; i < dials; i++ {
|
||||
for range dials {
|
||||
res := <-resch
|
||||
require.NoError(t, res.err)
|
||||
}
|
||||
@@ -350,8 +350,8 @@ func TestDialWorkerLoopConcurrentFailureStress(t *testing.T) {
|
||||
|
||||
_, p2 := newPeer(t)
|
||||
|
||||
var addrs []ma.Multiaddr
|
||||
for i := 0; i < 16; i++ {
|
||||
addrs := make([]ma.Multiaddr, 0, 16)
|
||||
for i := range 16 {
|
||||
addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/11.0.0.%d/tcp/%d", i%256, 1234+i)))
|
||||
}
|
||||
s1.Peerstore().AddAddrs(p2, addrs, peerstore.PermanentAddrTTL)
|
||||
@@ -364,7 +364,7 @@ func TestDialWorkerLoopConcurrentFailureStress(t *testing.T) {
|
||||
var errTimeout = errors.New("timed out!")
|
||||
var wg sync.WaitGroup
|
||||
resch := make(chan dialResponse, dials)
|
||||
for i := 0; i < dials; i++ {
|
||||
for range dials {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
@@ -381,7 +381,7 @@ func TestDialWorkerLoopConcurrentFailureStress(t *testing.T) {
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
for i := 0; i < dials; i++ {
|
||||
for range dials {
|
||||
res := <-resch
|
||||
require.Error(t, res.err)
|
||||
if res.err == errTimeout {
|
||||
@@ -397,7 +397,7 @@ func TestDialWorkerLoopConcurrentFailureStress(t *testing.T) {
|
||||
|
||||
func TestDialQueueNextBatch(t *testing.T) {
|
||||
addrs := make([]ma.Multiaddr, 0)
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/tcp/%d", i)))
|
||||
}
|
||||
testcase := []struct {
|
||||
@@ -485,7 +485,7 @@ func TestDialQueueNextBatch(t *testing.T) {
|
||||
}
|
||||
sort.Slice(b, func(i, j int) bool { return b[i].Addr.String() < b[j].Addr.String() })
|
||||
sort.Slice(batch, func(i, j int) bool { return batch[i].String() < batch[j].String() })
|
||||
for i := 0; i < len(b); i++ {
|
||||
for i := range b {
|
||||
if !b[i].Addr.Equal(batch[i]) {
|
||||
log.Error("expected address mismatch", "expected", batch[i], "got", b[i].Addr)
|
||||
}
|
||||
@@ -745,7 +745,7 @@ loop:
|
||||
func makeRanker(tc []timedDial) network.DialRanker {
|
||||
return func(_ []ma.Multiaddr) []network.AddrDelay {
|
||||
res := make([]network.AddrDelay, len(tc))
|
||||
for i := 0; i < len(tc); i++ {
|
||||
for i := range tc {
|
||||
res[i] = network.AddrDelay{Addr: tc[i].addr, Delay: tc[i].delay}
|
||||
}
|
||||
return res
|
||||
@@ -755,7 +755,7 @@ func makeRanker(tc []timedDial) network.DialRanker {
|
||||
// TestCheckDialWorkerLoopScheduling will check the checker
|
||||
func TestCheckDialWorkerLoopScheduling(t *testing.T) {
|
||||
addrs := make([]ma.Multiaddr, 0)
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
for {
|
||||
p := 20000 + i
|
||||
addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p)))
|
||||
@@ -801,7 +801,7 @@ func TestCheckDialWorkerLoopScheduling(t *testing.T) {
|
||||
|
||||
func TestDialWorkerLoopRanking(t *testing.T) {
|
||||
addrs := make([]ma.Multiaddr, 0)
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
for {
|
||||
p := 20000 + i
|
||||
addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p)))
|
||||
@@ -972,7 +972,7 @@ func TestDialWorkerLoopHolePunching(t *testing.T) {
|
||||
|
||||
s1.dialRanker = func(addrs []ma.Multiaddr) (res []network.AddrDelay) {
|
||||
res = make([]network.AddrDelay, len(addrs))
|
||||
for i := 0; i < len(addrs); i++ {
|
||||
for i := range addrs {
|
||||
delay := 10 * time.Second
|
||||
if addrs[i].Equal(t1) {
|
||||
// fire t1 immediately
|
||||
@@ -1163,14 +1163,14 @@ func BenchmarkDialRanker(b *testing.B) {
|
||||
}
|
||||
}
|
||||
addrs := make([]ma.Multiaddr, N)
|
||||
for i := 0; i < N; i++ {
|
||||
for i := range N {
|
||||
addrs[i] = ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", i))
|
||||
}
|
||||
|
||||
b.Run("equal delay", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
addrDelays := make([]network.AddrDelay, N)
|
||||
for i := 0; i < N; i++ {
|
||||
for i := range N {
|
||||
addrDelays[i] = network.AddrDelay{
|
||||
Addr: addrs[i],
|
||||
Delay: 0,
|
||||
@@ -1183,7 +1183,7 @@ func BenchmarkDialRanker(b *testing.B) {
|
||||
b.Run("sorted delay", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
addrDelays := make([]network.AddrDelay, N)
|
||||
for i := 0; i < N; i++ {
|
||||
for i := range N {
|
||||
addrDelays[i] = network.AddrDelay{
|
||||
Addr: addrs[i],
|
||||
Delay: time.Millisecond * time.Duration(i),
|
||||
|
||||
@@ -291,15 +291,15 @@ func TestStressLimiter(t *testing.T) {
|
||||
|
||||
l := newDialLimiterWithParams(df, 20, 5)
|
||||
|
||||
var bads []ma.Multiaddr
|
||||
for i := 0; i < 100; i++ {
|
||||
bads := make([]ma.Multiaddr, 0, 101)
|
||||
for i := range 100 {
|
||||
bads = append(bads, addrWithPort(i))
|
||||
}
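Several of these test hunks also swap `var xs []T` for `xs := make([]T, 0, n)` when the final length is known, so `append` never reallocates; the capacity of 101 here presumably leaves room for the extra address appended just below. A sketch of the pattern with plain ints:

```go
package main

import "fmt"

func main() {
	const n = 100
	// Length 0 so append still starts at the beginning,
	// capacity n+1 so no reallocation happens while filling.
	bads := make([]int, 0, n+1)
	for i := range n {
		bads = append(bads, i)
	}
	addresses := append(bads, 2000) // still within capacity, no copy
	fmt.Println(len(addresses), cap(addresses)) // 101 101
}
```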
|
||||
|
||||
addresses := append(bads, addrWithPort(2000))
|
||||
success := make(chan struct{})
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
for i := range 20 {
|
||||
go func(id peer.ID) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
@@ -324,7 +324,7 @@ func TestStressLimiter(t *testing.T) {
|
||||
}(peer.ID(fmt.Sprintf("testpeer%d", i)))
|
||||
}
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
for range 20 {
|
||||
select {
|
||||
case <-success:
|
||||
case <-time.After(time.Minute):
|
||||
@@ -354,7 +354,7 @@ func TestFDLimitUnderflow(t *testing.T) {
|
||||
const num = 3 * fdLimit
|
||||
wg.Add(num)
|
||||
errs := make(chan error, num)
|
||||
for i := 0; i < num; i++ {
|
||||
for i := range num {
|
||||
go func(id peer.ID, i int) {
|
||||
defer wg.Done()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
@@ -37,7 +37,7 @@ func TestPeers(t *testing.T) {
|
||||
require.Eventually(t, func() bool { return len(s2.Peers()) > 0 }, 3*time.Second, 50*time.Millisecond)
|
||||
connect(s2, s1.LocalPeer(), s1.ListenAddresses()[0])
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
connect(s1, s2.LocalPeer(), s2.ListenAddresses()[0])
|
||||
connect(s2, s1.LocalPeer(), s1.ListenAddresses()[0])
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@ func TestSwarmResolver(t *testing.T) {
|
||||
t.Run("Test Limits", func(t *testing.T) {
|
||||
var ipaddrs []net.IPAddr
|
||||
var manyDNSAddrs []string
|
||||
for i := 0; i < 255; i++ {
|
||||
for i := range 255 {
|
||||
ip := "1.2.3." + strconv.Itoa(i)
|
||||
ipaddrs = append(ipaddrs, net.IPAddr{IP: net.ParseIP(ip)})
|
||||
manyDNSAddrs = append(manyDNSAddrs, "dnsaddr=/ip4/"+ip)
|
||||
@@ -53,21 +53,21 @@ func TestSwarmResolver(t *testing.T) {
|
||||
res, err := swarmResolver.ResolveDNSComponent(ctx, multiaddr.StringCast("/dns/example.com"), 10)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 10, len(res))
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
require.Equal(t, "/ip4/1.2.3."+strconv.Itoa(i), res[i].String())
|
||||
}
|
||||
|
||||
res, err = swarmResolver.ResolveDNSAddr(ctx, "", multiaddr.StringCast("/dnsaddr/example.com"), 1, 10)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 10, len(res))
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
require.Equal(t, "/ip4/1.2.3."+strconv.Itoa(i), res[i].String())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Test Recursive Limits", func(t *testing.T) {
|
||||
recursiveDNSAddr := make(map[string][]string)
|
||||
for i := 0; i < 255; i++ {
|
||||
for i := range 255 {
|
||||
recursiveDNSAddr["_dnsaddr."+strconv.Itoa(i)+".example.com"] = []string{"dnsaddr=/dnsaddr/" + strconv.Itoa(i+1) + ".example.com"}
|
||||
}
|
||||
recursiveDNSAddr["_dnsaddr.255.example.com"] = []string{"dnsaddr=/ip4/127.0.0.1"}
|
||||
|
||||
@@ -70,7 +70,7 @@ func TestSimultOpenFewStress(t *testing.T) {
|
||||
rounds := 10
|
||||
// rounds := 100
|
||||
|
||||
for i := 0; i < rounds; i++ {
|
||||
for range rounds {
|
||||
subtestSwarm(t, swarms, msgs)
|
||||
<-time.After(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
@@ -179,10 +179,7 @@ func (db *DialBackoff) AddBackoff(p peer.ID, addr ma.Multiaddr) {
|
||||
return
|
||||
}
|
||||
|
||||
backoffTime := BackoffBase + BackoffCoef*time.Duration(ba.tries*ba.tries)
|
||||
if backoffTime > BackoffMax {
|
||||
backoffTime = BackoffMax
|
||||
}
|
||||
backoffTime := min(BackoffBase+BackoffCoef*time.Duration(ba.tries*ba.tries), BackoffMax)
|
||||
ba.until = time.Now().Add(backoffTime)
|
||||
ba.tries++
|
||||
}
|
||||
@@ -202,10 +199,7 @@ func (db *DialBackoff) cleanup() {
|
||||
for p, e := range db.entries {
|
||||
good := false
|
||||
for _, backoff := range e {
|
||||
backoffTime := BackoffBase + BackoffCoef*time.Duration(backoff.tries*backoff.tries)
|
||||
if backoffTime > BackoffMax {
|
||||
backoffTime = BackoffMax
|
||||
}
|
||||
backoffTime := min(BackoffBase+BackoffCoef*time.Duration(backoff.tries*backoff.tries), BackoffMax)
|
||||
if now.Before(backoff.until.Add(backoffTime)) {
|
||||
good = true
|
||||
break
|
||||
|
||||
@@ -347,7 +347,7 @@ func TestAddrsForDialFiltering(t *testing.T) {
|
||||
if len(result) != len(tc.output) {
|
||||
t.Fatalf("output mismatch got: %s want: %s", result, tc.output)
|
||||
}
|
||||
for i := 0; i < len(result); i++ {
|
||||
for i := range result {
|
||||
if !result[i].Equal(tc.output[i]) {
|
||||
t.Fatalf("output mismatch got: %s want: %s", result, tc.output)
|
||||
}
|
||||
|
||||
@@ -106,7 +106,7 @@ func TestNoDeadlockWhenConsumingConnectednessEvents(t *testing.T) {
|
||||
}
|
||||
}()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
// Connect and disconnect to trigger a bunch of events
|
||||
_, err := dialer.DialPeer(context.Background(), listener.LocalPeer())
|
||||
require.NoError(t, err)
|
||||
@@ -120,7 +120,7 @@ func TestConnectednessEvents(t *testing.T) {
|
||||
s1, sub1 := newSwarmWithSubscription(t)
|
||||
const N = 100
|
||||
peers := make([]*Swarm, N)
|
||||
for i := 0; i < N; i++ {
|
||||
for i := range N {
|
||||
peers[i] = swarmt.GenSwarm(t)
|
||||
}
|
||||
|
||||
@@ -128,7 +128,7 @@ func TestConnectednessEvents(t *testing.T) {
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for i := 0; i < N; i++ {
|
||||
for range N {
|
||||
e := <-sub1.Out()
|
||||
evt, ok := e.(event.EvtPeerConnectednessChanged)
|
||||
if !ok {
|
||||
@@ -141,7 +141,7 @@ func TestConnectednessEvents(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}()
|
||||
for i := 0; i < N; i++ {
|
||||
for i := range N {
|
||||
s1.Peerstore().AddAddrs(peers[i].LocalPeer(), []ma.Multiaddr{peers[i].ListenAddresses()[0]}, time.Hour)
|
||||
_, err := s1.DialPeer(context.Background(), peers[i].LocalPeer())
|
||||
require.NoError(t, err)
|
||||
@@ -156,7 +156,7 @@ func TestConnectednessEvents(t *testing.T) {
|
||||
done = make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for i := 0; i < N/2; i++ {
|
||||
for range N / 2 {
|
||||
e := <-sub1.Out()
|
||||
evt, ok := e.(event.EvtPeerConnectednessChanged)
|
||||
if !ok {
|
||||
@@ -169,7 +169,7 @@ func TestConnectednessEvents(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}()
|
||||
for i := 0; i < N/2; i++ {
|
||||
for i := range N / 2 {
|
||||
err := s1.ClosePeer(peers[i].LocalPeer())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -208,7 +208,7 @@ func TestConnectednessEventDeadlock(t *testing.T) {
|
||||
s1, sub1 := newSwarmWithSubscription(t)
|
||||
const N = 100
|
||||
peers := make([]*Swarm, N)
|
||||
for i := 0; i < N; i++ {
|
||||
for i := range N {
|
||||
peers[i] = swarmt.GenSwarm(t)
|
||||
}
|
||||
|
||||
@@ -232,7 +232,7 @@ func TestConnectednessEventDeadlock(t *testing.T) {
|
||||
s1.ClosePeer(evt.Peer)
|
||||
}
|
||||
}()
|
||||
for i := 0; i < N; i++ {
|
||||
for i := range N {
|
||||
s1.Peerstore().AddAddrs(peers[i].LocalPeer(), []ma.Multiaddr{peers[i].ListenAddresses()[0]}, time.Hour)
|
||||
go func(i int) {
|
||||
_, err := s1.DialPeer(context.Background(), peers[i].LocalPeer())
|
||||
@@ -250,11 +250,11 @@ func TestConnectednessEventDeadlockWithDial(t *testing.T) {
|
||||
s1, sub1 := newSwarmWithSubscription(t)
|
||||
const N = 200
|
||||
peers := make([]*Swarm, N)
|
||||
for i := 0; i < N; i++ {
|
||||
for i := range N {
|
||||
peers[i] = swarmt.GenSwarm(t)
|
||||
}
|
||||
peers2 := make([]*Swarm, N)
|
||||
for i := 0; i < N; i++ {
|
||||
for i := range N {
|
||||
peers2[i] = swarmt.GenSwarm(t)
|
||||
}
|
||||
|
||||
@@ -266,7 +266,7 @@ func TestConnectednessEventDeadlockWithDial(t *testing.T) {
|
||||
defer subWG.Done()
|
||||
count := 0
|
||||
for {
|
||||
var e interface{}
|
||||
var e any
|
||||
select {
|
||||
case e = <-sub1.Out():
|
||||
case <-done:
|
||||
@@ -293,7 +293,7 @@ func TestConnectednessEventDeadlockWithDial(t *testing.T) {
|
||||
}()
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(N)
|
||||
for i := 0; i < N; i++ {
|
||||
for i := range N {
|
||||
s1.Peerstore().AddAddrs(peers[i].LocalPeer(), []ma.Multiaddr{peers[i].ListenAddresses()[0]}, time.Hour)
|
||||
go func(i int) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
|
||||
@@ -191,10 +191,5 @@ func (s *Swarm) AddListenAddr(a ma.Multiaddr) error {
|
||||
}
|
||||
|
||||
func containsMultiaddr(addrs []ma.Multiaddr, addr ma.Multiaddr) bool {
|
||||
for _, a := range addrs {
|
||||
if addr.Equal(a) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
return slices.ContainsFunc(addrs, addr.Equal)
|
||||
}
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
// and tests Connectedness value is correct.
|
||||
func TestConnectednessCorrect(t *testing.T) {
|
||||
nets := make([]network.Network, 4)
|
||||
for i := 0; i < 4; i++ {
|
||||
for i := range 4 {
|
||||
nets[i] = GenSwarm(t)
|
||||
}
|
||||
|
||||
@@ -93,7 +93,7 @@ func TestNetworkOpenStream(t *testing.T) {
|
||||
testString := "hello ipfs"
|
||||
|
||||
nets := make([]network.Network, 4)
|
||||
for i := 0; i < 4; i++ {
|
||||
for i := range 4 {
|
||||
nets[i] = GenSwarm(t)
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package swarm_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -80,11 +81,8 @@ func TestNotifications(t *testing.T) {
|
||||
|
||||
for _, c := range cons {
|
||||
var found bool
for _, c2 := range expect {
if c == c2 {
found = true
break
}
}
if slices.Contains(expect, c) {
found = true
}
|
||||
|
||||
if !found {
|
||||
|

@@ -70,7 +70,7 @@ func makeDialOnlySwarm(t *testing.T) *swarm.Swarm {

func makeSwarms(t *testing.T, num int, opts ...Option) []*swarm.Swarm {
swarms := make([]*swarm.Swarm, 0, num)
for i := 0; i < num; i++ {
for range num {
swarm := GenSwarm(t, opts...)
swarm.SetStreamHandler(EchoStreamHandler)
swarms = append(swarms, swarm)
@@ -135,7 +135,7 @@ func subtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {
}

// send out ping!
for k := 0; k < MsgNum; k++ { // with k messages
for k := range MsgNum { // with k messages
msg := "ping"
log.Debug("sending message", "local", s1.LocalPeer(), "msg", msg, "peer", p, "count", k)
if _, err := stream.Write([]byte(msg)); err != nil {
@@ -171,7 +171,7 @@ func subtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {
// receive pings
msgCount := 0
msg := make([]byte, 4)
for k := 0; k < MsgNum; k++ { // with k messages
for k := range MsgNum { // with k messages

// read from the stream
if _, err := stream.Read(msg); err != nil {
@@ -438,7 +438,7 @@ func TestStreamCount(t *testing.T) {
streamAccepted <- struct{}{}
})

for i := 0; i < 10; i++ {
for range 10 {
str, err := s2.NewStream(context.Background(), s1.LocalPeer())
require.NoError(t, err)
str.Write([]byte("foobar"))

@@ -56,14 +56,14 @@ func TestAcceptMultipleConns(t *testing.T) {
ln := createListener(t, u)
defer ln.Close()

var toClose []io.Closer
toClose := make([]io.Closer, 0, 20)
defer func() {
for _, c := range toClose {
_ = c.Close()
}
}()

for i := 0; i < 10; i++ {
for range 10 {
cconn, err := dial(t, u, ln.Multiaddr(), id, &network.NullScope{})
require.NoError(err)
toClose = append(toClose, cconn)
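
An illustrative sketch (hypothetical values, not from this commit) of the preallocation change above: when the number of appends is known up front, make with an explicit capacity avoids repeated slice growth:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// Capacity 20 is reserved up front; length starts at 0 so append fills it in.
	toClose := make([]io.Closer, 0, 20)
	for range 10 {
		toClose = append(toClose, io.NopCloser(strings.NewReader("conn")))
	}
	fmt.Println(len(toClose), cap(toClose)) // 10 20
}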

@@ -171,8 +171,8 @@ func TestListenerCloseClosesQueued(t *testing.T) {
id, upgrader := createUpgrader(t)
ln := createListener(t, upgrader)

var conns []transport.CapableConn
for i := 0; i < 10; i++ {
conns := make([]transport.CapableConn, 0, 10)
for range 10 {
conn, err := dial(t, upgrader, ln.Multiaddr(), id, &network.NullScope{})
require.NoError(err)
conns = append(conns, conn)
@@ -228,7 +228,7 @@ func TestConcurrentAccept(t *testing.T) {
// start num dials, which all block while setting up the muxer
errCh := make(chan error, num)
var wg sync.WaitGroup
for i := 0; i < num; i++ {
for range num {
wg.Add(1)
go func() {
defer wg.Done()
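
For completeness, a runnable sketch (hypothetical worker count and channel, not from this commit) of the fan-out shape these test hunks use once the counter loop becomes range over an integer:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const num = 5
	errCh := make(chan error, num) // buffered so workers never block on send
	var wg sync.WaitGroup
	for range num {
		wg.Add(1)
		go func() {
			defer wg.Done()
			errCh <- nil // each worker reports its (nil) result
		}()
	}
	wg.Wait()
	close(errCh)
	failures := 0
	for err := range errCh {
		if err != nil {
			failures++
		}
	}
	fmt.Println("failures:", failures) // 0
}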

@@ -153,7 +153,7 @@ func (an *AutoNAT) Start(h host.Host) error {
an.host = h
// Listen on event.EvtPeerProtocolsUpdated, event.EvtPeerConnectednessChanged
// event.EvtPeerIdentificationCompleted to maintain our set of autonat supporting peers.
sub, err := an.host.EventBus().Subscribe([]interface{}{
sub, err := an.host.EventBus().Subscribe([]any{
new(event.EvtPeerProtocolsUpdated),
new(event.EvtPeerConnectednessChanged),
new(event.EvtPeerIdentificationCompleted),

@@ -109,7 +109,7 @@ func TestClientRequest(t *testing.T) {

addrs := an.host.Addrs()
addrbs := make([][]byte, len(addrs))
for i := 0; i < len(addrs); i++ {
for i := range addrs {
addrbs[i] = addrs[i].Bytes()
}

@@ -739,10 +739,7 @@ func FuzzClient(f *testing.F) {
}
ips = ips[1:]
var x, y int64
split := 128 / 8
if len(ips) < split {
split = len(ips)
}
split := min(len(ips), 128/8)
var b [8]byte
copy(b[:], ips[:split])
x = int64(binary.LittleEndian.Uint64(b[:]))
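
A minimal sketch (illustrative values) of the min builtin, predeclared since Go 1.21, which replaces the four-line clamp removed in this hunk:

package main

import "fmt"

func main() {
	ips := []byte{1, 2, 3, 4, 5}
	// Clamp the split point to the available length: equivalent to
	// "split := 128 / 8; if len(ips) < split { split = len(ips) }".
	split := min(len(ips), 128/8)
	fmt.Println(split) // 5
}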

@@ -27,7 +27,7 @@ import (

func newTestRequests(addrs []ma.Multiaddr, sendDialData bool) (reqs []Request) {
reqs = make([]Request, len(addrs))
for i := 0; i < len(addrs); i++ {
for i := range addrs {
reqs[i] = Request{Addr: addrs[i], SendDialData: sendDialData}
}
return
@@ -106,7 +106,7 @@ func TestServerInvalidAddrsRejected(t *testing.T) {
defer an.host.Close()

var addrs []ma.Multiaddr
for i := 0; i < 100; i++ {
for i := range 100 {
addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)))
}
addrs = append(addrs, c.host.Addrs()...)
@@ -125,7 +125,7 @@ func TestServerInvalidAddrsRejected(t *testing.T) {
defer an.host.Close()

var addrs []ma.Multiaddr
for i := 0; i < 10000; i++ {
for i := range 10000 {
addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)))
}
addrs = append(addrs, c.host.Addrs()...)
@@ -215,7 +215,7 @@ func TestServerMaxConcurrentRequestsPerPeer(t *testing.T) {
errChan := make(chan error)
const n = 10
// num concurrentRequests will stall and n will fail
for i := 0; i < concurrentRequests+n; i++ {
for range concurrentRequests + n {
go func() {
_, err := c.GetReachability(context.Background(), []Request{{Addr: c.host.Addrs()[0], SendDialData: false}})
errChan <- err
@@ -223,7 +223,7 @@ func TestServerMaxConcurrentRequestsPerPeer(t *testing.T) {
}

// check N failures
for i := 0; i < n; i++ {
for i := range n {
select {
case err := <-errChan:
require.Error(t, err)
@@ -237,7 +237,7 @@ func TestServerMaxConcurrentRequestsPerPeer(t *testing.T) {

close(stallChan) // complete stalled requests
// check concurrentRequests failures, as we won't send dial data
for i := 0; i < concurrentRequests; i++ {
for i := range concurrentRequests {
select {
case err := <-errChan:
require.Error(t, err)
@@ -284,7 +284,7 @@ func TestServerDataRequestJitter(t *testing.T) {
}
}

for i := 0; i < 10; i++ {
for range 10 {
st := time.Now()
res, err := c.GetReachability(context.Background(), []Request{{Addr: quicAddr, SendDialData: true}, {Addr: tcpAddr}})
took := time.Since(st)
@@ -402,7 +402,7 @@ func TestRateLimiterConcurrentRequests(t *testing.T) {
for concurrentRequests := 1; concurrentRequests <= N; concurrentRequests++ {
cl := test.NewMockClock()
r := rateLimiter{RPM: 10 * Peers * N, PerPeerRPM: 10 * Peers * N, DialDataRPM: 10 * Peers * N, now: cl.Now, MaxConcurrentRequestsPerPeer: concurrentRequests}
for p := 0; p < Peers; p++ {
for p := range Peers {
for i := 0; i < concurrentRequests; i++ {
require.True(t, r.Accept(peer.ID(fmt.Sprintf("peer-%d", p))))
}
@@ -422,21 +422,21 @@ func TestRateLimiterConcurrentRequests(t *testing.T) {

func TestRateLimiterStress(t *testing.T) {
cl := test.NewMockClock()
for i := 0; i < 10; i++ {
for i := range 10 {
r := rateLimiter{RPM: 20 + i, PerPeerRPM: 10 + i, DialDataRPM: i, MaxConcurrentRequestsPerPeer: 1, now: cl.Now}

peers := make([]peer.ID, 10+i)
for i := 0; i < len(peers); i++ {
for i := range peers {
peers[i] = peer.ID(fmt.Sprintf("peer-%d", i))
}
peerSuccesses := make([]atomic.Int64, len(peers))
var success, dialDataSuccesses atomic.Int64
var wg sync.WaitGroup
for k := 0; k < 5; k++ {
for range 5 {
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 2*60; i++ {
for range 2 * 60 {
for j, p := range peers {
if r.Accept(p) {
success.Add(1)

@@ -29,7 +29,7 @@ func (e relayError) Error() string {
return e.err
}

func newRelayError(t string, args ...interface{}) error {
func newRelayError(t string, args ...any) error {
return relayError{err: fmt.Sprintf(t, args...)}
}
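
A small self-contained sketch (hypothetical error type, not the relay's) showing that a variadic ...any parameter forwards to fmt.Sprintf exactly as ...interface{} did:

package main

import "fmt"

type wrappedError struct{ err string }

func (e wrappedError) Error() string { return e.err }

// newWrappedError mirrors the shape of the constructor in the hunk above:
// the ...any arguments are passed straight through to fmt.Sprintf.
func newWrappedError(format string, args ...any) error {
	return wrappedError{err: fmt.Sprintf(format, args...)}
}

func main() {
	fmt.Println(newWrappedError("dial to %s failed after %d attempts", "peerA", 3))
}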

@@ -182,7 +182,7 @@ func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
var stat network.ConnStats
if limit := msg.GetLimit(); limit != nil {
stat.Limited = true
stat.Extra = make(map[interface{}]interface{})
stat.Extra = make(map[any]any)
stat.Extra[StatLimitDuration] = time.Duration(limit.GetDuration()) * time.Second
stat.Extra[StatLimitData] = limit.GetData()
}
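
Purely as an illustration (generic keys and values, not the libp2p stat keys), map[any]any is identical to map[interface{}]interface{}; only the spelling changes:

package main

import (
	"fmt"
	"time"
)

func main() {
	extra := make(map[any]any)
	// Heterogeneous keys and values, as used for connection-stat annotations.
	extra["limit-duration"] = 2 * time.Minute
	extra["limit-data"] = uint64(1 << 17)
	fmt.Println(extra)
}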

@@ -70,7 +70,7 @@ func (c *Client) handleStreamV2(s network.Stream) {
var stat network.ConnStats
if limit := msg.GetLimit(); limit != nil {
stat.Limited = true
stat.Extra = make(map[interface{}]interface{})
stat.Extra = make(map[any]any)
stat.Extra[StatLimitDuration] = time.Duration(limit.GetDuration()) * time.Second
stat.Extra[StatLimitData] = limit.GetData()
}

@@ -40,7 +40,7 @@ func TestConstraints(t *testing.T) {
res := infResources()
res.MaxReservations = limit
c := newConstraints(res)
for i := 0; i < limit; i++ {
for range limit {
if err := c.Reserve(test.RandPeerIDFatal(t), randomIPv4Addr(t), expiry); err != nil {
t.Fatal(err)
}
@@ -77,7 +77,7 @@ func TestConstraints(t *testing.T) {
res := infResources()
res.MaxReservationsPerIP = limit
c := newConstraints(res)
for i := 0; i < limit; i++ {
for range limit {
if err := c.Reserve(test.RandPeerIDFatal(t), ip, expiry); err != nil {
t.Fatal(err)
}
@@ -104,7 +104,7 @@ func TestConstraints(t *testing.T) {
res.MaxReservationsPerASN = limit
c := newConstraints(res)
const ipv6Prefix = "2a03:2880:f003:c07:face:b00c::"
for i := 0; i < limit; i++ {
for i := range limit {
addr := getAddr(t, net.ParseIP(fmt.Sprintf("%s%d", ipv6Prefix, i+1)))
if err := c.Reserve(test.RandPeerIDFatal(t), addr, expiry); err != nil {
t.Fatal(err)
@@ -130,7 +130,7 @@ func TestConstraintsCleanup(t *testing.T) {
MaxReservationsPerASN: math.MaxInt32,
}
c := newConstraints(res)
for i := 0; i < limit; i++ {
for range limit {
if err := c.Reserve(test.RandPeerIDFatal(t), randomIPv4Addr(t), expiry); err != nil {
t.Fatal(err)
}

@@ -31,7 +31,7 @@ import (
)

func getNetHosts(t *testing.T, _ context.Context, n int) (hosts []host.Host, upgraders []transport.Upgrader) {
for i := 0; i < n; i++ {
for range n {
privk, pubk, err := crypto.GenerateKeyPair(crypto.Ed25519, 0)
if err != nil {
t.Fatal(err)
@@ -156,7 +156,7 @@ func TestBasicRelay(t *testing.T) {
t.Fatal(err)
}
for {
var e interface{}
var e any
select {
case e = <-sub.Out():
case <-time.After(2 * time.Second):
@@ -289,7 +289,7 @@ func TestRelayLimitData(t *testing.T) {
defer close(rch)

buf := make([]byte, 1024)
for i := 0; i < 3; i++ {
for range 3 {
n, err := s.Read(buf)
if err != nil {
t.Fatal(err)
@@ -347,7 +347,7 @@ func TestRelayLimitData(t *testing.T) {
}

buf := make([]byte, 1024)
for i := 0; i < 3; i++ {
for range 3 {
if _, err := rand.Read(buf); err != nil {
t.Fatal(err)
}

@@ -78,11 +78,11 @@ type EventTracer interface {
}

type Event struct {
Timestamp int64 // UNIX nanos
Peer peer.ID // local peer ID
Remote peer.ID // remote peer ID
Type string // event type
Evt interface{} // the actual event
Timestamp int64 // UNIX nanos
Peer peer.ID // local peer ID
Remote peer.ID // remote peer ID
Type string // event type
Evt any // the actual event
}

// Event Types

@@ -563,7 +563,7 @@ func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) erro

func readAllIDMessages(r pbio.Reader, finalMsg proto.Message) error {
mes := &pb.Identify{}
for i := 0; i < maxMessages; i++ {
for range maxMessages {
switch err := r.ReadMsg(mes); err {
case io.EOF:
return nil
@@ -585,7 +585,7 @@ func (ids *idService) updateSnapshot() (updated bool) {
slices.SortFunc(addrs, func(a, b ma.Multiaddr) int { return bytes.Compare(a.Bytes(), b.Bytes()) })

usedSpace := len(ids.ProtocolVersion) + len(ids.UserAgent)
for i := 0; i < len(protos); i++ {
for i := range protos {
usedSpace += len(protos[i])
}
addrs = trimHostAddrList(addrs, maxOwnIdentifyMsgSize-usedSpace-256) // 256 bytes of buffer
@@ -702,11 +702,8 @@ func diff(a, b []protocol.ID) (added, removed []protocol.ID) {
// This is O(n^2), but it's fine because the slices are small.
for _, x := range b {
var found bool
for _, y := range a {
if x == y {
found = true
break
}
if slices.Contains(a, x) {
found = true
}
if !found {
added = append(added, x)
@@ -714,11 +711,8 @@ func diff(a, b []protocol.ID) (added, removed []protocol.ID) {
}
for _, x := range a {
var found bool
for _, y := range b {
if x == y {
found = true
break
}
if slices.Contains(b, x) {
found = true
}
if !found {
removed = append(removed, x)
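
A self-contained sketch (simplified: string protocol IDs, and the found flag folded into the condition) of the symmetric-difference helper once both inner loops use slices.Contains:

package main

import (
	"fmt"
	"slices"
)

// diff returns the elements added to and removed from a when moving to b.
// Quadratic, which is fine for short protocol lists.
func diff(a, b []string) (added, removed []string) {
	for _, x := range b {
		if !slices.Contains(a, x) {
			added = append(added, x)
		}
	}
	for _, x := range a {
		if !slices.Contains(b, x) {
			removed = append(removed, x)
		}
	}
	return added, removed
}

func main() {
	added, removed := diff([]string{"/ping/1.0.0", "/id/1.0.0"}, []string{"/id/1.0.0", "/relay/2.0.0"})
	fmt.Println(added, removed) // [/relay/2.0.0] [/ping/1.0.0]
}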

@@ -214,7 +214,7 @@ func TestIDService(t *testing.T) {
}
}

func assertCorrectEvtPeerIdentificationCompleted(t *testing.T, evtAny interface{}, other host.Host) {
func assertCorrectEvtPeerIdentificationCompleted(t *testing.T, evtAny any, other host.Host) {
t.Helper()
evt := evtAny.(event.EvtPeerIdentificationCompleted)
require.NotNil(t, evt.Conn)
@@ -620,7 +620,7 @@ func TestLargeIdentifyMessage(t *testing.T) {

// add protocol strings to make the message larger
// about 2K of protocol strings
for i := 0; i < 500; i++ {
for i := range 500 {
r := protocol.ID(fmt.Sprintf("rand%d", i))
h1.SetStreamHandler(r, func(network.Stream) {})
h2.SetStreamHandler(r, func(network.Stream) {})
@@ -717,7 +717,7 @@ func TestLargeIdentifyMessage(t *testing.T) {
func randString(n int) string {
chars := "abcdefghijklmnopqrstuvwxyz"
buf := make([]byte, n)
for i := 0; i < n; i++ {
for i := range n {
buf[i] = chars[rand.Intn(len(chars))]
}
return string(buf)
@@ -731,7 +731,7 @@ func TestLargePushMessage(t *testing.T) {

// add protocol strings to make the message larger
// about 3K of protocol strings
for i := 0; i < 100; i++ {
for i := range 100 {
r := protocol.ID(fmt.Sprintf("%s-%d", randString(30), i))
h1.SetStreamHandler(r, func(network.Stream) {})
h2.SetStreamHandler(r, func(network.Stream) {})

@@ -43,7 +43,7 @@ func testPing(t *testing.T, ps *ping.PingService, p peer.ID) {
defer cancel()
ts := ps.Ping(pctx, p)

for i := 0; i < 5; i++ {
for range 5 {
select {
case res := <-ts:
require.NoError(t, res.Error)

@@ -167,7 +167,7 @@ func benchDataTransfer(b *benchenv, dataSize int64, m testMode) {

plainTextBufs := make([][]byte, 61)
writeTos := make(map[int]io.Writer)
for i := 0; i < len(plainTextBufs); i++ {
for i := range plainTextBufs {
var rbuf []byte
// plaintext will be 2 KB to 62 KB
plainTextBufs[i] = make([]byte, (i+2)*1024)

@@ -107,10 +107,7 @@ func (s *secureSession) Write(data []byte) (int, error) {
defer pool.Put(cbuf)

for written < total {
end := written + MaxPlaintextLength
if end > total {
end = total
}
end := min(written+MaxPlaintextLength, total)

b, err := s.encrypt(cbuf[:LengthPrefixLength], data[written:end])
if err != nil {
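
To illustrate the second min rewrite above, a standalone sketch (hypothetical chunk size) of walking a buffer in fixed-size chunks, where min clamps the final, shorter chunk:

package main

import "fmt"

func main() {
	const maxChunk = 4
	data := []byte("hello, go!") // 10 bytes: chunks of 4, 4, 2
	for written := 0; written < len(data); {
		// min keeps the last chunk from running past the end of data.
		end := min(written+maxChunk, len(data))
		fmt.Printf("chunk %q\n", data[written:end])
		written = end
	}
}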

Some files were not shown because too many files have changed in this diff.