Skip to content

Commit

Permalink
code review changes
Browse files Browse the repository at this point in the history
- maps.Keys()
- docs
- parallel_best resolver String()
- remove expensive & unnecessary log
- return err from retry failed
- pre-alloc pickRandom chosenResolvers
- fix context comment
- upstreamResolverStatus.resolve(): add resolver to err
  • Loading branch information
DerRockWolf committed Nov 18, 2023
1 parent 7ce9375 commit deee9dc
Show file tree
Hide file tree
Showing 4 changed files with 28 additions and 53 deletions.
3 changes: 1 addition & 2 deletions docs/configuration.md
Original file line number Diff line number Diff line change
Expand Up @@ -146,8 +146,7 @@ Currently available strategies:
- `random`: blocky picks one random (weighted) resolver from the upstream group for each query and if successful, returns its response.
If the selected resolver fails to respond, a second one is picked to which the query is sent.
The weighting is identical to the `parallel_best` strategy.
Although the `random` strategy might be slower than the `parallel_best` strategy, it offers a larger increase of privacy.
(When using 10 upstream servers, each upstream will get on average 10% of the DNS requests)
Although the `random` strategy might be slower than the `parallel_best` strategy, it offers more privacy since each request is sent to a single upstream.
- `strict`: blocky forwards the request in a strict order. If the first upstream does not respond, the second is asked, and so on.

!!! example
Expand Down
22 changes: 5 additions & 17 deletions resolver/blocking_resolver.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,15 @@ import (
"context"
"fmt"
"net"
"slices"
"sort"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/0xERR0R/blocky/cache/expirationcache"
"golang.org/x/exp/maps"

"github.com/hashicorp/go-multierror"

Expand Down Expand Up @@ -206,21 +208,11 @@ func (r *BlockingResolver) RefreshLists() error {
return err.ErrorOrNil()
}

//nolint:prealloc
func (r *BlockingResolver) retrieveAllBlockingGroups() []string {
groups := make(map[string]bool, len(r.cfg.BlackLists))

for group := range r.cfg.BlackLists {
groups[group] = true
}

var result []string
for k := range groups {
result = append(result, k)
}
result := maps.Keys(r.cfg.BlackLists)

result = append(result, "default")
sort.Strings(result)
slices.Sort(result)

return result
}
Expand Down Expand Up @@ -615,11 +607,7 @@ func (r *BlockingResolver) queryForFQIdentifierIPs(identifier string) (*[]net.IP
}

func (r *BlockingResolver) initFQDNIPCache() {
identifiers := make([]string, 0)

for identifier := range r.clientGroupsBlock {
identifiers = append(identifiers, identifier)
}
identifiers := maps.Keys(r.clientGroupsBlock)

for _, identifier := range identifiers {
if isFQDN(identifier) {
Expand Down
9 changes: 2 additions & 7 deletions resolver/bootstrap.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/miekg/dns"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)

const (
Expand Down Expand Up @@ -301,13 +302,7 @@ func newBootstrapedResolvers(b *Bootstrap, cfg config.BootstrapDNSConfig) (boots
}

func (br bootstrapedResolvers) Resolvers() []Resolver {
resolvers := make([]Resolver, 0, len(br))

for resolver := range br {
resolvers = append(resolvers, resolver)
}

return resolvers
return maps.Keys(br)
}

type IPSet struct {
Expand Down
47 changes: 20 additions & 27 deletions resolver/parallel_best_resolver.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,8 +56,12 @@ func newUpstreamResolverStatus(resolver Resolver) *upstreamResolverStatus {
func (r *upstreamResolverStatus) resolve(req *model.Request, ch chan<- requestResponse) {
resp, err := r.resolver.Resolve(req)
if err != nil {
// update the last error time
r.lastErrorTime.Store(time.Now())
// Ignore `Canceled`: resolver lost the race, not an error
if !errors.Is(err, context.Canceled) {
r.lastErrorTime.Store(time.Now())
}

err = fmt.Errorf("%s: %w", r.resolver, err)
}

ch <- requestResponse{
Expand Down Expand Up @@ -134,14 +138,14 @@ func (r *ParallelBestResolver) Name() string {
return r.String()
}

// TODO: add resolverCount & retryWithDifferentResolver to output
func (r *ParallelBestResolver) String() string {
result := make([]string, len(r.resolvers))
for i, s := range r.resolvers {
result[i] = fmt.Sprintf("%s", s.resolver)
}

return fmt.Sprintf("%s upstreams '%s (%s)'", parallelResolverType, r.groupName, strings.Join(result, ","))
return fmt.Sprintf("%s (resolverCount: %d, retryWithDifferentResolver: %t) upstreams '%s (%s)'",
parallelResolverType, r.resolverCount, r.retryWithDifferentResolver, r.groupName, strings.Join(result, ","))
}

// Resolve sends the query request to multiple upstream resolvers and returns the fastest result
Expand Down Expand Up @@ -170,16 +174,6 @@ func (r *ParallelBestResolver) Resolve(request *model.Request) (*model.Response,
resolvers := pickRandom(r.resolvers, r.resolverCount)
ch := make(chan requestResponse, len(resolvers))

// build usedResolver log string
var usedResolvers string
for _, resolver := range resolvers {
usedResolvers = fmt.Sprintf("%q,", resolver.resolver)
}

usedResolvers = strings.TrimSuffix(usedResolvers, ",")

logger.Debug("using " + usedResolvers + " as resolver")

for _, resolver := range resolvers {
logger.WithField("resolver", resolver.resolver).Debug("delegating to resolver")

Expand All @@ -192,8 +186,7 @@ func (r *ParallelBestResolver) Resolve(request *model.Request) (*model.Response,
}

if !r.retryWithDifferentResolver {
return nil, fmt.Errorf("resolution was not successful, used resolvers: %s errors: %v",
usedResolvers, collectedErrors)
return nil, fmt.Errorf("resolution failed: %w", errors.Join(collectedErrors...))
}

return r.retryWithDifferent(logger, request, resolvers)
Expand All @@ -202,21 +195,21 @@ func evaluateResponses(
func evaluateResponses(
ctx context.Context, logger *logrus.Entry, ch chan requestResponse, resolvers []*upstreamResolverStatus,
) (*model.Response, []error) {
var collectedErrors []error
collectedErrors := make([]error, 0, len(resolvers))

for len(collectedErrors) < len(resolvers) {
select {
case <-ctx.Done():
// this context is currently only set & canceled if resolverCount == 1
field := logrus.Fields{"resolver": resolvers[0].resolver}
// this context currently only has a deadline when resolverCount == 1
if errors.Is(ctx.Err(), context.DeadlineExceeded) {
logger.WithFields(field).Debug("upstream exceeded timeout, trying other upstream")
logger.WithField("resolver", resolvers[0].resolver).
Debug("upstream exceeded timeout, trying other upstream")
resolvers[0].lastErrorTime.Store(time.Now())
}
case result := <-ch:
if result.err != nil {
logger.Debug("resolution failed from resolver, cause: ", result.err)
collectedErrors = append(collectedErrors, result.err)
collectedErrors = append(collectedErrors, fmt.Errorf("resolver: %q error: %w", *result.resolver, result.err))
} else {
logger.WithFields(logrus.Fields{
"resolver": *result.resolver,
Expand Down Expand Up @@ -244,9 +237,7 @@ func (r *ParallelBestResolver) retryWithDifferent(

result := <-ch
if result.err != nil {
logger.Debug("resolution failed from resolver, cause: ", result.err)

return nil, errors.New("resolution was not successful, no resolver returned answer in time")
return nil, fmt.Errorf("resolution retry failed: %w", result.err)
}

logger.WithFields(logrus.Fields{
Expand All @@ -258,12 +249,14 @@ }
}

// pickRandom picks n (resolverCount) different random resolvers from the given resolver pool
func pickRandom(resolvers []*upstreamResolverStatus, resolverCount int) (choosenResolvers []*upstreamResolverStatus) {
func pickRandom(resolvers []*upstreamResolverStatus, resolverCount int) []*upstreamResolverStatus {
chosenResolvers := make([]*upstreamResolverStatus, 0, resolverCount)

for i := 0; i < resolverCount; i {
choosenResolvers = append(choosenResolvers, weightedRandom(resolvers, choosenResolvers))
chosenResolvers = append(chosenResolvers, weightedRandom(resolvers, chosenResolvers))
}

return
return chosenResolvers
}

func weightedRandom(in, excludedResolvers []*upstreamResolverStatus) *upstreamResolverStatus {
Expand Down

0 comments on commit deee9dc

Please sign in to comment.