Synced Race Condition Attack

dev
Mzack9999 2020-10-19 02:57:30 +02:00
parent 00ce870239
commit a136b118ef
4 changed files with 115 additions and 1 deletion

View File

@ -20,6 +20,7 @@ require (
github.com/remeh/sizedwaitgroup v1.0.0
github.com/spaolacci/murmur3 v1.1.0
github.com/vbauerster/mpb/v5 v5.3.0
go.uber.org/atomic v1.7.0
go.uber.org/ratelimit v0.1.0
golang.org/x/net v0.0.0-20201016165138-7b1cca2348c0
gopkg.in/yaml.v2 v2.3.0

View File

@ -70,6 +70,8 @@ github.com/vbauerster/mpb v1.1.3 h1:IRgic8VFaURXkW0VxDLkNOiNaAgtw0okB2YIaVvJDI4=
github.com/vbauerster/mpb v3.4.0+incompatible h1:mfiiYw87ARaeRW6x5gWwYRUawxaW1tLAD8IceomUCNw=
github.com/vbauerster/mpb/v5 v5.3.0 h1:vgrEJjUzHaSZKDRRxul5Oh4C72Yy/5VEMb0em+9M0mQ=
github.com/vbauerster/mpb/v5 v5.3.0/go.mod h1:4yTkvAb8Cm4eylAp6t0JRq6pXDkFJ4krUlDqWYkakAs=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/ratelimit v0.1.0 h1:U2AruXqeTb4Eh9sYQSTrMhH8Cb7M0Ian2ibBOnBcnAw=
go.uber.org/ratelimit v0.1.0/go.mod h1:2X8KaoNd1J0lZV+PxJk/5+DGbO/tpwLR1m++a7FnB/Y=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=

View File

@ -4,17 +4,20 @@ import (
"bufio"
"context"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"time"
"github.com/Knetic/govaluate"
"github.com/projectdiscovery/nuclei/v2/pkg/extractors"
"github.com/projectdiscovery/nuclei/v2/pkg/generators"
"github.com/projectdiscovery/nuclei/v2/pkg/matchers"
"github.com/projectdiscovery/nuclei/v2/pkg/syncedreadcloser"
"github.com/projectdiscovery/rawhttp"
retryablehttp "github.com/projectdiscovery/retryablehttp-go"
)
@ -77,6 +80,9 @@ type BulkHTTPRequest struct {
DisableAutoHostname bool `yaml:"disable-automatic-host-header,omitempty"`
// DisableAutoContentLength Enable/Disable Content-Length header for unsafe raw requests
DisableAutoContentLength bool `yaml:"disable-automatic-content-length-header,omitempty"`
// Race determines if all the requests have to be attempted at the same time
// The minimum number of requests is determined by threads
Race bool `yaml:"race,omitempty"`
}
// GetMatchersCondition returns the condition for the matcher
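
Since the new flag is exposed through the usual YAML tags, enabling the attack from a template is just a matter of setting race: true alongside the existing options. Below is a minimal, hypothetical sketch (not the nuclei engine itself) of how such a fragment decodes, using a trimmed-down stand-in struct with only the two fields relevant here and assuming a threads option as referenced by the comment above:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// raceOptions is a hypothetical, trimmed-down stand-in for BulkHTTPRequest,
// keeping only the two fields relevant to the synced race mode.
type raceOptions struct {
	// Threads is assumed to be the existing option that sets the minimum
	// number of simultaneous attempts when race mode is enabled.
	Threads int `yaml:"threads,omitempty"`
	// Race mirrors the field added in this commit.
	Race bool `yaml:"race,omitempty"`
}

func main() {
	// Made-up template fragment enabling the synced race condition attack.
	fragment := []byte("threads: 10\nrace: true\n")

	var opts raceOptions
	if err := yaml.Unmarshal(fragment, &opts); err != nil {
		panic(err)
	}
	fmt.Printf("race=%v threads=%d\n", opts.Race, opts.Threads)
}
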
@ -235,7 +241,15 @@ func (r *BulkHTTPRequest) handleRawWithPaylods(ctx context.Context, raw, baseURL
}
// retryablehttp
req, err := http.NewRequestWithContext(ctx, rawRequest.Method, rawRequest.FullURL, strings.NewReader(rawRequest.Data))
var body io.ReadCloser
body = ioutil.NopCloser(strings.NewReader(rawRequest.Data))
if r.Race {
// More or less this ensures that all requests hit the endpoint at approximately the same time
// TODO: sync internally upon writing the last request byte
body = syncedreadcloser.NewOpenGateWithTimeout(body, time.Duration(2)*time.Second)
}
req, err := http.NewRequestWithContext(ctx, rawRequest.Method, rawRequest.FullURL, body)
if err != nil {
return nil, err
}
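
To illustrate the idea behind the gated body, here is a minimal sketch, outside of the nuclei codebase, of firing several copies of the same request through net/http with each body wrapped in NewOpenGateWithTimeout; the target URL and payload are made up. Each request goes out immediately, but the final bytes of every body are withheld until the 2-second gate opens, so the server processes all attempts at approximately the same time:

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
	"sync"
	"time"

	"github.com/projectdiscovery/nuclei/v2/pkg/syncedreadcloser"
)

// Sketch only: fire `attempts` copies of the same POST and let the gated
// bodies hold back their last bytes until the 2-second timeout expires.
func main() {
	const attempts = 10
	target := "http://127.0.0.1:8080/coupon/redeem" // hypothetical endpoint
	payload := "code=FREE100"                       // hypothetical body

	client := &http.Client{}
	var wg sync.WaitGroup

	for i := 0; i < attempts; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()

			// Each request gets its own gated body, mirroring handleRawWithPaylods.
			body := syncedreadcloser.NewOpenGateWithTimeout(
				ioutil.NopCloser(strings.NewReader(payload)),
				2*time.Second,
			)

			req, err := http.NewRequestWithContext(context.Background(), http.MethodPost, target, body)
			if err != nil {
				fmt.Println(id, err)
				return
			}
			req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

			resp, err := client.Do(req)
			if err != nil {
				fmt.Println(id, err)
				return
			}
			resp.Body.Close()
			fmt.Println(id, resp.Status)
		}(i)
	}
	wg.Wait()
}
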

View File

@ -0,0 +1,97 @@
package syncedreadcloser
import (
"fmt"
"io"
"io/ioutil"
"time"
)
// SyncedReadCloser is an io.ReadCloser, compatible with io.ReadSeeker, that can hold back the final bytes of its payload until a gate is opened
type SyncedReadCloser struct {
data []byte
p int64
length int64
opengate chan struct{}
enableBlocking bool
}
// New creates a SyncedReadCloser by buffering the whole content of r, then closing it
func New(r io.ReadCloser) *SyncedReadCloser {
var (
s SyncedReadCloser
err error
)
s.data, err = ioutil.ReadAll(r)
if err != nil {
return nil
}
r.Close()
s.length = int64(len(s.data))
s.opengate = make(chan struct{})
s.enableBlocking = true
return &s
}
// NewOpenGateWithTimeout creates a SyncedReadCloser whose gate opens automatically after the duration d
func NewOpenGateWithTimeout(r io.ReadCloser, d time.Duration) *SyncedReadCloser {
s := New(r)
s.OpenGateAfter(d)
return s
}
// SetOpenGate enables or disables blocking on the gate
func (s *SyncedReadCloser) SetOpenGate(status bool) {
s.enableBlocking = status
}
// OpenGate releases a reader blocked on the gate
func (s *SyncedReadCloser) OpenGate() {
s.opengate <- struct{}{}
}
// OpenGateAfter releases a blocked reader once the duration d has elapsed
func (s *SyncedReadCloser) OpenGateAfter(d time.Duration) {
time.AfterFunc(d, func() {
s.opengate <- struct{}{}
})
}
// Seek implements io.Seeker over the buffered data (io.SeekStart always rewinds to the beginning)
func (s *SyncedReadCloser) Seek(offset int64, whence int) (int64, error) {
var err error
switch whence {
case io.SeekStart:
s.p = 0
case io.SeekCurrent:
if s.p+offset < s.length {
s.p += offset
break
}
err = fmt.Errorf("offset is too big")
case io.SeekEnd:
if s.length-offset >= 0 {
s.p = s.length - offset
break
}
err = fmt.Errorf("offset is too big")
}
return s.p, err
}
// Read implements io.Reader, holding back the final chunk of data until the gate is opened
func (s *SyncedReadCloser) Read(p []byte) (n int, err error) {
// if this read would reach the end of the data, block until the gate is opened (when blocking is enabled)
if s.p+int64(len(p)) >= s.length && s.enableBlocking {
<-s.opengate
}
n = copy(p, s.data[s.p:])
s.p += int64(n)
if s.p == s.length {
err = io.EOF
}
return n, err
}
func (s *SyncedReadCloser) Close() error {
return nil
}
func (s *SyncedReadCloser) Len() int {
return int(s.length)
}
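
A tiny usage example of the gate semantics in isolation: the final read of the payload blocks until OpenGate is called (in the HTTP path above, the timeout variant opens it instead). This is an illustrative sketch, not part of the commit:

package main

import (
	"fmt"
	"io/ioutil"
	"strings"
	"time"

	"github.com/projectdiscovery/nuclei/v2/pkg/syncedreadcloser"
)

func main() {
	s := syncedreadcloser.New(ioutil.NopCloser(strings.NewReader("last-byte sync")))

	done := make(chan struct{})
	go func() {
		data, _ := ioutil.ReadAll(s) // blocks on the last chunk until the gate opens
		fmt.Printf("read %q\n", data)
		close(done)
	}()

	time.Sleep(500 * time.Millisecond) // the reader is now parked on the gate
	s.OpenGate()                       // release it
	<-done
}
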