mirror of https://github.com/daffainfo/nuclei.git
Merge pull request #371 from projectdiscovery/feature-race-conditions
Synced Race Condition Attack
commit bf54f9f1d7
@@ -146,6 +146,48 @@ func NewHTTPExecuter(options *HTTPOptions) (*HTTPExecuter, error) {
 	return executer, nil
 }
 
+func (e *HTTPExecuter) ExecuteRaceRequest(reqURL string) *Result {
+	result := &Result{
+		Matches:     make(map[string]interface{}),
+		Extractions: make(map[string]interface{}),
+	}
+
+	dynamicvalues := make(map[string]interface{})
+
+	// verify if the URL is already being processed
+	if e.bulkHTTPRequest.HasGenerator(reqURL) {
+		return result
+	}
+
+	e.bulkHTTPRequest.CreateGenerator(reqURL)
+
+	// Workers that keep enqueuing new requests
+	maxWorkers := e.bulkHTTPRequest.RaceNumberRequests
+	swg := sizedwaitgroup.New(maxWorkers)
+	for i := 0; i < e.bulkHTTPRequest.RaceNumberRequests; i++ {
+		swg.Add()
+		// base request
+		request, err := e.bulkHTTPRequest.MakeHTTPRequest(reqURL, dynamicvalues, e.bulkHTTPRequest.Current(reqURL))
+		if err != nil {
+			result.Error = err
+			return result
+		}
+		go func(httpRequest *requests.HTTPRequest) {
+			defer swg.Done()
+
+			// If the request was built correctly then execute it
+			err = e.handleHTTP(reqURL, httpRequest, dynamicvalues, result, "")
+			if err != nil {
+				result.Error = errors.Wrap(err, "could not handle http request")
+			}
+		}(request)
+	}
+
+	swg.Wait()
+
+	return result
+}
+
 func (e *HTTPExecuter) ExecuteParallelHTTP(p progress.IProgress, reqURL string) *Result {
 	result := &Result{
 		Matches:     make(map[string]interface{}),
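Note: ExecuteRaceRequest above fans out RaceNumberRequests copies of the same request through a sized wait group and waits for all of them. A minimal standalone sketch of that fan-out pattern follows; it assumes the github.com/remeh/sizedwaitgroup package (the import path is not visible in this diff) and a placeholder target URL.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/remeh/sizedwaitgroup"
)

func main() {
	const raceCount = 10 // analogous to RaceNumberRequests

	// Bounded wait group: at most raceCount goroutines in flight.
	swg := sizedwaitgroup.New(raceCount)
	for i := 0; i < raceCount; i++ {
		swg.Add()
		go func(id int) {
			defer swg.Done()
			// Each worker sends an identical request, approximating the
			// simultaneous hit that a race-condition attack relies on.
			resp, err := http.Get("http://127.0.0.1:8080/coupon/redeem")
			if err != nil {
				fmt.Println(id, "error:", err)
				return
			}
			resp.Body.Close()
			fmt.Println(id, "status:", resp.StatusCode)
		}(i)
	}
	swg.Wait()
}
```

With maxWorkers equal to the request count, the group never throttles; as in the diff, it only waits for all goroutines to finish.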
@@ -196,7 +238,7 @@ func (e *HTTPExecuter) ExecuteParallelHTTP(p progress.IProgress, reqURL string)
 		return result
 	}
 
-func (e *HTTPExecuter) ExecuteTurboHTTP(p progress.IProgress, reqURL string) *Result {
+func (e *HTTPExecuter) ExecuteTurboHTTP(reqURL string) *Result {
 	result := &Result{
 		Matches:     make(map[string]interface{}),
 		Extractions: make(map[string]interface{}),
@@ -209,7 +251,6 @@ func (e *HTTPExecuter) ExecuteTurboHTTP(p progress.IProgress, reqURL string) *Result {
 		return result
 	}
 
-	remaining := e.bulkHTTPRequest.GetRequestCount()
 	e.bulkHTTPRequest.CreateGenerator(reqURL)
 
 	// need to extract the target from the url
@@ -240,7 +281,6 @@ func (e *HTTPExecuter) ExecuteTurboHTTP(p progress.IProgress, reqURL string) *Result {
 		request, err := e.bulkHTTPRequest.MakeHTTPRequest(reqURL, dynamicvalues, e.bulkHTTPRequest.Current(reqURL))
 		if err != nil {
 			result.Error = err
-			p.Drop(remaining)
 		} else {
 			swg.Add()
 			go func(httpRequest *requests.HTTPRequest) {
@@ -254,7 +294,6 @@ func (e *HTTPExecuter) ExecuteTurboHTTP(p progress.IProgress, reqURL string) *Result {
 				if err != nil {
 					e.traceLog.Request(e.template.ID, reqURL, "http", err)
 					result.Error = errors.Wrap(err, "could not handle http request")
-					p.Drop(remaining)
 				} else {
 					e.traceLog.Request(e.template.ID, reqURL, "http", nil)
 				}
@@ -274,9 +313,15 @@ func (e *HTTPExecuter) ExecuteTurboHTTP(p progress.IProgress, reqURL string) *Result {
 func (e *HTTPExecuter) ExecuteHTTP(p progress.IProgress, reqURL string) *Result {
 	// verify if pipeline was requested
 	if e.bulkHTTPRequest.Pipeline {
-		return e.ExecuteTurboHTTP(p, reqURL)
+		return e.ExecuteTurboHTTP(reqURL)
 	}
+
+	// verify if a basic race condition was requested
+	if e.bulkHTTPRequest.Race && e.bulkHTTPRequest.RaceNumberRequests > 0 {
+		return e.ExecuteRaceRequest(reqURL)
+	}
+
 	// verify if parallel elaboration was requested
 	if e.bulkHTTPRequest.Threads > 0 {
 		return e.ExecuteParallelHTTP(p, reqURL)
 	}
@@ -602,7 +647,7 @@ func makeCheckRedirectFunc(followRedirects bool, maxRedirects int) checkRedirect
 func (e *HTTPExecuter) setCustomHeaders(r *requests.HTTPRequest) {
 	for _, customHeader := range e.customHeaders {
 		// This should be pre-computed somewhere and done only once
-		tokens := strings.SplitN(customHeader, ":", 2)
+		tokens := strings.SplitN(customHeader, ":", two)
 		// if it's an invalid header skip it
 		if len(tokens) < two {
 			continue
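Aside: the limit argument in strings.SplitN is what keeps header values containing colons intact; only the first colon separates name from value. (two reads as a lint-friendly named constant for 2 declared elsewhere in the package; its declaration is not part of this diff.) A tiny illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// SplitN with limit 2: only the first ':' splits, so the value
	// "Bearer abc:123" survives intact.
	tokens := strings.SplitN("Authorization: Bearer abc:123", ":", 2)
	fmt.Printf("%q\n", tokens) // ["Authorization" " Bearer abc:123"]
}
```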
@@ -4,17 +4,20 @@ import (
 	"bufio"
 	"context"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"net"
 	"net/http"
 	"net/url"
 	"regexp"
 	"strings"
+	"time"
 
 	"github.com/Knetic/govaluate"
 	"github.com/projectdiscovery/nuclei/v2/pkg/extractors"
 	"github.com/projectdiscovery/nuclei/v2/pkg/generators"
 	"github.com/projectdiscovery/nuclei/v2/pkg/matchers"
+	"github.com/projectdiscovery/nuclei/v2/pkg/syncedreadcloser"
 	"github.com/projectdiscovery/rawhttp"
 	retryablehttp "github.com/projectdiscovery/retryablehttp-go"
 )
@@ -77,6 +80,11 @@ type BulkHTTPRequest struct {
 	DisableAutoHostname bool `yaml:"disable-automatic-host-header,omitempty"`
 	// DisableAutoContentLength Enable/Disable Content-Length header for unsafe raw requests
 	DisableAutoContentLength bool `yaml:"disable-automatic-content-length-header,omitempty"`
+	// Race determines if all the requests have to be attempted at the same time
+	// The minimum number of requests is determined by threads
+	Race bool `yaml:"race,omitempty"`
+	// Number of identical requests to send in the race condition attack
+	RaceNumberRequests int `yaml:"race_count,omitempty"`
 }
 
 // GetMatchersCondition returns the condition for the matcher
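The two new fields are what template authors toggle through YAML. A hedged sketch of how the race and race_count keys map onto these fields (raceConfig is a hypothetical mirror of just the two new fields; the YAML library choice is an assumption, as the diff does not show which one nuclei uses):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// raceConfig mirrors only the two fields added in this diff;
// the real BulkHTTPRequest struct carries many more.
type raceConfig struct {
	Race               bool `yaml:"race,omitempty"`
	RaceNumberRequests int  `yaml:"race_count,omitempty"`
}

func main() {
	var c raceConfig
	if err := yaml.Unmarshal([]byte("race: true\nrace_count: 10"), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Race, c.RaceNumberRequests) // true 10
}
```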
@@ -235,7 +243,15 @@ func (r *BulkHTTPRequest) handleRawWithPaylods(ctx context.Context, raw, baseURL
 	}
 
 	// retryablehttp
-	req, err := http.NewRequestWithContext(ctx, rawRequest.Method, rawRequest.FullURL, strings.NewReader(rawRequest.Data))
+	var body io.ReadCloser
+	body = ioutil.NopCloser(strings.NewReader(rawRequest.Data))
+	if r.Race {
+		// This more or less ensures that all requests hit the endpoint at approximately the same time
+		// Todo: sync internally upon writing the last request byte
+		body = syncedreadcloser.NewOpenGateWithTimeout(body, time.Duration(two)*time.Second)
+	}
+
+	req, err := http.NewRequestWithContext(ctx, rawRequest.Method, rawRequest.FullURL, body)
 	if err != nil {
 		return nil, err
 	}
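How the gate works: SyncedReadCloser (added below) buffers the entire body and serves reads normally, except that the read which would deliver the final bytes blocks on an internal channel. NewOpenGateWithTimeout opens that channel after the given duration, two seconds here, so every racing request stalls just short of completion and then finishes at nearly the same instant. A hedged sketch of wiring a gated body into plain net/http (URL and payload are placeholders; the package path is taken from the import block above):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
	"time"

	"github.com/projectdiscovery/nuclei/v2/pkg/syncedreadcloser"
)

func main() {
	// The body stalls just before its last bytes until the gate opens.
	payload := ioutil.NopCloser(strings.NewReader("code=FREE100"))
	body := syncedreadcloser.NewOpenGateWithTimeout(payload, 2*time.Second)

	req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:8080/coupons/redeem", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	// net/http cannot infer the length of a custom body type,
	// so set it from the buffered data.
	req.ContentLength = int64(body.Len())

	// Do blocks until the timer opens the gate and the final bytes flush.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}
```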
@@ -0,0 +1,97 @@
+package syncedreadcloser
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"time"
+)
+
+// SyncedReadCloser is a ReadCloser compatible with io.ReadSeeker
+type SyncedReadCloser struct {
+	data           []byte
+	p              int64
+	length         int64
+	opengate       chan struct{}
+	enableBlocking bool
+}
+
+func New(r io.ReadCloser) *SyncedReadCloser {
+	var (
+		s   SyncedReadCloser
+		err error
+	)
+	s.data, err = ioutil.ReadAll(r)
+	if err != nil {
+		return nil
+	}
+	r.Close()
+	s.length = int64(len(s.data))
+	s.opengate = make(chan struct{})
+	s.enableBlocking = true
+
+	return &s
+}
+
+func NewOpenGateWithTimeout(r io.ReadCloser, d time.Duration) *SyncedReadCloser {
+	s := New(r)
+	s.OpenGateAfter(d)
+
+	return s
+}
+
+func (s *SyncedReadCloser) SetOpenGate(status bool) {
+	s.enableBlocking = status
+}
+
+func (s *SyncedReadCloser) OpenGate() {
+	s.opengate <- struct{}{}
+}
+
+func (s *SyncedReadCloser) OpenGateAfter(d time.Duration) {
+	time.AfterFunc(d, func() {
+		s.opengate <- struct{}{}
+	})
+}
+
+func (s *SyncedReadCloser) Seek(offset int64, whence int) (int64, error) {
+	var err error
+	switch whence {
+	case io.SeekStart:
+		s.p = 0
+	case io.SeekCurrent:
+		if s.p+offset < s.length {
+			s.p += offset
+			break
+		}
+		err = fmt.Errorf("offset is too big")
+	case io.SeekEnd:
+		if s.length-offset >= 0 {
+			s.p = s.length - offset
+			break
+		}
+		err = fmt.Errorf("offset is too big")
+	}
+	return s.p, err
+}
+
+func (s *SyncedReadCloser) Read(p []byte) (n int, err error) {
+	// If the remaining data fits in the buffer, block awaiting the sync instruction
+	if s.p+int64(len(p)) >= s.length && s.enableBlocking {
+		<-s.opengate
+	}
+	n = copy(p, s.data[s.p:])
+	s.p += int64(n)
+	if s.p == s.length {
+		err = io.EOF
+	}
+	return n, err
+}
+
+func (s *SyncedReadCloser) Close() error {
+	return nil
+}
+
+func (s *SyncedReadCloser) Len() int {
+	return int(s.length)
+}
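To see the gate semantics in isolation (a standalone sketch, not part of the PR): each reader below blocks on its final Read until its one-second gate opens, so all of them finish together.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"
	"sync"
	"time"

	"github.com/projectdiscovery/nuclei/v2/pkg/syncedreadcloser"
)

func main() {
	var wg sync.WaitGroup
	start := time.Now()

	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// Each reader gets its own gated copy of the same data and
			// opens its gate one second from now.
			r := syncedreadcloser.NewOpenGateWithTimeout(
				ioutil.NopCloser(strings.NewReader("payload")), time.Second)
			data, _ := ioutil.ReadAll(r) // blocks until the gate opens
			fmt.Printf("reader %d finished %q after %v\n", id, data, time.Since(start))
		}(i)
	}
	wg.Wait()
}
```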