Merge pull request #174 from projectdiscovery/bugfix-engine-rework

engine parallelism fix
dev
Mzack9999 2020-07-25 22:48:48 +02:00 committed by GitHub
commit 5eb92abb80
13 changed files with 372 additions and 126 deletions

View File

@@ -7,7 +7,6 @@ require (
 	github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535
 	github.com/blang/semver v3.5.1+incompatible
 	github.com/d5/tengo/v2 v2.6.0
-	github.com/elastic/go-lumber v0.1.0
 	github.com/google/go-github/v32 v32.1.0
 	github.com/json-iterator/go v1.1.10
 	github.com/karrick/godirwalk v1.15.6

View File

@@ -11,8 +11,6 @@ github.com/d5/tengo/v2 v2.6.0 h1:D0cJtpiBzaLJ/Smv6nnUc/LIfO46oKwDx85NZtIRNRI=
 github.com/d5/tengo/v2 v2.6.0/go.mod h1:XRGjEs5I9jYIKTxly6HCF8oiiilk5E/RYXOZ5b0DZC8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/elastic/go-lumber v0.1.0 h1:HUjpyg36v2HoKtXlEC53EJ3zDFiDRn65d7B8dBHNius=
-github.com/elastic/go-lumber v0.1.0/go.mod h1:8YvjMIRYypWuPvpxx7WoijBYdbB7XIh/9FqSYQZTtxQ=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
 github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II=

View File

@@ -50,7 +50,7 @@ func ParseOptions() *Options {
 	options := &Options{}
 	flag.StringVar(&options.Target, "target", "", "Target is a single target to scan using template")
-	flag.Var(&options.Templates, "t","Template input file/files to run on host. Can be used multiple times.")
+	flag.Var(&options.Templates, "t", "Template input file/files to run on host. Can be used multiple times.")
 	flag.StringVar(&options.Targets, "l", "", "List of URLs to run templates on")
 	flag.StringVar(&options.Output, "o", "", "File to write output to (optional)")
 	flag.StringVar(&options.ProxyURL, "proxy-url", "", "URL of the proxy server")
@@ -59,7 +59,7 @@ func ParseOptions() *Options {
 	flag.BoolVar(&options.Version, "version", false, "Show version of nuclei")
 	flag.BoolVar(&options.Verbose, "v", false, "Show Verbose output")
 	flag.BoolVar(&options.NoColor, "nC", false, "Don't Use colors in output")
-	flag.IntVar(&options.Threads, "c", 10, "Number of concurrent requests to make")
+	flag.IntVar(&options.Threads, "c", 50, "Number of concurrent requests to make")
 	flag.IntVar(&options.Timeout, "timeout", 5, "Time to wait in seconds before timeout")
 	flag.IntVar(&options.Retries, "retries", 1, "Number of times to retry a failed request")
 	flag.Var(&options.CustomHeaders, "H", "Custom Header.")

View File

@@ -17,6 +17,7 @@ import (
 	"github.com/d5/tengo/v2/stdlib"
 	"github.com/karrick/godirwalk"
 	"github.com/projectdiscovery/gologger"
+	"github.com/projectdiscovery/nuclei/v2/pkg/atomicboolean"
 	"github.com/projectdiscovery/nuclei/v2/pkg/executer"
 	"github.com/projectdiscovery/nuclei/v2/pkg/requests"
 	"github.com/projectdiscovery/nuclei/v2/pkg/templates"
@@ -33,6 +34,7 @@ type Runner struct {
 	templatesConfig *nucleiConfig
 	// options contains configuration options for runner
 	options *Options
+	limiter chan struct{}
 }
 
 // New creates a new client for running enumeration process.
@@ -80,6 +82,9 @@ func New(options *Options) (*Runner, error) {
 		}
 		runner.output = output
 	}
+
+	runner.limiter = make(chan struct{}, options.Threads)
+
 	return runner, nil
 }
@@ -225,33 +230,37 @@ func (r *Runner) RunEnumeration() {
 		gologger.Fatalf("Error, no templates were found.\n")
 	}
 
-	// run with the specified templates
-	var results bool
+	var (
+		wgtemplates sync.WaitGroup
+		results     atomicboolean.AtomBool
+	)
+
 	for _, match := range allTemplates {
-		t, err := r.parse(match)
-		switch t.(type) {
-		case *templates.Template:
-			template := t.(*templates.Template)
-			for _, request := range template.RequestsDNS {
-				dnsResults := r.processTemplateRequest(template, request)
-				if dnsResults {
-					results = dnsResults
-				}
-			}
-			for _, request := range template.BulkRequestsHTTP {
-				httpResults := r.processTemplateRequest(template, request)
-				if httpResults {
-					results = httpResults
-				}
-			}
-		case *workflows.Workflow:
-			workflow := t.(*workflows.Workflow)
-			r.ProcessWorkflowWithList(workflow)
-		default:
-			gologger.Errorf("Could not parse file '%s': %s\n", match, err)
-		}
+		wgtemplates.Add(1)
+		go func(match string) {
+			defer wgtemplates.Done()
+			t, err := r.parse(match)
+			switch t.(type) {
+			case *templates.Template:
+				template := t.(*templates.Template)
+				for _, request := range template.RequestsDNS {
+					results.Or(r.processTemplateRequest(template, request))
+				}
+				for _, request := range template.BulkRequestsHTTP {
+					results.Or(r.processTemplateRequest(template, request))
+				}
+			case *workflows.Workflow:
+				workflow := t.(*workflows.Workflow)
+				r.ProcessWorkflowWithList(workflow)
+			default:
+				gologger.Errorf("Could not parse file '%s': %s\n", match, err)
+			}
+		}(match)
 	}
-	if !results {
+
+	wgtemplates.Wait()
+	if !results.Get() {
 		if r.output != nil {
 			outputFile := r.output.Name()
 			r.output.Close()
@@ -276,9 +285,9 @@ func (r *Runner) processTemplateRequest(template *templates.Template, request in
 	if err != nil {
 		gologger.Fatalf("Could not open targets file '%s': %s\n", r.options.Targets, err)
 	}
-	results := r.processTemplateWithList(template, request, file)
-	file.Close()
-	return results
+	defer file.Close()
+
+	return r.processTemplateWithList(template, request, file)
 }
 
 // processDomain processes the list with a template
@@ -331,48 +340,42 @@ func (r *Runner) processTemplateWithList(template *templates.Template, request i
 		return false
 	}
 
-	limiter := make(chan struct{}, r.options.Threads)
-	wg := &sync.WaitGroup{}
+	var globalresult atomicboolean.AtomBool
+
+	var wg sync.WaitGroup
 
 	scanner := bufio.NewScanner(reader)
 	for scanner.Scan() {
 		text := scanner.Text()
 		if text == "" {
 			continue
 		}
-		limiter <- struct{}{}
+
+		r.limiter <- struct{}{}
 		wg.Add(1)
 
 		go func(URL string) {
+			defer wg.Done()
+
 			var result executer.Result
 			if httpExecuter != nil {
 				result = httpExecuter.ExecuteHTTP(URL)
+				globalresult.Or(result.GotResults)
 			}
 			if dnsExecuter != nil {
 				result = dnsExecuter.ExecuteDNS(URL)
+				globalresult.Or(result.GotResults)
 			}
 			if result.Error != nil {
 				gologger.Warningf("Could not execute step: %s\n", result.Error)
 			}
-			<-limiter
-			wg.Done()
+
+			<-r.limiter
 		}(text)
 	}
-	close(limiter)
+
 	wg.Wait()
 
 	// See if we got any results from the executers
-	var results bool
-	if httpExecuter != nil {
-		results = httpExecuter.Results
-	}
-	if dnsExecuter != nil {
-		if !results {
-			results = dnsExecuter.Results
-		}
-	}
-	return results
+	return globalresult.Get()
 }
 
 // ProcessWorkflowWithList coming from stdin or list of targets
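The reworked runner parses and runs each template in its own goroutine, while processTemplateWithList reuses a single runner-wide r.limiter channel (sized by -c) as a semaphore and folds per-target outcomes into an atomic flag. A minimal, self-contained sketch of that pattern follows; scanAll, scan and the plain mutex-guarded flag are illustrative stand-ins, not nuclei APIs:

package main

import (
	"fmt"
	"sync"
)

// scanAll caps concurrent work at `threads` using a buffered channel as a
// semaphore, waits for all goroutines with a WaitGroup, and records whether
// any target produced a result behind a mutex (same effect as AtomBool.Or).
func scanAll(targets []string, threads int, scan func(string) bool) bool {
	limiter := make(chan struct{}, threads)
	var wg sync.WaitGroup
	var mu sync.Mutex
	got := false

	for _, t := range targets {
		limiter <- struct{}{} // blocks once `threads` scans are in flight
		wg.Add(1)
		go func(target string) {
			defer wg.Done()
			ok := scan(target)
			mu.Lock()
			got = got || ok
			mu.Unlock()
			<-limiter // release the slot
		}(t)
	}
	wg.Wait()
	return got
}

func main() {
	targets := []string{"a", "b", "c"}
	fmt.Println(scanAll(targets, 2, func(t string) bool { return t == "b" }))
}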

View File

@@ -0,0 +1,38 @@
+package atomicboolean
+
+import (
+	"sync"
+)
+
+type AtomBool struct {
+	sync.RWMutex
+	flag bool
+}
+
+func (b *AtomBool) Or(value bool) {
+	b.Lock()
+	defer b.Unlock()
+
+	b.flag = b.flag || value
+}
+
+func (b *AtomBool) And(value bool) {
+	b.Lock()
+	defer b.Unlock()
+
+	b.flag = b.flag && value
+}
+
+func (b *AtomBool) Set(value bool) {
+	b.Lock()
+	defer b.Unlock()
+
+	b.flag = value
+}
+
+func (b *AtomBool) Get() bool {
+	b.RLock()
+	defer b.RUnlock()
+
+	return b.flag
+}
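A short usage sketch of the new atomicboolean package as the runner and workflows now use it: Or can be called from many goroutines and the flag latches to true once any of them reports a hit. The surrounding main function and sample data are illustrative only:

package main

import (
	"fmt"
	"sync"

	"github.com/projectdiscovery/nuclei/v2/pkg/atomicboolean"
)

func main() {
	var results atomicboolean.AtomBool
	var wg sync.WaitGroup

	// Each goroutine ORs its outcome into the shared flag.
	for _, hit := range []bool{false, true, false} {
		wg.Add(1)
		go func(hit bool) {
			defer wg.Done()
			results.Or(hit)
		}(hit)
	}
	wg.Wait()

	fmt.Println(results.Get()) // true
}

The RWMutex-backed wrapper keeps the Or/And read-modify-write steps atomic, which a bare bool assigned from multiple goroutines would not be.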

View File

@@ -110,7 +110,7 @@ func (e *DNSExecuter) ExecuteDNS(URL string) (result Result) {
 			// write the first output then move to next matcher.
 			if matcherCondition == matchers.ORCondition && len(e.dnsRequest.Extractors) == 0 {
 				e.writeOutputDNS(domain, matcher, nil)
-				e.Results = true
+				result.GotResults = true
 			}
 		}
 	}
@@ -120,7 +120,9 @@ func (e *DNSExecuter) ExecuteDNS(URL string) (result Result) {
 	var extractorResults []string
 	for _, extractor := range e.dnsRequest.Extractors {
 		for match := range extractor.ExtractDNS(resp) {
-			extractorResults = append(extractorResults, match)
+			if !extractor.Internal {
+				extractorResults = append(extractorResults, match)
+			}
 		}
 	}
@@ -128,7 +130,6 @@ func (e *DNSExecuter) ExecuteDNS(URL string) (result Result) {
 	// AND or if we have extractors for the mechanism too.
 	if len(e.dnsRequest.Extractors) > 0 || matcherCondition == matchers.ANDCondition {
 		e.writeOutputDNS(domain, nil, extractorResults)
-		e.Results = true
 	}
 
 	return

View File

@@ -86,7 +86,7 @@ func NewHTTPExecuter(options *HTTPOptions) (*HTTPExecuter, error) {
 	executer := &HTTPExecuter{
 		debug:           options.Debug,
 		jsonOutput:      options.JSON,
-		jsonRequest:     options.JSONRequests,
+		jsonRequest:     options.JSONRequests,
 		httpClient:      client,
 		template:        options.Template,
 		bulkHttpRequest: options.BulkHttpRequest,
@@ -95,6 +95,7 @@ func NewHTTPExecuter(options *HTTPOptions) (*HTTPExecuter, error) {
 		customHeaders:   options.CustomHeaders,
 		CookieJar:       options.CookieJar,
 	}
+
 	return executer, nil
 }
@@ -104,10 +105,14 @@ func (e *HTTPExecuter) ExecuteHTTP(URL string) (result Result) {
 	result.Extractions = make(map[string]interface{})
 	dynamicvalues := make(map[string]interface{})
 
-	e.bulkHttpRequest.Reset()
+	// verify if the URL is already being processed
+	if e.bulkHttpRequest.HasGenerator(URL) {
+		return
+	}
 
-	for e.bulkHttpRequest.Next() && !result.Done {
-		httpRequest, err := e.bulkHttpRequest.MakeHTTPRequest(URL, dynamicvalues, e.bulkHttpRequest.Current())
+	e.bulkHttpRequest.CreateGenerator(URL)
+
+	for e.bulkHttpRequest.Next(URL) && !result.Done {
+		httpRequest, err := e.bulkHttpRequest.MakeHTTPRequest(URL, dynamicvalues, e.bulkHttpRequest.Current(URL))
 		if err != nil {
 			result.Error = errors.Wrap(err, "could not make http request")
 			return
@@ -119,7 +124,7 @@ func (e *HTTPExecuter) ExecuteHTTP(URL string) (result Result) {
 			return
 		}
 
-		e.bulkHttpRequest.Increment()
+		e.bulkHttpRequest.Increment(URL)
 	}
 
 	gologger.Verbosef("Sent HTTP request to %s\n", "http-request", URL)
@@ -186,25 +191,28 @@ func (e *HTTPExecuter) handleHTTP(URL string, request *requests.HttpRequest, dyn
 			} else {
 				// If the matcher has matched, and its an OR
 				// write the first output then move to next matcher.
-				if matcherCondition == matchers.ORCondition && len(e.bulkHttpRequest.Extractors) == 0 {
+				if matcherCondition == matchers.ORCondition {
 					result.Matches[matcher.Name] = nil
 					// probably redundant but ensures we snapshot current payload values when matchers are valid
 					result.Meta = request.Meta
 					e.writeOutputHTTP(request, resp, body, matcher, nil)
-					e.Results = true
+					result.GotResults = true
 				}
 			}
 		}
 
 	// All matchers have successfully completed so now start with the
 	// next task which is extraction of input from matchers.
-	var extractorResults []string
+	var extractorResults, outputExtractorResults []string
 	for _, extractor := range e.bulkHttpRequest.Extractors {
 		for match := range extractor.Extract(resp, body, headers) {
 			if _, ok := dynamicvalues[extractor.Name]; !ok {
 				dynamicvalues[extractor.Name] = match
 			}
 			extractorResults = append(extractorResults, match)
+			if !extractor.Internal {
+				outputExtractorResults = append(outputExtractorResults, match)
+			}
 		}
 		// probably redundant but ensures we snapshot current payload values when extractors are valid
 		result.Meta = request.Meta
@@ -213,9 +221,9 @@ func (e *HTTPExecuter) handleHTTP(URL string, request *requests.HttpRequest, dyn
 	// Write a final string of output if matcher type is
 	// AND or if we have extractors for the mechanism too.
-	if len(e.bulkHttpRequest.Extractors) > 0 || matcherCondition == matchers.ANDCondition {
-		e.writeOutputHTTP(request, resp, body, nil, extractorResults)
-		e.Results = true
+	if len(outputExtractorResults) > 0 || matcherCondition == matchers.ANDCondition {
+		e.writeOutputHTTP(request, resp, body, nil, outputExtractorResults)
+		result.GotResults = true
 	}
 
 	return nil
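A condensed sketch of the extractor split introduced in handleHTTP above: every match still populates dynamicvalues for reuse in later requests of the same template, but only matches from non-internal extractors reach the output list. The extractor struct and sample data here are stand-ins for illustration, not the nuclei types:

package main

import "fmt"

// extractor stands in for a template extractor carrying the new Internal flag.
type extractor struct {
	Name     string
	Internal bool
	matches  []string
}

func main() {
	extractors := []extractor{
		{Name: "csrf_token", Internal: true, matches: []string{"abc123"}},
		{Name: "version", Internal: false, matches: []string{"1.2.3"}},
	}

	dynamicvalues := map[string]interface{}{}
	var extractorResults, outputExtractorResults []string

	for _, e := range extractors {
		for _, match := range e.matches {
			if _, ok := dynamicvalues[e.Name]; !ok {
				dynamicvalues[e.Name] = match // available to later requests
			}
			extractorResults = append(extractorResults, match)
			if !e.Internal {
				// only non-internal matches are written to output
				outputExtractorResults = append(outputExtractorResults, match)
			}
		}
	}

	fmt.Println(dynamicvalues)          // map[csrf_token:abc123 version:1.2.3]
	fmt.Println(extractorResults)       // [abc123 1.2.3]
	fmt.Println(outputExtractorResults) // [1.2.3]
}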

View File

@@ -32,5 +32,6 @@ func (e *Extractor) CompileExtractors() error {
 	} else {
 		e.part = BodyPart
 	}
+
 	return nil
 }

View File

@@ -25,6 +25,9 @@ type Extractor struct {
 	Part string `yaml:"part,omitempty"`
 	// part is the part of the request to match
 	part Part
+
+	// Internal defines if this is used internally
+	Internal bool `yaml:"internal,omitempty"`
 }
 
 // ExtractorType is the type of the extractor specified

View File

@@ -52,14 +52,8 @@ type BulkHTTPRequest struct {
 	// MaxRedirects is the maximum number of redirects that should be followed.
 	MaxRedirects int `yaml:"max-redirects,omitempty"`
 	// Raw contains raw requests
 	Raw []string `yaml:"raw,omitempty"`
 
-	positionPath          int
-	positionRaw           int
-	generator             func(payloads map[string][]string) (out chan map[string]interface{})
-	currentPayloads       map[string]interface{}
-	basePayloads          map[string][]string
-	generatorChan         chan map[string]interface{}
-	currentGeneratorValue map[string]interface{}
+	gsfm *GeneratorFSM
 }
 
 // GetMatchersCondition returns the condition for the matcher
@@ -121,16 +115,20 @@ func (r *BulkHTTPRequest) makeHTTPRequestFromModel(baseURL string, data string,
 	return &HttpRequest{Request: request}, nil
 }
 
-func (r *BulkHTTPRequest) StartGenerator() {
-	r.generatorChan = r.generator(r.basePayloads)
+func (r *BulkHTTPRequest) InitGenerator() {
+	r.gsfm = NewGeneratorFSM(r.attackType, r.Payloads, r.Path, r.Raw)
 }
 
-func (r *BulkHTTPRequest) PickOne() {
-	var ok bool
-	r.currentGeneratorValue, ok = <-r.generatorChan
-	if !ok {
-		r.generator = nil
-	}
+func (r *BulkHTTPRequest) CreateGenerator(URL string) {
+	r.gsfm.Add(URL)
+}
+
+func (r *BulkHTTPRequest) HasGenerator(URL string) bool {
+	return r.gsfm.Has(URL)
+}
+
+func (r *BulkHTTPRequest) ReadOne(URL string) {
+	r.gsfm.ReadOne(URL)
 }
 
 // makeHTTPRequestFromRaw creates a *http.Request from a raw request
@@ -138,22 +136,9 @@ func (r *BulkHTTPRequest) makeHTTPRequestFromRaw(baseURL string, data string, va
 	// Add trailing line
 	data += "\n"
 
 	if len(r.Payloads) > 0 {
-		if r.generator == nil {
-			r.basePayloads = generators.LoadPayloads(r.Payloads)
-			generatorFunc := generators.SniperGenerator
-			switch r.attackType {
-			case generators.PitchFork:
-				generatorFunc = generators.PitchforkGenerator
-			case generators.ClusterBomb:
-				generatorFunc = generators.ClusterbombGenerator
-			}
-			r.generator = generatorFunc
-			r.StartGenerator()
-		}
-		r.PickOne()
-		return r.handleRawWithPaylods(data, baseURL, values, r.currentGeneratorValue)
+		r.gsfm.InitOrSkip(baseURL)
+		r.ReadOne(baseURL)
+		return r.handleRawWithPaylods(data, baseURL, values, r.gsfm.Value(baseURL))
 	}
 
 	// otherwise continue with normal flow
@@ -363,40 +348,25 @@ func (r *BulkHTTPRequest) parseRawRequest(request string, baseURL string) (*RawR
 	return &rawRequest, nil
 }
 
-func (r *BulkHTTPRequest) Next() bool {
-	if r.positionPath+r.positionRaw >= len(r.Path)+len(r.Raw) {
-		return false
-	}
-	return true
+func (r *BulkHTTPRequest) Next(URL string) bool {
+	return r.gsfm.Next(URL)
 }
 
-func (r *BulkHTTPRequest) Position() int {
-	return r.positionPath + r.positionRaw
+func (r *BulkHTTPRequest) Position(URL string) int {
+	return r.gsfm.Position(URL)
 }
 
-func (r *BulkHTTPRequest) Reset() {
-	r.positionPath = 0
-	r.positionRaw = 0
-}
-
-func (r *BulkHTTPRequest) Current() string {
-	if r.positionPath < len(r.Path) && len(r.Path) != 0 {
-		return r.Path[r.positionPath]
-	}
-	return r.Raw[r.positionRaw]
+func (r *BulkHTTPRequest) Reset(URL string) {
+	r.gsfm.Reset(URL)
+}
+
+func (r *BulkHTTPRequest) Current(URL string) string {
+	return r.gsfm.Current(URL)
 }
 
 func (r *BulkHTTPRequest) Total() int {
 	return len(r.Path) + len(r.Raw)
 }
 
-func (r *BulkHTTPRequest) Increment() {
-	if len(r.Path) > 0 && r.positionPath < len(r.Path) {
-		r.positionPath++
-		return
-	}
-	if len(r.Raw) > 0 && r.positionRaw < len(r.Raw) {
-		// if we have payloads increment only when the generators are done
-		if r.generator == nil {
-			r.positionRaw++
-		}
-	}
+func (r *BulkHTTPRequest) Increment(URL string) {
+	r.gsfm.Increment(URL)
 }

View File

@@ -0,0 +1,222 @@
+package requests
+
+import (
+	"sync"
+	"time"
+
+	"github.com/projectdiscovery/nuclei/v2/pkg/generators"
+)
+
+type Generator struct {
+	sync.RWMutex
+	positionPath          int
+	positionRaw           int
+	currentPayloads       map[string]interface{}
+	gchan                 chan map[string]interface{}
+	currentGeneratorValue map[string]interface{}
+}
+
+type GeneratorFSM struct {
+	sync.RWMutex
+	payloads     map[string]interface{}
+	basePayloads map[string][]string
+	generator    func(payloads map[string][]string) (out chan map[string]interface{})
+	Generators   map[string]*Generator
+	Type         generators.Type
+	Paths        []string
+	Raws         []string
+}
+
+func NewGeneratorFSM(typ generators.Type, payloads map[string]interface{}, paths, raws []string) *GeneratorFSM {
+	var gsfm GeneratorFSM
+	gsfm.payloads = payloads
+	gsfm.Paths = paths
+	gsfm.Raws = raws
+
+	if len(gsfm.payloads) > 0 {
+		// load payloads if not already done
+		if gsfm.basePayloads == nil {
+			gsfm.basePayloads = generators.LoadPayloads(gsfm.payloads)
+		}
+
+		generatorFunc := generators.SniperGenerator
+		switch typ {
+		case generators.PitchFork:
+			generatorFunc = generators.PitchforkGenerator
+		case generators.ClusterBomb:
+			generatorFunc = generators.ClusterbombGenerator
+		}
+		gsfm.generator = generatorFunc
+	}
+
+	gsfm.Generators = make(map[string]*Generator)
+
+	return &gsfm
+}
+
+func (gfsm *GeneratorFSM) Add(key string) {
+	gfsm.Lock()
+	defer gfsm.Unlock()
+
+	if _, ok := gfsm.Generators[key]; !ok {
+		gfsm.Generators[key] = &Generator{}
+	}
+}
+
+func (gfsm *GeneratorFSM) Has(key string) bool {
+	gfsm.RLock()
+	defer gfsm.RUnlock()
+
+	_, ok := gfsm.Generators[key]
+	return ok
+}
+
+func (gfsm *GeneratorFSM) Delete(key string) {
+	gfsm.Lock()
+	defer gfsm.Unlock()
+
+	delete(gfsm.Generators, key)
+}
+
+func (gfsm *GeneratorFSM) ReadOne(key string) {
+	gfsm.RLock()
+	defer gfsm.RUnlock()
+
+	g, ok := gfsm.Generators[key]
+	if !ok {
+		return
+	}
+
+	for afterCh := time.After(15 * time.Second); ; {
+		select {
+		// got a value
+		case curGenValue, ok := <-g.gchan:
+			if !ok {
+				g.Lock()
+				g.gchan = nil
+				g.Unlock()
+				return
+			}
+			g.currentGeneratorValue = curGenValue
+			return
+		// timeout
+		case <-afterCh:
+			g.Lock()
+			g.gchan = nil
+			g.Unlock()
+			return
+		}
+	}
+}
+
+func (gfsm *GeneratorFSM) InitOrSkip(key string) {
+	gfsm.RLock()
+	defer gfsm.RUnlock()
+
+	g, ok := gfsm.Generators[key]
+	if !ok {
+		return
+	}
+
+	if len(gfsm.payloads) > 0 {
+		g.Lock()
+		defer g.Unlock()
+
+		if g.gchan == nil {
+			g.gchan = gfsm.generator(gfsm.basePayloads)
+		}
+	}
+}
+
+func (gfsm *GeneratorFSM) Value(key string) map[string]interface{} {
+	gfsm.RLock()
+	defer gfsm.RUnlock()
+
+	g, ok := gfsm.Generators[key]
+	if !ok {
+		return nil
+	}
+
+	return g.currentGeneratorValue
+}
+
+func (gfsm *GeneratorFSM) Next(key string) bool {
+	gfsm.RLock()
+	defer gfsm.RUnlock()
+
+	g, ok := gfsm.Generators[key]
+	if !ok {
+		return false
+	}
+
+	if g.positionPath+g.positionRaw >= len(gfsm.Paths)+len(gfsm.Raws) {
+		return false
+	}
+	return true
+}
+
+func (gfsm *GeneratorFSM) Position(key string) int {
+	gfsm.RLock()
+	defer gfsm.RUnlock()
+
+	g, ok := gfsm.Generators[key]
+	if !ok {
+		return 0
+	}
+
+	return g.positionPath + g.positionRaw
+}
+
+func (gfsm *GeneratorFSM) Reset(key string) {
+	gfsm.Lock()
+	defer gfsm.Unlock()
+
+	if !gfsm.Has(key) {
+		gfsm.Add(key)
+	}
+
+	g, ok := gfsm.Generators[key]
+	if !ok {
+		return
+	}
+
+	g.positionPath = 0
+	g.positionRaw = 0
+}
+
+func (gfsm *GeneratorFSM) Current(key string) string {
+	gfsm.RLock()
+	defer gfsm.RUnlock()
+
+	g, ok := gfsm.Generators[key]
+	if !ok {
+		return ""
+	}
+
+	if g.positionPath < len(gfsm.Paths) && len(gfsm.Paths) != 0 {
+		return gfsm.Paths[g.positionPath]
+	}
+
+	return gfsm.Raws[g.positionRaw]
+}
+
+func (gfsm *GeneratorFSM) Total() int {
+	return len(gfsm.Paths) + len(gfsm.Raws)
+}
+
+func (gfsm *GeneratorFSM) Increment(key string) {
+	gfsm.Lock()
+	defer gfsm.Unlock()
+
+	g, ok := gfsm.Generators[key]
+	if !ok {
+		return
+	}
+
+	if len(gfsm.Paths) > 0 && g.positionPath < len(gfsm.Paths) {
+		g.positionPath++
+		return
+	}
+
+	if len(gfsm.Raws) > 0 && g.positionRaw < len(gfsm.Raws) {
+		// if we have payloads increment only when the generators are done
+		if g.gchan == nil {
+			g.positionRaw++
+		}
+	}
+}
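A brief usage sketch of the per-URL generator state machine: each target key gets its own Generator, so request positions no longer race when many URLs are processed in parallel. The loop mirrors how ExecuteHTTP drives it through the BulkHTTPRequest wrappers. The target URLs and paths below are made up, and the attack type is left at its zero value because no payloads are supplied, so the payload generator is never started:

package main

import (
	"fmt"

	"github.com/projectdiscovery/nuclei/v2/pkg/generators"
	"github.com/projectdiscovery/nuclei/v2/pkg/requests"
)

func main() {
	paths := []string{"{{BaseURL}}/admin", "{{BaseURL}}/login"}

	// No payloads: the FSM simply walks the request templates per key.
	var attackType generators.Type
	gsfm := requests.NewGeneratorFSM(attackType, nil, paths, nil)

	for _, target := range []string{"https://a.example", "https://b.example"} {
		gsfm.Add(target) // independent position per target URL
		for gsfm.Next(target) {
			fmt.Println(target, "->", gsfm.Current(target))
			gsfm.Increment(target)
		}
	}
}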

View File

@@ -81,6 +81,8 @@ func Parse(file string) (*Template, error) {
 				return nil, err
 			}
 		}
+
+		request.InitGenerator()
 	}
 
 	// Compile the matchers and the extractors for dns requests

View File

@@ -5,6 +5,7 @@ import (
 	tengo "github.com/d5/tengo/v2"
 	"github.com/projectdiscovery/gologger"
+	"github.com/projectdiscovery/nuclei/v2/pkg/atomicboolean"
 	"github.com/projectdiscovery/nuclei/v2/pkg/executer"
 	"github.com/projectdiscovery/nuclei/v2/pkg/generators"
 )
@@ -50,7 +51,7 @@ func (n *NucleiVar) Call(args ...tengo.Object) (ret tengo.Object, err error) {
 		externalVars = iterableToMap(args[1])
 	}
 
-	var gotResult bool
+	var gotResult atomicboolean.AtomBool
 	for _, template := range n.Templates {
 		if template.HTTPOptions != nil {
 			for _, request := range template.HTTPOptions.Template.BulkRequestsHTTP {
@@ -70,8 +71,8 @@ func (n *NucleiVar) Call(args ...tengo.Object) (ret tengo.Object, err error) {
 					continue
 				}
 
-				if httpExecuter.Results {
-					gotResult = true
+				if result.GotResults {
+					gotResult.Or(result.GotResults)
 					n.addResults(&result)
 				}
 			}
@@ -87,15 +88,15 @@ func (n *NucleiVar) Call(args ...tengo.Object) (ret tengo.Object, err error) {
 					continue
 				}
 
-				if dnsExecuter.Results {
-					gotResult = true
+				if result.GotResults {
+					gotResult.Or(result.GotResults)
 					n.addResults(&result)
 				}
 			}
 		}
 	}
 
-	if gotResult {
+	if gotResult.Get() {
 		return tengo.TrueValue, nil
 	}
 
 	return tengo.FalseValue, nil