mirror of https://github.com/daffainfo/nuclei.git
Misc work on making http protocol runnable
parent 651a5edfbb
commit fc83142917
@@ -57,6 +57,8 @@ type Result struct {
    OutputExtracts []string
    // DynamicValues contains any dynamic values to be templated
    DynamicValues map[string]string
    // PayloadValues contains payload values provided by user. (Optional)
    PayloadValues map[string]interface{}
}

// MatchFunc performs matching operation for a matcher on model and returns true or false.
@@ -14,6 +14,15 @@ func MergeMaps(m1, m2 map[string]interface{}) map[string]interface{} {
    return m
}

// ExpandMapValues converts values from flat string to strings slice
func ExpandMapValues(m map[string]string) map[string][]string {
    m1 := make(map[string][]string, len(m))
    for k, v := range m {
        m1[k] = []string{v}
    }
    return m1
}

// CopyMap creates a new copy of an existing map
func CopyMap(originalMap map[string]interface{}) map[string]interface{} {
    newMap := make(map[string]interface{})
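For context, a minimal sketch of how these generator helpers fit together. It assumes only what this diff shows: that MergeMaps, ExpandMapValues, and CopyMap are exported from pkg/protocols/common/generators (the import path used elsewhere in the commit); the program itself is illustrative, not part of the change.

package main

import (
    "fmt"

    "github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/generators"
)

func main() {
    // Combine two value maps (e.g. payload values and dynamic values) into one.
    merged := generators.MergeMaps(
        map[string]interface{}{"username": "admin"},
        map[string]interface{}{"password": "admin"},
    )
    fmt.Println(merged)

    // ExpandMapValues wraps every flat string value in a single-element slice,
    // the shape rawhttp's DoRaw helpers expect for header maps.
    headers := generators.ExpandMapValues(map[string]string{"User-Agent": "nuclei"})
    fmt.Println(headers)

    // CopyMap returns a new copy that can be modified without touching the original.
    copied := generators.CopyMap(merged)
    copied["username"] = "guest"
    fmt.Println(merged["username"], copied["username"])
}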
@@ -0,0 +1,90 @@
package http

import (
    "github.com/projectdiscovery/nuclei/v2/pkg/output"
    "github.com/projectdiscovery/nuclei/v2/pkg/protocols"
)

// Executer executes a group of requests for a protocol
type Executer struct {
    requests []*Request
    options *protocols.ExecuterOptions
}

var _ protocols.Executer = &Executer{}

// NewExecuter creates a new request executer for list of requests
func NewExecuter(requests []*Request, options *protocols.ExecuterOptions) *Executer {
    return &Executer{requests: requests, options: options}
}

// Compile compiles the execution generators preparing any requests possible.
func (e *Executer) Compile() error {
    for _, request := range e.requests {
        err := request.Compile(e.options)
        if err != nil {
            return err
        }
    }
    return nil
}

// Requests returns the total number of requests the rule will perform
func (e *Executer) Requests() int64 {
    var count int64
    for _, request := range e.requests {
        count += int64(request.Requests())
    }
    return count
}

// Execute executes the protocol group and returns true or false if results were found.
func (e *Executer) Execute(input string) (bool, error) {
    var results bool

    for _, req := range e.requests {
        events, err := req.ExecuteHTTP(input, nil)
        if err != nil {
            return false, err
        }
        if events == nil {
            return false, nil
        }

        // If we have a result field, we should add a result to slice.
        for _, event := range events {
            if event.OperatorsResult == nil {
                continue
            }

            for _, result := range req.makeResultEvent(event) {
                results = true
                e.options.Output.Write(result)
            }
        }
    }
    return results, nil
}

// ExecuteWithResults executes the protocol requests and returns results instead of writing them.
func (e *Executer) ExecuteWithResults(input string) ([]*output.InternalWrappedEvent, error) {
    var results []*output.InternalWrappedEvent

    for _, req := range e.requests {
        events, err := req.ExecuteHTTP(input, nil)
        if err != nil {
            return nil, err
        }
        if events == nil {
            return nil, nil
        }
        for _, event := range events {
            if event.OperatorsResult == nil {
                continue
            }
            event.Results = req.makeResultEvent(event)
        }
        results = append(results, events...)
    }
    return results, nil
}
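To show how the new Executer is meant to be driven, here is a hypothetical wiring sketch: construct it from a slice of compiled-template requests plus ExecuterOptions, call Compile once, then execute per target. The runHTTPRequests helper and the httpproto import alias are mine, not part of the commit.

package httpexample

import (
    "log"

    "github.com/projectdiscovery/nuclei/v2/pkg/output"
    "github.com/projectdiscovery/nuclei/v2/pkg/protocols"
    httpproto "github.com/projectdiscovery/nuclei/v2/pkg/protocols/http"
)

// runHTTPRequests follows the call order the Executer above expects:
// NewExecuter, Compile once, then one execution per input, collecting
// wrapped events instead of writing output directly.
func runHTTPRequests(requests []*httpproto.Request, options *protocols.ExecuterOptions, target string) ([]*output.InternalWrappedEvent, error) {
    executer := httpproto.NewExecuter(requests, options)
    if err := executer.Compile(); err != nil {
        return nil, err
    }
    log.Printf("sending %d requests to %s", executer.Requests(), target)
    return executer.ExecuteWithResults(target)
}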
@@ -6,6 +6,7 @@ import (
    "github.com/projectdiscovery/nuclei/v2/pkg/protocols"
    "github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/generators"
    "github.com/projectdiscovery/nuclei/v2/pkg/protocols/http/httpclientpool"
    "github.com/projectdiscovery/nuclei/v2/pkg/types"
    "github.com/projectdiscovery/rawhttp"
    "github.com/projectdiscovery/retryablehttp-go"
)
@@ -62,6 +63,7 @@ type Request struct {
    options *protocols.ExecuterOptions
    attackType generators.Type
    totalRequests int
    customHeaders types.StringSlice
    generator *generators.Generator // optional, only enabled when using payloads
    httpClient *retryablehttp.Client
    rawhttpClient *rawhttp.Client
@@ -8,6 +8,7 @@ import (

    "github.com/projectdiscovery/nuclei/v2/pkg/operators/extractors"
    "github.com/projectdiscovery/nuclei/v2/pkg/operators/matchers"
    "github.com/projectdiscovery/nuclei/v2/pkg/output"
    "github.com/projectdiscovery/nuclei/v2/pkg/types"
)
@@ -84,12 +85,17 @@ func (r *Request) Extract(data map[string]interface{}, extractor *extractors.Ext
}

// responseToDSLMap converts a HTTP response to a map for use in DSL matching
func responseToDSLMap(resp *http.Response, body, headers string, duration time.Duration, extra map[string]interface{}) map[string]interface{} {
func (r *Request) responseToDSLMap(resp *http.Response, rawReq, rawResp, body, headers string, duration time.Duration, extra map[string]interface{}) map[string]interface{} {
    data := make(map[string]interface{}, len(extra)+6+len(resp.Header)+len(resp.Cookies()))
    for k, v := range extra {
        data[k] = v
    }

    if r.options.Options.JSONRequests {
        data["request"] = rawReq
        data["response"] = rawResp
    }

    data["content_length"] = resp.ContentLength
    data["status_code"] = resp.StatusCode
@@ -110,3 +116,33 @@ func responseToDSLMap(resp *http.Response, body, headers string, duration time.D
    data["duration"] = duration.Seconds()
    return data
}

// makeResultEvent creates a result event from internal wrapped event
func (r *Request) makeResultEvent(wrapped *output.InternalWrappedEvent) []*output.ResultEvent {
    results := make([]*output.ResultEvent, len(wrapped.OperatorsResult.Matches)+1)

    data := output.ResultEvent{
        TemplateID: r.options.TemplateID,
        Info: r.options.TemplateInfo,
        Type: "http",
        Host: wrapped.InternalEvent["host"].(string),
        Matched: wrapped.InternalEvent["matched"].(string),
        Metadata: wrapped.OperatorsResult.PayloadValues,
        ExtractedResults: wrapped.OperatorsResult.OutputExtracts,
    }
    if r.options.Options.JSONRequests {
        data.Request = wrapped.InternalEvent["request"].(string)
        data.Response = wrapped.InternalEvent["raw"].(string)
    }

    // If we have multiple matchers with names, write each of them separately.
    if len(wrapped.OperatorsResult.Matches) > 0 {
        for k := range wrapped.OperatorsResult.Matches {
            data.MatcherName = k
            results = append(results, &data)
        }
    } else {
        results = append(results, &data)
    }
    return results
}
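The keys written by responseToDSLMap are what matcher and extractor expressions see for an HTTP event. Below is a standalone illustration of the same shape, using only net/http and the key names visible in the hunks above; buildDSLMap itself is not part of the commit, just a simplified stand-in.

package main

import (
    "fmt"
    "net/http"
    "time"
)

// buildDSLMap mirrors, in simplified form, the map handed to matchers:
// status_code, content_length, duration, plus the raw request/response
// pair when the JSONRequests option is enabled.
func buildDSLMap(resp *http.Response, rawReq, rawResp string, duration time.Duration, jsonRequests bool) map[string]interface{} {
    data := map[string]interface{}{
        "content_length": resp.ContentLength,
        "status_code":    resp.StatusCode,
        "duration":       duration.Seconds(),
    }
    if jsonRequests {
        data["request"] = rawReq
        data["response"] = rawResp
    }
    return data
}

func main() {
    resp := &http.Response{StatusCode: http.StatusOK, ContentLength: 5}
    fmt.Println(buildDSLMap(resp, "GET / HTTP/1.1", "HTTP/1.1 200 OK", 120*time.Millisecond, false))
}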
@@ -15,10 +15,9 @@ import (

    "github.com/corpix/uarand"
    "github.com/pkg/errors"
    "github.com/projectdiscovery/nuclei/v2/pkg/matchers"
    "github.com/projectdiscovery/gologger"
    "github.com/projectdiscovery/nuclei/v2/pkg/output"
    "github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/generators"
    "github.com/projectdiscovery/nuclei/v2/pkg/requests"
    "github.com/projectdiscovery/rawhttp"
    "github.com/remeh/sizedwaitgroup"
    "go.uber.org/multierr"
@@ -213,10 +212,12 @@ func (e *Request) ExecuteHTTP(reqURL string, dynamicValues map[string]interface{
func (e *Request) executeRequest(reqURL string, request *generatedRequest, dynamicvalues map[string]interface{}) ([]*output.InternalWrappedEvent, error) {
    // Add User-Agent value randomly to the customHeaders slice if `random-agent` flag is given
    if e.options.Options.RandomAgent {
        builder := &strings.Builder{}
        builder.WriteString("User-Agent: ")
        // nolint:errcheck // ignoring error
        e.customHeaders.Set("User-Agent: " + uarand.GetRandom())
        builder.WriteString(uarand.GetRandom())
        e.customHeaders.Set(builder.String())
    }

    e.setCustomHeaders(request)

    var (
@@ -225,210 +226,158 @@ func (e *Request) executeRequest(reqURL string, request *generatedRequest, dynam
        dumpedRequest []byte
        fromcache bool
    )

    if e.debug || e.pf != nil {
        dumpedRequest, err = requests.Dump(request, reqURL)
    if e.options.Options.Debug || e.options.ProjectFile != nil {
        dumpedRequest, err = dump(request, reqURL)
        if err != nil {
            return err
            return nil, err
        }
    }

    if e.debug {
        gologger.Infof("Dumped HTTP request for %s (%s)\n\n", reqURL, e.template.ID)
    if e.options.Options.Debug {
        gologger.Info().Msgf("[%s] Dumped HTTP request for %s\n\n", e.options.TemplateID, reqURL)
        fmt.Fprintf(os.Stderr, "%s", string(dumpedRequest))
    }

    timeStart := time.Now()

    if request.original.Pipeline {
        resp, err = request.PipelineClient.DoRaw(request.RawRequest.Method, reqURL, request.RawRequest.Path, requests.ExpandMapValues(request.RawRequest.Headers), ioutil.NopCloser(strings.NewReader(request.RawRequest.Data)))
        if err != nil {
            if resp != nil {
                resp.Body.Close()
            }
            e.traceLog.Request(e.template.ID, reqURL, "http", err)
            return err
        }
        e.traceLog.Request(e.template.ID, reqURL, "http", nil)
        resp, err = request.pipelinedClient.DoRaw(request.rawRequest.Method, reqURL, request.rawRequest.Path, generators.ExpandMapValues(request.rawRequest.Headers), ioutil.NopCloser(strings.NewReader(request.rawRequest.Data)))
    } else if request.original.Unsafe {
        // rawhttp
        // burp uses "\r\n" as new line character
        request.rawRequest.Data = strings.ReplaceAll(request.RawRequest.Data, "\n", "\r\n")
        options := e.rawHTTPClient.Options
        options.AutomaticContentLength = request.AutomaticContentLengthHeader
        options.AutomaticHostHeader = request.AutomaticHostHeader
        options.FollowRedirects = request.FollowRedirects
        resp, err = e.rawHTTPClient.DoRawWithOptions(request.RawRequest.Method, reqURL, request.RawRequest.Path, requests.ExpandMapValues(request.RawRequest.Headers), ioutil.NopCloser(strings.NewReader(request.RawRequest.Data)), options)
        if err != nil {
            if resp != nil {
                resp.Body.Close()
            }
            e.traceLog.Request(e.template.ID, reqURL, "http", err)
            return err
        }
        e.traceLog.Request(e.template.ID, reqURL, "http", nil)
        request.rawRequest.Data = strings.ReplaceAll(request.rawRequest.Data, "\n", "\r\n")
        options := request.original.rawhttpClient.Options
        options.AutomaticContentLength = !e.DisableAutoContentLength
        options.AutomaticHostHeader = !e.DisableAutoHostname
        options.FollowRedirects = e.Redirects
        resp, err = request.original.rawhttpClient.DoRawWithOptions(request.rawRequest.Method, reqURL, request.rawRequest.Path, generators.ExpandMapValues(request.rawRequest.Headers), ioutil.NopCloser(strings.NewReader(request.rawRequest.Data)), options)
    } else {
        // if nuclei-project is available check if the request was already sent previously
        if e.pf != nil {
        if e.options.ProjectFile != nil {
            // if unavailable fail silently
            fromcache = true
            // nolint:bodyclose // false positive the response is generated at runtime
            resp, err = e.pf.Get(dumpedRequest)
            resp, err = e.options.ProjectFile.Get(dumpedRequest)
            if err != nil {
                fromcache = false
            }
        }

        // retryablehttp
        if resp == nil {
            resp, err = e.httpClient.Do(request.Request)
            if err != nil {
                if resp != nil {
                    resp.Body.Close()
                }
                e.traceLog.Request(e.template.ID, reqURL, "http", err)
                return err
            }
            e.traceLog.Request(e.template.ID, reqURL, "http", nil)
            resp, err = e.httpClient.Do(request.request)
        }
    }
    if err != nil {
        if resp != nil {
            _, _ = io.Copy(ioutil.Discard, resp.Body)
            resp.Body.Close()
        }
        e.options.Output.Request(e.options.TemplateID, reqURL, "http", err)
        e.options.Progress.DecrementRequests(1)
        return nil, err
    }
    e.options.Output.Request(e.options.TemplateID, reqURL, "http", err)

    duration := time.Since(timeStart)

    // Dump response - Step 1 - Decompression not yet handled
    var dumpedResponse []byte
    if e.debug {
    if e.options.Options.Debug {
        var dumpErr error
        dumpedResponse, dumpErr = httputil.DumpResponse(resp, true)
        if dumpErr != nil {
            return errors.Wrap(dumpErr, "could not dump http response")
            return nil, errors.Wrap(dumpErr, "could not dump http response")
        }
    }

    data, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        _, copyErr := io.Copy(ioutil.Discard, resp.Body)
        if copyErr != nil {
            resp.Body.Close()
            return copyErr
        }

        _, _ = io.Copy(ioutil.Discard, resp.Body)
        resp.Body.Close()

        return errors.Wrap(err, "could not read http body")
        return nil, errors.Wrap(err, "could not read http body")
    }

    resp.Body.Close()

    // net/http doesn't automatically decompress the response body if an encoding has been specified by the user in the request
    // so in case we have to manually do it
    // net/http doesn't automatically decompress the response body if an
    // encoding has been specified by the user in the request so in case we have to
    // manually do it.
    dataOrig := data
    data, err = requests.HandleDecompression(request, data)
    data, err = handleDecompression(request, data)
    if err != nil {
        return errors.Wrap(err, "could not decompress http body")
        return nil, errors.Wrap(err, "could not decompress http body")
    }

    // Dump response - step 2 - replace gzip body with deflated one or with itself (NOP operation)
    if e.debug {
    if e.options.Options.Debug {
        dumpedResponse = bytes.ReplaceAll(dumpedResponse, dataOrig, data)
        gologger.Infof("Dumped HTTP response for %s (%s)\n\n", reqURL, e.template.ID)
        gologger.Info().Msgf("[%s] Dumped HTTP response for %s\n\n", e.options.TemplateID, reqURL)
        fmt.Fprintf(os.Stderr, "%s\n", string(dumpedResponse))
    }

    // if nuclei-project is enabled store the response if not previously done
    if e.pf != nil && !fromcache {
        err := e.pf.Set(dumpedRequest, resp, data)
    if e.options.ProjectFile != nil && !fromcache {
        err := e.options.ProjectFile.Set(dumpedRequest, resp, data)
        if err != nil {
            return errors.Wrap(err, "could not store in project file")
            return nil, errors.Wrap(err, "could not store in project file")
        }
    }

    // Convert response body from []byte to string with zero copy
    body := unsafeToString(data)

    headers := headersToString(resp.Header)

    var matchData map[string]interface{}
    if payloads != nil {
        matchData = generators.MergeMaps(result.historyData, payloads)
    }
    // var matchData map[string]interface{}
    // if payloads != nil {
    // matchData = generators.MergeMaps(result.historyData, payloads)
    // }

    // store for internal purposes the DSL matcher data
    // hardcode stopping storing data after defaultMaxHistorydata items
    if len(result.historyData) < defaultMaxHistorydata {
        result.Lock()
        // update history data with current reqURL and hostname
        result.historyData["reqURL"] = reqURL
        if parsed, err := url.Parse(reqURL); err == nil {
            result.historyData["Hostname"] = parsed.Host
    //if len(result.historyData) < defaultMaxHistorydata {
    // result.Lock()
    // // update history data with current reqURL and hostname
    // result.historyData["reqURL"] = reqURL
    // if parsed, err := url.Parse(reqURL); err == nil {
    // result.historyData["Hostname"] = parsed.Host
    // }
    // result.historyData = generators.MergeMaps(result.historyData, matchers.HTTPToMap(resp, body, headers, duration, format))
    // if payloads == nil {
    // // merge them to history data
    // result.historyData = generators.MergeMaps(result.historyData, payloads)
    // }
    // result.historyData = generators.MergeMaps(result.historyData, dynamicvalues)
    //
    // // complement match data with new one if necessary
    // matchData = generators.MergeMaps(matchData, result.historyData)
    // result.Unlock()
    //}
    ouputEvent := e.responseToDSLMap(resp, unsafeToString(dumpedRequest), unsafeToString(dumpedResponse), unsafeToString(data), headersToString(resp.Header), duration, request.meta)

    event := []*output.InternalWrappedEvent{{InternalEvent: ouputEvent}}
    if e.Operators != nil {
        result, ok := e.Operators.Execute(ouputEvent, e.Match, e.Extract)
        if !ok {
            return nil, nil
        }
        result.PayloadValues = request.meta
        event[0].OperatorsResult = result
    }
    return event, nil
}

const two = 2

// setCustomHeaders sets the custom headers for generated request
func (e *Request) setCustomHeaders(r *generatedRequest) {
    for _, customHeader := range e.customHeaders {
        if customHeader == "" {
            continue
        }

        // This should be pre-computed somewhere and done only once
        tokens := strings.SplitN(customHeader, ":", two)
        // if it's an invalid header skip it
        if len(tokens) < 2 {
            continue
        }

        headerName, headerValue := tokens[0], strings.Join(tokens[1:], "")
        if r.rawRequest != nil {
            r.rawRequest.Headers[headerName] = headerValue
        } else {
            r.request.Header.Set(strings.TrimSpace(headerName), strings.TrimSpace(headerValue))
        }
        result.historyData = generators.MergeMaps(result.historyData, matchers.HTTPToMap(resp, body, headers, duration, format))
        if payloads == nil {
            // merge them to history data
            result.historyData = generators.MergeMaps(result.historyData, payloads)
        }
        result.historyData = generators.MergeMaps(result.historyData, dynamicvalues)

        // complement match data with new one if necessary
        matchData = generators.MergeMaps(matchData, result.historyData)
        result.Unlock()
    }

    matcherCondition := e.GetMatchersCondition()
    for _, matcher := range e.Matchers {
        // Check if the matcher matched
        if !matcher.Match(resp, body, headers, duration, matchData) {
            // If the condition is AND we haven't matched, try next request.
            if matcherCondition == matchers.ANDCondition {
                return nil
            }
        } else {
            // If the matcher has matched, and its an OR
            // write the first output then move to next matcher.
            if matcherCondition == matchers.ORCondition {
                result.Lock()
                result.Matches[matcher.Name] = nil
                // probably redundant but ensures we snapshot current payload values when matchers are valid
                result.Meta = request.Meta
                result.GotResults = true
                result.Unlock()
                e.writeOutputHTTP(request, resp, body, matcher, nil, request.Meta, reqURL)
            }
        }
    }

    // All matchers have successfully completed so now start with the
    // next task which is extraction of input from matchers.
    var extractorResults, outputExtractorResults []string

    for _, extractor := range e.Extractors {
        for match := range extractor.Extract(resp, body, headers) {
            if _, ok := dynamicvalues[extractor.Name]; !ok {
                dynamicvalues[extractor.Name] = match
            }

            extractorResults = append(extractorResults, match)

            if !extractor.Internal {
                outputExtractorResults = append(outputExtractorResults, match)
            }
        }
        // probably redundant but ensures we snapshot current payload values when extractors are valid
        result.Lock()
        result.Meta = request.Meta
        result.Extractions[extractor.Name] = extractorResults
        result.Unlock()
    }

    // Write a final string of output if matcher type is
    // AND or if we have extractors for the mechanism too.
    if len(outputExtractorResults) > 0 || matcherCondition == matchers.ANDCondition {
        e.writeOutputHTTP(request, resp, body, nil, outputExtractorResults, request.Meta, reqURL)
        result.Lock()
        result.GotResults = true
        result.Unlock()
    }

    gologger.Verbosef("Sent for [%s] to %s\n", "http-request", e.template.ID, reqURL)
    return nil
}
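setCustomHeaders above splits each user-supplied header on the first colon only, so values that themselves contain a colon (URLs, for instance) survive intact. A tiny standalone illustration of that parsing rule; splitHeader is mine, not code from the commit.

package main

import (
    "fmt"
    "strings"
)

// splitHeader mimics the parsing in setCustomHeaders: split on the first ':'
// only and skip entries that have no ':' at all.
func splitHeader(customHeader string) (name, value string, ok bool) {
    tokens := strings.SplitN(customHeader, ":", 2)
    if len(tokens) < 2 {
        return "", "", false
    }
    return strings.TrimSpace(tokens[0]), strings.TrimSpace(tokens[1]), true
}

func main() {
    name, value, ok := splitHeader("Referer: https://example.com/login")
    fmt.Println(name, "=", value, ok) // Referer = https://example.com/login true
}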
@@ -0,0 +1,75 @@
package http

import (
    "bytes"
    "compress/gzip"
    "io/ioutil"
    "net/http"
    "net/http/httputil"
    "strings"
    "unsafe"

    "github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/generators"
    "github.com/projectdiscovery/rawhttp"
)

// unsafeToString converts byte slice to string with zero allocations
func unsafeToString(bs []byte) string {
    return *(*string)(unsafe.Pointer(&bs))
}

// headersToString converts http headers to string
func headersToString(headers http.Header) string {
    builder := &strings.Builder{}

    for header, values := range headers {
        builder.WriteString(header)
        builder.WriteString(": ")

        for i, value := range values {
            builder.WriteString(value)

            if i != len(values)-1 {
                builder.WriteRune('\n')
                builder.WriteString(header)
                builder.WriteString(": ")
            }
        }
        builder.WriteRune('\n')
    }
    return builder.String()
}

// dump creates a dump of the http request in form of a byte slice
func dump(req *generatedRequest, reqURL string) ([]byte, error) {
    if req.request != nil {
        // Create a copy on the fly of the request body - ignore errors
        bodyBytes, _ := req.request.BodyBytes()
        req.request.Request.Body = ioutil.NopCloser(bytes.NewReader(bodyBytes))
        return httputil.DumpRequest(req.request.Request, true)
    }
    return rawhttp.DumpRequestRaw(req.rawRequest.Method, reqURL, req.rawRequest.Path, generators.ExpandMapValues(req.rawRequest.Headers), ioutil.NopCloser(strings.NewReader(req.rawRequest.Data)))
}

// handleDecompression if the user specified a custom encoding (as golang transport doesn't do this automatically)
func handleDecompression(r *generatedRequest, bodyOrig []byte) (bodyDec []byte, err error) {
    if r.request == nil {
        return bodyOrig, nil
    }

    encodingHeader := strings.TrimSpace(strings.ToLower(r.request.Header.Get("Accept-Encoding")))
    if encodingHeader == "gzip" || encodingHeader == "gzip, deflate" {
        gzipreader, err := gzip.NewReader(bytes.NewReader(bodyOrig))
        if err != nil {
            return bodyDec, err
        }
        defer gzipreader.Close()

        bodyDec, err = ioutil.ReadAll(gzipreader)
        if err != nil {
            return bodyDec, err
        }
        return bodyDec, nil
    }
    return bodyOrig, nil
}
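handleDecompression only kicks in when the request explicitly asked for gzip. For reference, a standard-library-only round trip using the same gzip.NewReader plus ReadAll decode path the helper relies on; the example program is illustrative and not part of the commit.

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io/ioutil"
)

func main() {
    // Produce a gzip-compressed body, standing in for a server response.
    var buf bytes.Buffer
    gz := gzip.NewWriter(&buf)
    gz.Write([]byte("compressed response body"))
    gz.Close()

    // Decode it the same way handleDecompression does for Accept-Encoding: gzip.
    reader, err := gzip.NewReader(bytes.NewReader(buf.Bytes()))
    if err != nil {
        panic(err)
    }
    defer reader.Close()

    plain, err := ioutil.ReadAll(reader)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(plain)) // compressed response body
}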
@@ -5,6 +5,7 @@ import (
    "github.com/projectdiscovery/nuclei/v2/pkg/operators/extractors"
    "github.com/projectdiscovery/nuclei/v2/pkg/operators/matchers"
    "github.com/projectdiscovery/nuclei/v2/pkg/output"
    "github.com/projectdiscovery/nuclei/v2/pkg/projectfile"
    "github.com/projectdiscovery/nuclei/v2/pkg/types"
    "go.uber.org/ratelimit"
)
@@ -35,6 +36,8 @@ type ExecuterOptions struct {
    Progress *progress.Progress
    // RateLimiter is a rate-limiter for limiting sent number of requests.
    RateLimiter ratelimit.Limiter
    // ProjectFile is the project file for nuclei
    ProjectFile *projectfile.ProjectFile
}

// Request is an interface implemented any protocol based request generator.