mirror of https://github.com/daffainfo/nuclei.git
commit
be9cf1179a
|
@ -1,19 +1,111 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/runner"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
"github.com/spf13/cast"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Parse the command line flags and read config files
|
||||
options := runner.ParseOptions()
|
||||
var (
|
||||
cfgFile string
|
||||
|
||||
options = &types.Options{}
|
||||
rootCmd = &cobra.Command{
|
||||
Use: "nuclei",
|
||||
Short: "Nuclei is a fast and extensible security scanner",
|
||||
Long: `Nuclei is a fast tool for configurable targeted scanning
|
||||
based on templates offering massive extensibility and ease of use.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
mergeViperConfiguration(cmd)
|
||||
|
||||
runner.ParseOptions(options)
|
||||
|
||||
nucleiRunner, err := runner.New(options)
|
||||
if err != nil {
|
||||
gologger.Fatalf("Could not create runner: %s\n", err)
|
||||
gologger.Fatal().Msgf("Could not create runner: %s\n", err)
|
||||
}
|
||||
|
||||
nucleiRunner.RunEnumeration()
|
||||
nucleiRunner.Close()
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func main() {
|
||||
rootCmd.Execute()
|
||||
}
|
||||
|
||||
// mergeViperConfiguration merges the flag configuration with viper file.
|
||||
func mergeViperConfiguration(cmd *cobra.Command) {
|
||||
cmd.PersistentFlags().VisitAll(func(f *pflag.Flag) {
|
||||
if !f.Changed && viper.IsSet(f.Name) {
|
||||
switch p := viper.Get(f.Name).(type) {
|
||||
case []interface{}:
|
||||
for _, item := range p {
|
||||
cmd.PersistentFlags().Set(f.Name, cast.ToString(item))
|
||||
}
|
||||
default:
|
||||
cmd.PersistentFlags().Set(f.Name, viper.GetString(f.Name))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func init() {
|
||||
home, _ := os.UserHomeDir()
|
||||
templatesDirectory := path.Join(home, "nuclei-templates")
|
||||
|
||||
cobra.OnInitialize(func() {
|
||||
if cfgFile != "" {
|
||||
viper.SetConfigFile(cfgFile)
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
gologger.Fatal().Msgf("Could not read config: %s\n", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "Nuclei config file (default is $HOME/.nuclei.yaml)")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.Metrics, "metrics", false, "Expose nuclei metrics on a port")
|
||||
rootCmd.PersistentFlags().IntVar(&options.MetricsPort, "metrics-port", 9092, "Port to expose nuclei metrics on")
|
||||
rootCmd.PersistentFlags().StringVar(&options.Target, "target", "", "Target is a single target to scan using template")
|
||||
rootCmd.PersistentFlags().StringSliceVarP(&options.Templates, "templates", "t", []string{}, "Template input dir/file/files to run on host. Can be used multiple times. Supports globbing.")
|
||||
rootCmd.PersistentFlags().StringSliceVar(&options.ExcludedTemplates, "exclude", []string{}, "Template input dir/file/files to exclude. Can be used multiple times. Supports globbing.")
|
||||
rootCmd.PersistentFlags().StringSliceVar(&options.Severity, "severity", []string{}, "Filter templates based on their severity and only run the matching ones. Comma-separated values can be used to specify multiple severities.")
|
||||
rootCmd.PersistentFlags().StringVarP(&options.Targets, "list", "l", "", "List of URLs to run templates on")
|
||||
rootCmd.PersistentFlags().StringVarP(&options.Output, "output", "o", "", "File to write output to (optional)")
|
||||
rootCmd.PersistentFlags().StringVar(&options.ProxyURL, "proxy-url", "", "URL of the proxy server")
|
||||
rootCmd.PersistentFlags().StringVar(&options.ProxySocksURL, "proxy-socks-url", "", "URL of the proxy socks server")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.Silent, "silent", false, "Show only results in output")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.Version, "version", false, "Show version of nuclei")
|
||||
rootCmd.PersistentFlags().BoolVarP(&options.Verbose, "verbose", "v", false, "Show Verbose output")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.NoColor, "no-color", false, "Disable colors in output")
|
||||
rootCmd.PersistentFlags().IntVar(&options.Timeout, "timeout", 5, "Time to wait in seconds before timeout")
|
||||
rootCmd.PersistentFlags().IntVar(&options.Retries, "retries", 1, "Number of times to retry a failed request")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.RandomAgent, "random-agent", false, "Use randomly selected HTTP User-Agent header value")
|
||||
rootCmd.PersistentFlags().StringSliceVarP(&options.CustomHeaders, "header", "H", []string{}, "Custom Header.")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.Debug, "debug", false, "Allow debugging of request/responses")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.DebugRequests, "debug-req", false, "Allow debugging of request")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.DebugResponse, "debug-resp", false, "Allow debugging of response")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.UpdateTemplates, "update-templates", false, "Update Templates updates the installed templates (optional)")
|
||||
rootCmd.PersistentFlags().StringVar(&options.TraceLogFile, "trace-log", "", "File to write sent requests trace log")
|
||||
rootCmd.PersistentFlags().StringVar(&options.TemplatesDirectory, "update-directory", templatesDirectory, "Directory to use for storing nuclei-templates")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.JSON, "json", false, "Write json output to files")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.JSONRequests, "include-rr", false, "Write requests/responses for matches in JSON output")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.EnableProgressBar, "stats", false, "Display stats of the running scan")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.TemplateList, "tl", false, "List available templates")
|
||||
rootCmd.PersistentFlags().IntVar(&options.RateLimit, "rate-limit", 150, "Rate-Limit (maximum requests/second")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.StopAtFirstMatch, "stop-at-first-match", false, "Stop processing http requests at first match (this may break template/workflow logic)")
|
||||
rootCmd.PersistentFlags().IntVar(&options.BulkSize, "bulk-size", 25, "Maximum Number of hosts analyzed in parallel per template")
|
||||
rootCmd.PersistentFlags().IntVarP(&options.TemplateThreads, "concurrency", "c", 10, "Maximum Number of templates executed in parallel")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.Project, "project", false, "Use a project folder to avoid sending same request multiple times")
|
||||
rootCmd.PersistentFlags().StringVar(&options.ProjectPath, "project-path", "", "Use a user defined project folder, temporary folder is used if not specified but enabled")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.NoMeta, "no-meta", false, "Don't display metadata for the matches")
|
||||
rootCmd.PersistentFlags().BoolVar(&options.TemplatesVersion, "templates-version", false, "Shows the installed nuclei-templates version")
|
||||
rootCmd.PersistentFlags().StringVar(&options.BurpCollaboratorBiid, "burp-collaborator-biid", "", "Burp Collaborator BIID")
|
||||
}
|
||||
|
|
14
v2/go.mod
14
v2/go.mod
|
@ -6,27 +6,33 @@ require (
|
|||
github.com/Knetic/govaluate v3.0.0+incompatible
|
||||
github.com/blang/semver v3.5.1+incompatible
|
||||
github.com/corpix/uarand v0.1.1
|
||||
github.com/d5/tengo/v2 v2.6.2
|
||||
github.com/goccy/go-yaml v1.8.4
|
||||
github.com/google/go-github/v32 v32.1.0
|
||||
github.com/json-iterator/go v1.1.10
|
||||
github.com/karrick/godirwalk v1.16.1
|
||||
github.com/kr/pretty v0.1.0 // indirect
|
||||
github.com/logrusorgru/aurora v2.0.3+incompatible
|
||||
github.com/miekg/dns v1.1.35
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.1 // indirect
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/projectdiscovery/clistats v0.0.7
|
||||
github.com/projectdiscovery/collaborator v0.0.2
|
||||
github.com/projectdiscovery/fastdialer v0.0.2
|
||||
github.com/projectdiscovery/gologger v1.0.1
|
||||
github.com/projectdiscovery/gologger v1.1.3
|
||||
github.com/projectdiscovery/hmap v0.0.1
|
||||
github.com/projectdiscovery/nuclei/v2 v2.2.0
|
||||
github.com/projectdiscovery/rawhttp v0.0.4
|
||||
github.com/projectdiscovery/retryabledns v1.0.5
|
||||
github.com/projectdiscovery/retryablehttp-go v1.0.1
|
||||
github.com/remeh/sizedwaitgroup v1.0.0
|
||||
github.com/segmentio/ksuid v1.0.3
|
||||
github.com/spaolacci/murmur3 v1.1.0
|
||||
github.com/spf13/cast v1.3.1
|
||||
github.com/stretchr/testify v1.6.1
|
||||
go.uber.org/atomic v1.7.0
|
||||
go.uber.org/multierr v1.6.0
|
||||
go.uber.org/ratelimit v0.1.0
|
||||
golang.org/x/net v0.0.0-20201216054612-986b41b23924
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
|
53
v2/go.sum
53
v2/go.sum
|
@ -8,15 +8,21 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb
|
|||
github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA=
|
||||
github.com/corpix/uarand v0.1.1 h1:RMr1TWc9F4n5jiPDzFHtmaUXLKLNUFK0SgCLo4BhX/U=
|
||||
github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU=
|
||||
github.com/d5/tengo/v2 v2.6.2 h1:AnPhA/Y5qrNLb5QSWHU9uXq25T3QTTdd2waTgsAHMdc=
|
||||
github.com/d5/tengo/v2 v2.6.2/go.mod h1:XRGjEs5I9jYIKTxly6HCF8oiiilk5E/RYXOZ5b0DZC8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
|
||||
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
|
||||
github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
|
||||
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
|
||||
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
|
||||
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
|
||||
github.com/goccy/go-yaml v1.8.4 h1:AOEdR7aQgbgwHznGe3BLkDQVujxCPUpHOZZcQcp8Y3M=
|
||||
github.com/goccy/go-yaml v1.8.4/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
|
@ -34,17 +40,31 @@ github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr
|
|||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
|
||||
github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
|
||||
github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
|
||||
github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8=
|
||||
github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
|
||||
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
|
||||
github.com/miekg/dns v1.1.35 h1:oTfOaDH+mZkdcgdIjH6yBajRGtIwcwcaR+rt23ZSrJs=
|
||||
github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
|
||||
|
@ -55,21 +75,16 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
|||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/projectdiscovery/clistats v0.0.5/go.mod h1:lV6jUHAv2bYWqrQstqW8iVIydKJhWlVaLl3Xo9ioVGg=
|
||||
github.com/projectdiscovery/clistats v0.0.7 h1:Q/erjrk2p3BIQq1RaHVtBpgboghNz0u1/lyQ2fr8Cn0=
|
||||
github.com/projectdiscovery/clistats v0.0.7/go.mod h1:lV6jUHAv2bYWqrQstqW8iVIydKJhWlVaLl3Xo9ioVGg=
|
||||
github.com/projectdiscovery/collaborator v0.0.1/go.mod h1:J1z0fC7Svutz3LJqoRyTHA3F0Suh4livmkYv8MnKw20=
|
||||
github.com/projectdiscovery/collaborator v0.0.2 h1:BSiMlWM3NvuKbpedn6fIjjEo5b7q5zmiJ6tI7+6mB3s=
|
||||
github.com/projectdiscovery/collaborator v0.0.2/go.mod h1:J1z0fC7Svutz3LJqoRyTHA3F0Suh4livmkYv8MnKw20=
|
||||
github.com/projectdiscovery/fastdialer v0.0.1/go.mod h1:d24GUzSb93wOY7lu4gJmXAzfomqAGEcRrInEVrM6zbc=
|
||||
github.com/projectdiscovery/fastdialer v0.0.2 h1:0VUoHhtUt/HThHUUwbWBxTnFI+tM13RN+TmcybEvbRc=
|
||||
github.com/projectdiscovery/fastdialer v0.0.2/go.mod h1:wjSQICydWE54N49Lcx9nnh5OmtsRwIcLgiVT3GT2zgA=
|
||||
github.com/projectdiscovery/gologger v1.0.1 h1:FzoYQZnxz9DCvSi/eg5A6+ET4CQ0CDUs27l6Exr8zMQ=
|
||||
github.com/projectdiscovery/gologger v1.0.1/go.mod h1:Ok+axMqK53bWNwDSU1nTNwITLYMXMdZtRc8/y1c7sWE=
|
||||
github.com/projectdiscovery/gologger v1.1.3 h1:rKWZW2QUigRV1jnlWwWJbJRvz8b+T/+bB5qemDGGBJU=
|
||||
github.com/projectdiscovery/gologger v1.1.3/go.mod h1:jdXflz3TLB8bcVNzb0v26TztI9KPz8Lr4BVdUhNUs6E=
|
||||
github.com/projectdiscovery/hmap v0.0.1 h1:VAONbJw5jP+syI5smhsfkrq9XPGn4aiYy5pR6KR1wog=
|
||||
github.com/projectdiscovery/hmap v0.0.1/go.mod h1:VDEfgzkKQdq7iGTKz8Ooul0NuYHQ8qiDs6r8bPD1Sb0=
|
||||
github.com/projectdiscovery/nuclei/v2 v2.2.0 h1:nUrTXM/AIJ8PfEPxEl/pkAHj7iu0TgAkE3e075a1JN0=
|
||||
github.com/projectdiscovery/nuclei/v2 v2.2.0/go.mod h1:JIgYr5seElQh161hT/BUw3g1C4UuWR+VAcT16aZdyJ8=
|
||||
github.com/projectdiscovery/rawhttp v0.0.4 h1:O5IreNGk83d4xTD9e6SpkKbX0sHTs8K1Q33Bz4eYl2E=
|
||||
github.com/projectdiscovery/rawhttp v0.0.4/go.mod h1:PQERZAhAv7yxI/hR6hdDPgK1WTU56l204BweXrBec+0=
|
||||
github.com/projectdiscovery/retryabledns v1.0.5 h1:bQivGy5CuqKlwcxRkgA5ENincqIed/BR2sA6t2gdwuI=
|
||||
|
@ -78,12 +93,14 @@ github.com/projectdiscovery/retryablehttp-go v1.0.1 h1:V7wUvsZNq1Rcz7+IlcyoyQlNw
|
|||
github.com/projectdiscovery/retryablehttp-go v1.0.1/go.mod h1:SrN6iLZilNG1X4neq1D+SBxoqfAF4nyzvmevkTkWsek=
|
||||
github.com/remeh/sizedwaitgroup v1.0.0 h1:VNGGFwNo/R5+MJBf6yrsr110p0m4/OX4S3DCy7Kyl5E=
|
||||
github.com/remeh/sizedwaitgroup v1.0.0/go.mod h1:3j2R4OIe/SeS6YDhICBy22RWjJC5eNCJ1V+9+NVNYlo=
|
||||
github.com/segmentio/ksuid v1.0.3 h1:FoResxvleQwYiPAVKe1tMUlEirodZqlqglIuFsdDntY=
|
||||
github.com/segmentio/ksuid v1.0.3/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
|
@ -91,6 +108,8 @@ github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFd
|
|||
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/ratelimit v0.1.0 h1:U2AruXqeTb4Eh9sYQSTrMhH8Cb7M0Ian2ibBOnBcnAw=
|
||||
go.uber.org/ratelimit v0.1.0/go.mod h1:2X8KaoNd1J0lZV+PxJk/5+DGbO/tpwLR1m++a7FnB/Y=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
|
@ -105,39 +124,45 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
|
|||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201216054612-986b41b23924 h1:QsnDpLLOKwHBBDa8nDws4DYNc/ryVW2vCpxCs09d4PY=
|
||||
golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201113233024-12cec1faf1ba/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
|
|
|
@ -1,74 +0,0 @@
|
|||
package bufwriter
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Writer is a mutex protected buffered writer
|
||||
type Writer struct {
|
||||
file *os.File
|
||||
writer *bufio.Writer
|
||||
mutex *sync.Mutex
|
||||
}
|
||||
|
||||
// New creates a new mutex protected buffered writer for a file
|
||||
func New(file string) (*Writer, error) {
|
||||
output, err := os.Create(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Writer{file: output, writer: bufio.NewWriter(output), mutex: &sync.Mutex{}}, nil
|
||||
}
|
||||
|
||||
// Write writes a byte slice to the underlying file
|
||||
//
|
||||
// It also writes a newline if the last byte isn't a newline character.
|
||||
func (w *Writer) Write(data []byte) error {
|
||||
if len(data) == 0 {
|
||||
return nil
|
||||
}
|
||||
w.mutex.Lock()
|
||||
defer w.mutex.Unlock()
|
||||
|
||||
_, err := w.writer.Write(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if data[len(data)-1] != '\n' {
|
||||
_, err = w.writer.WriteRune('\n')
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteString writes a string to the underlying file
|
||||
//
|
||||
// It also writes a newline if the last byte isn't a newline character.
|
||||
func (w *Writer) WriteString(data string) error {
|
||||
if data == "" {
|
||||
return nil
|
||||
}
|
||||
w.mutex.Lock()
|
||||
defer w.mutex.Unlock()
|
||||
|
||||
_, err := w.writer.WriteString(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if data[len(data)-1] != '\n' {
|
||||
_, err = w.writer.WriteRune('\n')
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Close closes the underlying writer flushing everything to disk
|
||||
func (w *Writer) Close() error {
|
||||
w.mutex.Lock()
|
||||
defer w.mutex.Unlock()
|
||||
|
||||
w.writer.Flush()
|
||||
//nolint:errcheck // we don't care whether sync failed or succeeded.
|
||||
w.file.Sync()
|
||||
return w.file.Close()
|
||||
}
|
|
@ -0,0 +1,72 @@
|
|||
package collaborator
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/projectdiscovery/collaborator"
|
||||
)
|
||||
|
||||
var (
|
||||
// PollSeconds is the seconds to poll at.
|
||||
PollSeconds = 5
|
||||
// DefaultMaxBufferLimit is the default request buffer limit
|
||||
DefaultMaxBufferLimit = 150
|
||||
// DefaultPollInterval is the default poll interval for burp collabortor polling.
|
||||
DefaultPollInterval time.Duration = time.Second * time.Duration(PollSeconds)
|
||||
// DefaultCollaborator is the default burp collaborator instance
|
||||
DefaultCollaborator = &Collaborator{Collab: collaborator.NewBurpCollaborator()}
|
||||
)
|
||||
|
||||
// Collaborator is a client for recording burp collaborator interactions
|
||||
type Collaborator struct {
|
||||
sync.RWMutex
|
||||
options *Options // unused
|
||||
Collab *collaborator.BurpCollaborator
|
||||
}
|
||||
|
||||
// Options contains configuration options for collaborator client
|
||||
type Options struct {
|
||||
BIID string
|
||||
PollInterval time.Duration
|
||||
MaxBufferLimit int
|
||||
}
|
||||
|
||||
// New creates a new collaborator client
|
||||
func New(options *Options) *Collaborator {
|
||||
collab := collaborator.NewBurpCollaborator()
|
||||
collab.AddBIID(options.BIID)
|
||||
collab.MaxBufferLimit = options.MaxBufferLimit
|
||||
return &Collaborator{Collab: collab, options: options}
|
||||
}
|
||||
|
||||
// Poll initiates collaborator polling if any BIIDs were provided
|
||||
func (b *Collaborator) Poll() {
|
||||
// if no valid biids were provided just return
|
||||
if len(b.Collab.BIIDs) > 0 {
|
||||
go b.Collab.PollEach(DefaultPollInterval)
|
||||
}
|
||||
}
|
||||
|
||||
// Has checks if a collabrator hit was found for a URL
|
||||
func (b *Collaborator) Has(s string) bool {
|
||||
for _, r := range b.Collab.RespBuffer {
|
||||
for i := 0; i < len(r.Responses); i++ {
|
||||
// search in dns - http - smtp
|
||||
b.RLock()
|
||||
found := strings.Contains(r.Responses[i].Data.RawRequestDecoded, s) ||
|
||||
strings.Contains(r.Responses[i].Data.RequestDecoded, s) ||
|
||||
strings.Contains(r.Responses[i].Data.MessageDecoded, s)
|
||||
b.RUnlock()
|
||||
|
||||
if found {
|
||||
b.Lock()
|
||||
r.Responses = append(r.Responses[:i], r.Responses[i+1:]...)
|
||||
b.Unlock()
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
|
@ -0,0 +1,25 @@
|
|||
package colorizer
|
||||
|
||||
import "github.com/logrusorgru/aurora"
|
||||
|
||||
// Colorizer returns a colorized severity printer
|
||||
type Colorizer struct {
|
||||
Data map[string]string
|
||||
}
|
||||
|
||||
const (
|
||||
fgOrange uint8 = 208
|
||||
undefined string = "undefined"
|
||||
)
|
||||
|
||||
// New returns a new severity based colorizer
|
||||
func New(colorizer aurora.Aurora) *Colorizer {
|
||||
severityMap := map[string]string{
|
||||
"info": colorizer.Blue("info").String(),
|
||||
"low": colorizer.Green("low").String(),
|
||||
"medium": colorizer.Yellow("medium").String(),
|
||||
"high": colorizer.Index(fgOrange, "high").String(),
|
||||
"critical": colorizer.Red("critical").String(),
|
||||
}
|
||||
return &Colorizer{Data: severityMap}
|
||||
}
|
|
@ -53,7 +53,7 @@ func NewProgress(active, metrics bool, port int) (*Progress, error) {
|
|||
}
|
||||
go func() {
|
||||
if err := progress.server.ListenAndServe(); err != nil {
|
||||
gologger.Warningf("Could not serve metrics: %s\n", err)
|
||||
gologger.Warning().Msgf("Could not serve metrics: %s", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
@ -67,11 +67,12 @@ func (p *Progress) Init(hostCount int64, rulesCount int, requestCount int64) {
|
|||
p.stats.AddStatic("startedAt", time.Now())
|
||||
p.stats.AddCounter("requests", uint64(0))
|
||||
p.stats.AddCounter("errors", uint64(0))
|
||||
p.stats.AddCounter("matched", uint64(0))
|
||||
p.stats.AddCounter("total", uint64(requestCount))
|
||||
|
||||
if p.active {
|
||||
if err := p.stats.Start(makePrintCallback(), p.tickDuration); err != nil {
|
||||
gologger.Warningf("Couldn't start statistics: %s\n", err)
|
||||
gologger.Warning().Msgf("Couldn't start statistics: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -81,15 +82,20 @@ func (p *Progress) AddToTotal(delta int64) {
|
|||
p.stats.IncrementCounter("total", int(delta))
|
||||
}
|
||||
|
||||
// Update progress tracking information and increments the request counter by one unit.
|
||||
func (p *Progress) Update() {
|
||||
// IncrementRequests increments the requests counter by 1.
|
||||
func (p *Progress) IncrementRequests() {
|
||||
p.stats.IncrementCounter("requests", 1)
|
||||
}
|
||||
|
||||
// Drop drops the specified number of requests from the progress bar total.
|
||||
// This may be the case when uncompleted requests are encountered and shouldn't be part of the total count.
|
||||
func (p *Progress) Drop(count int64) {
|
||||
// IncrementMatched increments the matched counter by 1.
|
||||
func (p *Progress) IncrementMatched() {
|
||||
p.stats.IncrementCounter("matched", 1)
|
||||
}
|
||||
|
||||
// DecrementRequests decrements the number of requests from total.
|
||||
func (p *Progress) DecrementRequests(count int64) {
|
||||
// mimic dropping by incrementing the completed requests
|
||||
p.stats.IncrementCounter("requests", int(count))
|
||||
p.stats.IncrementCounter("errors", int(count))
|
||||
}
|
||||
|
||||
|
@ -119,6 +125,11 @@ func makePrintCallback() func(stats clistats.StatisticsClient) {
|
|||
builder.WriteString(" | RPS: ")
|
||||
builder.WriteString(clistats.String(uint64(float64(requests) / duration.Seconds())))
|
||||
|
||||
matched, _ := stats.GetCounter("matched")
|
||||
|
||||
builder.WriteString(" | Matched: ")
|
||||
builder.WriteString(clistats.String(matched))
|
||||
|
||||
errors, _ := stats.GetCounter("errors")
|
||||
builder.WriteString(" | Errors: ")
|
||||
builder.WriteString(clistats.String(errors))
|
||||
|
@ -153,6 +164,8 @@ func (p *Progress) getMetrics() map[string]interface{} {
|
|||
results["templates"] = clistats.String(templates)
|
||||
hosts, _ := p.stats.GetStatic("hosts")
|
||||
results["hosts"] = clistats.String(hosts)
|
||||
matched, _ := p.stats.GetStatic("matched")
|
||||
results["matched"] = clistats.String(matched)
|
||||
requests, _ := p.stats.GetCounter("requests")
|
||||
results["requests"] = clistats.String(requests)
|
||||
total, _ := p.stats.GetCounter("total")
|
||||
|
@ -183,7 +196,7 @@ func fmtDuration(d time.Duration) string {
|
|||
func (p *Progress) Stop() {
|
||||
if p.active {
|
||||
if err := p.stats.Stop(); err != nil {
|
||||
gologger.Warningf("Couldn't stop statistics: %s\n", err)
|
||||
gologger.Warning().Msgf("Couldn't stop statistics: %s", err)
|
||||
}
|
||||
}
|
||||
if p.server != nil {
|
||||
|
|
|
@ -15,9 +15,9 @@ const Version = `2.2.1-dev`
|
|||
|
||||
// showBanner is used to show the banner to the user
|
||||
func showBanner() {
|
||||
gologger.Printf("%s\n", banner)
|
||||
gologger.Printf("\t\tprojectdiscovery.io\n\n")
|
||||
gologger.Print().Msgf("%s\n", banner)
|
||||
gologger.Print().Msgf("\t\tprojectdiscovery.io\n\n")
|
||||
|
||||
gologger.Labelf("Use with caution. You are responsible for your actions\n")
|
||||
gologger.Labelf("Developers assume no liability and are not responsible for any misuse or damage.\n")
|
||||
gologger.Warning().Msgf("Use with caution. You are responsible for your actions\n")
|
||||
gologger.Warning().Msgf("Developers assume no liability and are not responsible for any misuse or damage.\n")
|
||||
}
|
||||
|
|
|
@ -93,30 +93,7 @@ func (r *Runner) readNucleiIgnoreFile() {
|
|||
}
|
||||
}
|
||||
|
||||
// checkIfInNucleiIgnore checks if a path falls under nuclei-ignore rules.
|
||||
func (r *Runner) checkIfInNucleiIgnore(item string) bool {
|
||||
if r.templatesConfig == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, paths := range r.templatesConfig.IgnorePaths {
|
||||
// If we have a path to ignore, check if it's in the item.
|
||||
if paths[len(paths)-1] == '/' {
|
||||
if strings.Contains(item, paths) {
|
||||
return true
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
// Check for file based extension in ignores
|
||||
if strings.HasSuffix(item, paths) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// getIgnoreFilePath returns the ignore file path for the runner
|
||||
func (r *Runner) getIgnoreFilePath() string {
|
||||
defIgnoreFilePath := path.Join(r.templatesConfig.TemplatesDirectory, nucleiIgnoreFile)
|
||||
|
||||
|
@ -124,13 +101,11 @@ func (r *Runner) getIgnoreFilePath() string {
|
|||
if err != nil {
|
||||
return defIgnoreFilePath
|
||||
}
|
||||
|
||||
cwdIgnoreFilePath := path.Join(cwd, nucleiIgnoreFile)
|
||||
|
||||
cwdIfpInfo, err := os.Stat(cwdIgnoreFilePath)
|
||||
if os.IsNotExist(err) || cwdIfpInfo.IsDir() {
|
||||
return defIgnoreFilePath
|
||||
}
|
||||
|
||||
return cwdIgnoreFilePath
|
||||
}
|
||||
|
|
|
@ -2,146 +2,53 @@ package runner
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/requests"
|
||||
"github.com/projectdiscovery/gologger/formatter"
|
||||
"github.com/projectdiscovery/gologger/levels"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/protocolinit"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
)
|
||||
|
||||
// Options contains the configuration options for tuning
|
||||
// the template requesting process.
|
||||
type Options struct {
|
||||
Vhost bool // Mark the input specified as VHOST input
|
||||
RandomAgent bool // Generate random User-Agent
|
||||
Metrics bool // Metrics enables display of metrics via an http endpoint
|
||||
Sandbox bool // Sandbox mode allows users to run isolated workflows with system commands disabled
|
||||
Debug bool // Debug mode allows debugging request/responses for the engine
|
||||
Silent bool // Silent suppresses any extra text and only writes found URLs on screen.
|
||||
Version bool // Version specifies if we should just show version and exit
|
||||
Verbose bool // Verbose flag indicates whether to show verbose output or not
|
||||
NoColor bool // No-Color disables the colored output.
|
||||
UpdateTemplates bool // UpdateTemplates updates the templates installed at startup
|
||||
JSON bool // JSON writes json output to files
|
||||
JSONRequests bool // write requests/responses for matches in JSON output
|
||||
EnableProgressBar bool // Enable progrss bar
|
||||
TemplatesVersion bool // Show the templates installed version
|
||||
TemplateList bool // List available templates
|
||||
Stdin bool // Stdin specifies whether stdin input was given to the process
|
||||
StopAtFirstMatch bool // Stop processing template at first full match (this may break chained requests)
|
||||
NoMeta bool // Don't display metadata for the matches
|
||||
Project bool // Nuclei uses project folder to avoid sending same HTTP request multiple times
|
||||
MetricsPort int // MetricsPort is the port to show metrics on
|
||||
MaxWorkflowDuration int // MaxWorkflowDuration is the maximum time a workflow can run for a URL
|
||||
BulkSize int // Number of targets analyzed in parallel for each template
|
||||
TemplateThreads int // Number of templates executed in parallel
|
||||
Timeout int // Timeout is the seconds to wait for a response from the server.
|
||||
Retries int // Retries is the number of times to retry the request
|
||||
RateLimit int // Rate-Limit of requests per specified target
|
||||
Threads int // Thread controls the number of concurrent requests to make.
|
||||
BurpCollaboratorBiid string // Burp Collaborator BIID for polling
|
||||
ProjectPath string // Nuclei uses a user defined project folder
|
||||
Severity string // Filter templates based on their severity and only run the matching ones.
|
||||
Target string // Target is a single URL/Domain to scan usng a template
|
||||
Targets string // Targets specifies the targets to scan using templates.
|
||||
Output string // Output is the file to write found subdomains to.
|
||||
ProxyURL string // ProxyURL is the URL for the proxy server
|
||||
ProxySocksURL string // ProxySocksURL is the URL for the proxy socks server
|
||||
TemplatesDirectory string // TemplatesDirectory is the directory to use for storing templates
|
||||
TraceLogFile string // TraceLogFile specifies a file to write with the trace of all requests
|
||||
Templates multiStringFlag // Signature specifies the template/templates to use
|
||||
ExcludedTemplates multiStringFlag // Signature specifies the template/templates to exclude
|
||||
CustomHeaders requests.CustomHeaders // Custom global headers
|
||||
}
|
||||
|
||||
type multiStringFlag []string
|
||||
|
||||
func (m *multiStringFlag) String() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *multiStringFlag) Set(value string) error {
|
||||
*m = append(*m, value)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseOptions parses the command line flags provided by a user
|
||||
func ParseOptions() *Options {
|
||||
options := &Options{}
|
||||
|
||||
flag.BoolVar(&options.Vhost, "vhost", false, "Input supplied is a comma-separated vhost list")
|
||||
flag.BoolVar(&options.Sandbox, "sandbox", false, "Run workflows in isolated sandbox mode")
|
||||
flag.BoolVar(&options.Metrics, "metrics", false, "Expose nuclei metrics on a port")
|
||||
flag.IntVar(&options.MetricsPort, "metrics-port", 9092, "Port to expose nuclei metrics on")
|
||||
flag.IntVar(&options.MaxWorkflowDuration, "workflow-duration", 10, "Max time for workflow run on single URL in minutes")
|
||||
flag.StringVar(&options.Target, "target", "", "Target is a single target to scan using template")
|
||||
flag.Var(&options.Templates, "t", "Template input dir/file/files to run on host. Can be used multiple times. Supports globbing.")
|
||||
flag.Var(&options.ExcludedTemplates, "exclude", "Template input dir/file/files to exclude. Can be used multiple times. Supports globbing.")
|
||||
flag.StringVar(&options.Severity, "severity", "", "Filter templates based on their severity and only run the matching ones. Comma-separated values can be used to specify multiple severities.")
|
||||
flag.StringVar(&options.Targets, "l", "", "List of URLs to run templates on")
|
||||
flag.StringVar(&options.Output, "o", "", "File to write output to (optional)")
|
||||
flag.StringVar(&options.ProxyURL, "proxy-url", "", "URL of the proxy server")
|
||||
flag.StringVar(&options.ProxySocksURL, "proxy-socks-url", "", "URL of the proxy socks server")
|
||||
flag.BoolVar(&options.Silent, "silent", false, "Show only results in output")
|
||||
flag.BoolVar(&options.Version, "version", false, "Show version of nuclei")
|
||||
flag.BoolVar(&options.Verbose, "v", false, "Show Verbose output")
|
||||
flag.BoolVar(&options.NoColor, "no-color", false, "Disable colors in output")
|
||||
flag.IntVar(&options.Timeout, "timeout", 5, "Time to wait in seconds before timeout")
|
||||
flag.IntVar(&options.Retries, "retries", 1, "Number of times to retry a failed request")
|
||||
flag.BoolVar(&options.RandomAgent, "random-agent", false, "Use randomly selected HTTP User-Agent header value")
|
||||
flag.Var(&options.CustomHeaders, "H", "Custom Header.")
|
||||
flag.BoolVar(&options.Debug, "debug", false, "Allow debugging of request/responses")
|
||||
flag.BoolVar(&options.UpdateTemplates, "update-templates", false, "Update Templates updates the installed templates (optional)")
|
||||
flag.StringVar(&options.TraceLogFile, "trace-log", "", "File to write sent requests trace log")
|
||||
flag.StringVar(&options.TemplatesDirectory, "update-directory", "", "Directory to use for storing nuclei-templates")
|
||||
flag.BoolVar(&options.JSON, "json", false, "Write json output to files")
|
||||
flag.BoolVar(&options.JSONRequests, "include-rr", false, "Write requests/responses for matches in JSON output")
|
||||
flag.BoolVar(&options.EnableProgressBar, "stats", false, "Display stats of the running scan")
|
||||
flag.BoolVar(&options.TemplateList, "tl", false, "List available templates")
|
||||
flag.IntVar(&options.RateLimit, "rate-limit", 150, "Rate-Limit (maximum requests/second")
|
||||
flag.BoolVar(&options.StopAtFirstMatch, "stop-at-first-match", false, "Stop processing http requests at first match (this may break template/workflow logic)")
|
||||
flag.IntVar(&options.BulkSize, "bulk-size", 25, "Maximum Number of hosts analyzed in parallel per template")
|
||||
flag.IntVar(&options.TemplateThreads, "c", 10, "Maximum Number of templates executed in parallel")
|
||||
flag.BoolVar(&options.Project, "project", false, "Use a project folder to avoid sending same request multiple times")
|
||||
flag.StringVar(&options.ProjectPath, "project-path", "", "Use a user defined project folder, temporary folder is used if not specified but enabled")
|
||||
flag.BoolVar(&options.NoMeta, "no-meta", false, "Don't display metadata for the matches")
|
||||
flag.BoolVar(&options.TemplatesVersion, "templates-version", false, "Shows the installed nuclei-templates version")
|
||||
flag.StringVar(&options.BurpCollaboratorBiid, "burp-collaborator-biid", "", "Burp Collaborator BIID")
|
||||
flag.Parse()
|
||||
func ParseOptions(options *types.Options) {
|
||||
err := protocolinit.Init(options)
|
||||
if err != nil {
|
||||
gologger.Fatal().Msgf("Could not initialize protocols: %s\n", err)
|
||||
}
|
||||
|
||||
// Check if stdin pipe was given
|
||||
options.Stdin = hasStdin()
|
||||
|
||||
// Read the inputs and configure the logging
|
||||
options.configureOutput()
|
||||
configureOutput(options)
|
||||
|
||||
// Show the user the banner
|
||||
showBanner()
|
||||
|
||||
if options.Version {
|
||||
gologger.Infof("Current Version: %s\n", Version)
|
||||
gologger.Info().Msgf("Current Version: %s\n", Version)
|
||||
os.Exit(0)
|
||||
}
|
||||
if options.TemplatesVersion {
|
||||
config, err := readConfiguration()
|
||||
if err != nil {
|
||||
gologger.Fatalf("Could not read template configuration: %s\n", err)
|
||||
gologger.Fatal().Msgf("Could not read template configuration: %s\n", err)
|
||||
}
|
||||
gologger.Infof("Current nuclei-templates version: %s (%s)\n", config.CurrentVersion, config.TemplatesDirectory)
|
||||
gologger.Info().Msgf("Current nuclei-templates version: %s (%s)\n", config.CurrentVersion, config.TemplatesDirectory)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
// Validate the options passed by the user and if any
|
||||
// invalid options have been used, exit.
|
||||
err := options.validateOptions()
|
||||
if err != nil {
|
||||
gologger.Fatalf("Program exiting: %s\n", err)
|
||||
if err = validateOptions(options); err != nil {
|
||||
gologger.Fatal().Msgf("Program exiting: %s\n", err)
|
||||
}
|
||||
|
||||
return options
|
||||
}
|
||||
|
||||
// hasStdin returns true if we have stdin input
|
||||
func hasStdin() bool {
|
||||
stat, err := os.Stdin.Stat()
|
||||
if err != nil {
|
||||
|
@ -155,7 +62,7 @@ func hasStdin() bool {
|
|||
}
|
||||
|
||||
// validateOptions validates the configuration options passed
|
||||
func (options *Options) validateOptions() error {
|
||||
func validateOptions(options *types.Options) error {
|
||||
// Both verbose and silent flags were used
|
||||
if options.Verbose && options.Silent {
|
||||
return errors.New("both verbose and silent mode specified")
|
||||
|
@ -173,22 +80,15 @@ func (options *Options) validateOptions() error {
|
|||
}
|
||||
|
||||
// Validate proxy options if provided
|
||||
err := validateProxyURL(
|
||||
options.ProxyURL,
|
||||
"invalid http proxy format (It should be http://username:password@host:port)",
|
||||
)
|
||||
err := validateProxyURL(options.ProxyURL, "invalid http proxy format (It should be http://username:password@host:port)")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = validateProxyURL(
|
||||
options.ProxySocksURL,
|
||||
"invalid socks proxy format (It should be socks5://username:password@host:port)",
|
||||
)
|
||||
err = validateProxyURL(options.ProxySocksURL, "invalid socks proxy format (It should be socks5://username:password@host:port)")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -202,22 +102,22 @@ func validateProxyURL(proxyURL, message string) error {
|
|||
|
||||
func isValidURL(urlString string) bool {
|
||||
_, err := url.Parse(urlString)
|
||||
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// configureOutput configures the output on the screen
|
||||
func (options *Options) configureOutput() {
|
||||
func configureOutput(options *types.Options) {
|
||||
// If the user desires verbose output, show verbose output
|
||||
if options.Verbose {
|
||||
gologger.MaxLevel = gologger.Verbose
|
||||
gologger.DefaultLogger.SetMaxLevel(levels.LevelVerbose)
|
||||
}
|
||||
if options.Debug {
|
||||
gologger.DefaultLogger.SetMaxLevel(levels.LevelDebug)
|
||||
}
|
||||
|
||||
if options.NoColor {
|
||||
gologger.UseColors = false
|
||||
gologger.DefaultLogger.SetFormatter(formatter.NewCLI(true))
|
||||
}
|
||||
|
||||
if options.Silent {
|
||||
gologger.MaxLevel = gologger.Silent
|
||||
gologger.DefaultLogger.SetMaxLevel(levels.LevelSilent)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,48 +0,0 @@
|
|||
package runner
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/projectdiscovery/gologger"
|
||||
)
|
||||
|
||||
// isRelative checks if a given path is a relative path
|
||||
func isRelative(filePath string) bool {
|
||||
if strings.HasPrefix(filePath, "/") || strings.Contains(filePath, ":\\") {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// resolvePath gets the absolute path to the template by either
|
||||
// looking in the current directory or checking the nuclei templates directory.
|
||||
//
|
||||
// Current directory is given preference over the nuclei-templates directory.
|
||||
func (r *Runner) resolvePath(templateName string) (string, error) {
|
||||
curDirectory, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
templatePath := path.Join(curDirectory, templateName)
|
||||
if _, err := os.Stat(templatePath); !os.IsNotExist(err) {
|
||||
gologger.Debugf("Found template in current directory: %s\n", templatePath)
|
||||
|
||||
return templatePath, nil
|
||||
}
|
||||
|
||||
if r.templatesConfig != nil {
|
||||
templatePath := path.Join(r.templatesConfig.TemplatesDirectory, templateName)
|
||||
if _, err := os.Stat(templatePath); !os.IsNotExist(err) {
|
||||
gologger.Debugf("Found template in nuclei-templates directory: %s\n", templatePath)
|
||||
|
||||
return templatePath, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("no such path found: %s", templateName)
|
||||
}
|
|
@ -1,347 +1,69 @@
|
|||
package runner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http/cookiejar"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
tengo "github.com/d5/tengo/v2"
|
||||
"github.com/d5/tengo/v2/stdlib"
|
||||
"github.com/karrick/godirwalk"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/progress"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/atomicboolean"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/executer"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/requests"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/templates"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/workflows"
|
||||
"github.com/remeh/sizedwaitgroup"
|
||||
"go.uber.org/atomic"
|
||||
)
|
||||
|
||||
// workflowTemplates contains the initialized workflow templates per template group
|
||||
type workflowTemplates struct {
|
||||
Name string
|
||||
Templates []*workflows.Template
|
||||
}
|
||||
|
||||
var sandboxedModules = []string{"math", "text", "rand", "fmt", "json", "base64", "hex", "enum"}
|
||||
|
||||
// processTemplateWithList processes a template and runs the enumeration on all the targets
|
||||
func (r *Runner) processTemplateWithList(p *progress.Progress, template *templates.Template, request interface{}) bool {
|
||||
var httpExecuter *executer.HTTPExecuter
|
||||
var dnsExecuter *executer.DNSExecuter
|
||||
var err error
|
||||
|
||||
// Create an executer based on the request type.
|
||||
switch value := request.(type) {
|
||||
case *requests.DNSRequest:
|
||||
dnsExecuter = executer.NewDNSExecuter(&executer.DNSOptions{
|
||||
TraceLog: r.traceLog,
|
||||
Debug: r.options.Debug,
|
||||
Template: template,
|
||||
DNSRequest: value,
|
||||
Writer: r.output,
|
||||
VHost: r.options.Vhost,
|
||||
JSON: r.options.JSON,
|
||||
JSONRequests: r.options.JSONRequests,
|
||||
NoMeta: r.options.NoMeta,
|
||||
ColoredOutput: !r.options.NoColor,
|
||||
Colorizer: r.colorizer,
|
||||
Decolorizer: r.decolorizer,
|
||||
RateLimiter: r.ratelimiter,
|
||||
})
|
||||
case *requests.BulkHTTPRequest:
|
||||
httpExecuter, err = executer.NewHTTPExecuter(&executer.HTTPOptions{
|
||||
TraceLog: r.traceLog,
|
||||
Debug: r.options.Debug,
|
||||
Template: template,
|
||||
BulkHTTPRequest: value,
|
||||
Writer: r.output,
|
||||
Timeout: r.options.Timeout,
|
||||
Retries: r.options.Retries,
|
||||
ProxyURL: r.options.ProxyURL,
|
||||
ProxySocksURL: r.options.ProxySocksURL,
|
||||
RandomAgent: r.options.RandomAgent,
|
||||
CustomHeaders: r.options.CustomHeaders,
|
||||
JSON: r.options.JSON,
|
||||
Vhost: r.options.Vhost,
|
||||
JSONRequests: r.options.JSONRequests,
|
||||
NoMeta: r.options.NoMeta,
|
||||
CookieReuse: value.CookieReuse,
|
||||
ColoredOutput: !r.options.NoColor,
|
||||
Colorizer: &r.colorizer,
|
||||
Decolorizer: r.decolorizer,
|
||||
StopAtFirstMatch: r.options.StopAtFirstMatch,
|
||||
PF: r.pf,
|
||||
Dialer: r.dialer,
|
||||
RateLimiter: r.ratelimiter,
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
p.Drop(request.(*requests.BulkHTTPRequest).GetRequestCount())
|
||||
gologger.Warningf("Could not create http client: %s\n", err)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
var globalresult atomicboolean.AtomBool
|
||||
|
||||
// processTemplateWithList process a template on the URL list
|
||||
func (r *Runner) processTemplateWithList(template *templates.Template) bool {
|
||||
results := &atomic.Bool{}
|
||||
wg := sizedwaitgroup.New(r.options.BulkSize)
|
||||
|
||||
r.hm.Scan(func(k, _ []byte) error {
|
||||
r.hostMap.Scan(func(k, _ []byte) error {
|
||||
URL := string(k)
|
||||
|
||||
wg.Add()
|
||||
go func(URL string) {
|
||||
defer wg.Done()
|
||||
|
||||
var result *executer.Result
|
||||
|
||||
if httpExecuter != nil {
|
||||
result = httpExecuter.ExecuteHTTP(p, URL)
|
||||
globalresult.Or(result.GotResults)
|
||||
}
|
||||
|
||||
if dnsExecuter != nil {
|
||||
result = dnsExecuter.ExecuteDNS(p, URL)
|
||||
globalresult.Or(result.GotResults)
|
||||
}
|
||||
|
||||
if result.Error != nil {
|
||||
gologger.Warningf("[%s] Could not execute step: %s\n", r.colorizer.Colorizer.BrightBlue(template.ID), result.Error)
|
||||
match, err := template.Executer.Execute(URL)
|
||||
if err != nil {
|
||||
gologger.Warning().Msgf("[%s] Could not execute step: %s\n", r.colorizer.BrightBlue(template.ID), err)
|
||||
}
|
||||
results.CAS(false, match)
|
||||
}(URL)
|
||||
return nil
|
||||
})
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// See if we got any results from the executers
|
||||
return globalresult.Get()
|
||||
return results.Load()
|
||||
}
|
||||
|
||||
// ProcessWorkflowWithList coming from stdin or list of targets
|
||||
func (r *Runner) processWorkflowWithList(p *progress.Progress, workflow *workflows.Workflow) bool {
|
||||
result := false
|
||||
|
||||
workflowTemplatesList, err := r.preloadWorkflowTemplates(p, workflow)
|
||||
if err != nil {
|
||||
gologger.Warningf("Could not preload templates for workflow %s: %s\n", workflow.ID, err)
|
||||
return false
|
||||
}
|
||||
logicBytes := []byte(workflow.Logic)
|
||||
|
||||
// processTemplateWithList process a template on the URL list
|
||||
func (r *Runner) processWorkflowWithList(template *templates.Template) bool {
|
||||
results := &atomic.Bool{}
|
||||
wg := sizedwaitgroup.New(r.options.BulkSize)
|
||||
r.hm.Scan(func(k, _ []byte) error {
|
||||
targetURL := string(k)
|
||||
|
||||
r.hostMap.Scan(func(k, _ []byte) error {
|
||||
URL := string(k)
|
||||
wg.Add()
|
||||
|
||||
go func(targetURL string) {
|
||||
go func(URL string) {
|
||||
defer wg.Done()
|
||||
|
||||
script := tengo.NewScript(logicBytes)
|
||||
if !r.options.Sandbox {
|
||||
script.SetImports(stdlib.GetModuleMap(stdlib.AllModuleNames()...))
|
||||
} else {
|
||||
script.SetImports(stdlib.GetModuleMap(sandboxedModules...))
|
||||
}
|
||||
|
||||
variables := make(map[string]*workflows.NucleiVar)
|
||||
for _, workflowTemplate := range *workflowTemplatesList {
|
||||
name := workflowTemplate.Name
|
||||
variable := &workflows.NucleiVar{Templates: workflowTemplate.Templates, URL: targetURL}
|
||||
err := script.Add(name, variable)
|
||||
match, err := template.CompiledWorkflow.RunWorkflow(URL)
|
||||
if err != nil {
|
||||
gologger.Errorf("Could not initialize script for workflow '%s': %s\n", workflow.ID, err)
|
||||
continue
|
||||
gologger.Warning().Msgf("[%s] Could not execute step: %s\n", r.colorizer.BrightBlue(template.ID), err)
|
||||
}
|
||||
variables[name] = variable
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(r.options.MaxWorkflowDuration)*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
_, err := script.RunContext(ctx)
|
||||
if err != nil {
|
||||
gologger.Errorf("Could not execute workflow '%s': %s\n", workflow.ID, err)
|
||||
}
|
||||
|
||||
for _, variable := range variables {
|
||||
result = !variable.IsFalsy()
|
||||
if result {
|
||||
break
|
||||
}
|
||||
}
|
||||
}(targetURL)
|
||||
results.CAS(false, match)
|
||||
}(URL)
|
||||
return nil
|
||||
})
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (r *Runner) preloadWorkflowTemplates(p *progress.Progress, workflow *workflows.Workflow) (*[]workflowTemplates, error) {
|
||||
var jar *cookiejar.Jar
|
||||
|
||||
if workflow.CookieReuse {
|
||||
var err error
|
||||
jar, err = cookiejar.New(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Single yaml provided
|
||||
var wflTemplatesList []workflowTemplates
|
||||
|
||||
for name, value := range workflow.Variables {
|
||||
// Check if the template is an absolute path or relative path.
|
||||
// If the path is absolute, use it. Otherwise,
|
||||
if isRelative(value) {
|
||||
newPath, err := r.resolvePath(value)
|
||||
if err != nil {
|
||||
newPath, err = resolvePathWithBaseFolder(filepath.Dir(workflow.GetPath()), value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
value = newPath
|
||||
}
|
||||
|
||||
var wtlst []*workflows.Template
|
||||
|
||||
if strings.HasSuffix(value, ".yaml") {
|
||||
t, err := templates.Parse(value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
template := &workflows.Template{Progress: p}
|
||||
if len(t.BulkRequestsHTTP) > 0 {
|
||||
template.HTTPOptions = &executer.HTTPOptions{
|
||||
TraceLog: r.traceLog,
|
||||
Debug: r.options.Debug,
|
||||
Writer: r.output,
|
||||
Template: t,
|
||||
Timeout: r.options.Timeout,
|
||||
Retries: r.options.Retries,
|
||||
ProxyURL: r.options.ProxyURL,
|
||||
ProxySocksURL: r.options.ProxySocksURL,
|
||||
RandomAgent: r.options.RandomAgent,
|
||||
CustomHeaders: r.options.CustomHeaders,
|
||||
Vhost: r.options.Vhost,
|
||||
JSON: r.options.JSON,
|
||||
JSONRequests: r.options.JSONRequests,
|
||||
CookieJar: jar,
|
||||
ColoredOutput: !r.options.NoColor,
|
||||
Colorizer: &r.colorizer,
|
||||
Decolorizer: r.decolorizer,
|
||||
PF: r.pf,
|
||||
RateLimiter: r.ratelimiter,
|
||||
NoMeta: r.options.NoMeta,
|
||||
StopAtFirstMatch: r.options.StopAtFirstMatch,
|
||||
Dialer: r.dialer,
|
||||
}
|
||||
} else if len(t.RequestsDNS) > 0 {
|
||||
template.DNSOptions = &executer.DNSOptions{
|
||||
TraceLog: r.traceLog,
|
||||
Debug: r.options.Debug,
|
||||
Template: t,
|
||||
Writer: r.output,
|
||||
VHost: r.options.Vhost,
|
||||
JSON: r.options.JSON,
|
||||
JSONRequests: r.options.JSONRequests,
|
||||
ColoredOutput: !r.options.NoColor,
|
||||
Colorizer: r.colorizer,
|
||||
Decolorizer: r.decolorizer,
|
||||
NoMeta: r.options.NoMeta,
|
||||
RateLimiter: r.ratelimiter,
|
||||
}
|
||||
}
|
||||
|
||||
if template.DNSOptions != nil || template.HTTPOptions != nil {
|
||||
wtlst = append(wtlst, template)
|
||||
}
|
||||
} else {
|
||||
matches := []string{}
|
||||
|
||||
err := godirwalk.Walk(value, &godirwalk.Options{
|
||||
Callback: func(path string, d *godirwalk.Dirent) error {
|
||||
if !d.IsDir() && strings.HasSuffix(path, ".yaml") {
|
||||
matches = append(matches, path)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
ErrorCallback: func(path string, err error) godirwalk.ErrorAction {
|
||||
return godirwalk.SkipNode
|
||||
},
|
||||
Unsorted: true,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 0 matches means no templates were found in directory
|
||||
if len(matches) == 0 {
|
||||
return nil, fmt.Errorf("no match found in the directory %s", value)
|
||||
}
|
||||
|
||||
for _, match := range matches {
|
||||
t, err := templates.Parse(match)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
template := &workflows.Template{Progress: p}
|
||||
if len(t.BulkRequestsHTTP) > 0 {
|
||||
template.HTTPOptions = &executer.HTTPOptions{
|
||||
Debug: r.options.Debug,
|
||||
Writer: r.output,
|
||||
Template: t,
|
||||
Timeout: r.options.Timeout,
|
||||
Retries: r.options.Retries,
|
||||
ProxyURL: r.options.ProxyURL,
|
||||
ProxySocksURL: r.options.ProxySocksURL,
|
||||
RandomAgent: r.options.RandomAgent,
|
||||
CustomHeaders: r.options.CustomHeaders,
|
||||
Vhost: r.options.Vhost,
|
||||
CookieJar: jar,
|
||||
TraceLog: r.traceLog,
|
||||
}
|
||||
} else if len(t.RequestsDNS) > 0 {
|
||||
template.DNSOptions = &executer.DNSOptions{
|
||||
Debug: r.options.Debug,
|
||||
Template: t,
|
||||
Writer: r.output,
|
||||
VHost: r.options.Vhost,
|
||||
TraceLog: r.traceLog,
|
||||
}
|
||||
}
|
||||
if template.DNSOptions != nil || template.HTTPOptions != nil {
|
||||
wtlst = append(wtlst, template)
|
||||
}
|
||||
}
|
||||
}
|
||||
wflTemplatesList = append(wflTemplatesList, workflowTemplates{Name: name, Templates: wtlst})
|
||||
}
|
||||
|
||||
return &wflTemplatesList, nil
|
||||
return results.Load()
|
||||
}
|
||||
|
||||
// resolvePathWithBaseFolder resolves a path with the base folder
|
||||
func resolvePathWithBaseFolder(baseFolder, templateName string) (string, error) {
|
||||
templatePath := path.Join(baseFolder, templateName)
|
||||
if _, err := os.Stat(templatePath); !os.IsNotExist(err) {
|
||||
gologger.Debugf("Found template in current directory: %s\n", templatePath)
|
||||
gologger.Debug().Msgf("Found template in current directory: %s\n", templatePath)
|
||||
return templatePath, nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("no such path found: %s", templateName)
|
||||
}
|
||||
|
|
|
@ -2,84 +2,62 @@ package runner
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/logrusorgru/aurora"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/fastdialer/fastdialer"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/hmap/store/hybrid"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/bufwriter"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/collaborator"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/colorizer"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/progress"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/tracelog"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/atomicboolean"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/collaborator"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/colorizer"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/catalogue"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/projectfile"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/clusterer"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/templates"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/workflows"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
"github.com/remeh/sizedwaitgroup"
|
||||
"github.com/rs/xid"
|
||||
"go.uber.org/atomic"
|
||||
"go.uber.org/ratelimit"
|
||||
)
|
||||
|
||||
// Runner is a client for running the enumeration process.
|
||||
type Runner struct {
|
||||
hostMap *hybrid.HybridMap
|
||||
output output.Writer
|
||||
inputCount int64
|
||||
|
||||
traceLog tracelog.Log
|
||||
|
||||
// output is the output file to write if any
|
||||
output *bufwriter.Writer
|
||||
|
||||
templatesConfig *nucleiConfig
|
||||
// options contains configuration options for runner
|
||||
options *Options
|
||||
|
||||
pf *projectfile.ProjectFile
|
||||
|
||||
// progress tracking
|
||||
options *types.Options
|
||||
projectFile *projectfile.ProjectFile
|
||||
catalogue *catalogue.Catalogue
|
||||
progress *progress.Progress
|
||||
|
||||
// output coloring
|
||||
colorizer colorizer.NucleiColorizer
|
||||
decolorizer *regexp.Regexp
|
||||
|
||||
// rate limiter
|
||||
colorizer aurora.Aurora
|
||||
severityColors *colorizer.Colorizer
|
||||
ratelimiter ratelimit.Limiter
|
||||
|
||||
// input deduplication
|
||||
hm *hybrid.HybridMap
|
||||
dialer *fastdialer.Dialer
|
||||
}
|
||||
|
||||
// New creates a new client for running enumeration process.
|
||||
func New(options *Options) (*Runner, error) {
|
||||
func New(options *types.Options) (*Runner, error) {
|
||||
runner := &Runner{
|
||||
traceLog: &tracelog.NoopLogger{},
|
||||
options: options,
|
||||
}
|
||||
if options.TraceLogFile != "" {
|
||||
fileLog, err := tracelog.NewFileLogger(options.TraceLogFile)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create file trace logger")
|
||||
}
|
||||
runner.traceLog = fileLog
|
||||
}
|
||||
|
||||
if err := runner.updateTemplates(); err != nil {
|
||||
gologger.Labelf("Could not update templates: %s\n", err)
|
||||
gologger.Warning().Msgf("Could not update templates: %s\n", err)
|
||||
}
|
||||
// Read nucleiignore file if given a templateconfig
|
||||
if runner.templatesConfig != nil {
|
||||
runner.readNucleiIgnoreFile()
|
||||
}
|
||||
runner.catalogue = catalogue.New(runner.options.TemplatesDirectory)
|
||||
|
||||
// output coloring
|
||||
useColor := !options.NoColor
|
||||
runner.colorizer = *colorizer.NewNucleiColorizer(aurora.NewAurora(useColor))
|
||||
|
||||
if useColor {
|
||||
// compile a decolorization regex to cleanup file output messages
|
||||
runner.decolorizer = regexp.MustCompile(`\x1B\[[0-9;]*[a-zA-Z]`)
|
||||
}
|
||||
runner.colorizer = aurora.NewAurora(useColor)
|
||||
runner.severityColors = colorizer.New(runner.colorizer)
|
||||
|
||||
if options.TemplateList {
|
||||
runner.listAvailableTemplates()
|
||||
|
@ -89,15 +67,10 @@ func New(options *Options) (*Runner, error) {
|
|||
if (len(options.Templates) == 0 || (options.Targets == "" && !options.Stdin && options.Target == "")) && options.UpdateTemplates {
|
||||
os.Exit(0)
|
||||
}
|
||||
// Read nucleiignore file if given a templateconfig
|
||||
if runner.templatesConfig != nil {
|
||||
runner.readNucleiIgnoreFile()
|
||||
}
|
||||
|
||||
if hm, err := hybrid.New(hybrid.DefaultDiskOptions); err != nil {
|
||||
gologger.Fatalf("Could not create temporary input file: %s\n", err)
|
||||
gologger.Fatal().Msgf("Could not create temporary input file: %s\n", err)
|
||||
} else {
|
||||
runner.hm = hm
|
||||
runner.hostMap = hm
|
||||
}
|
||||
|
||||
runner.inputCount = 0
|
||||
|
@ -107,7 +80,7 @@ func New(options *Options) (*Runner, error) {
|
|||
if options.Target != "" {
|
||||
runner.inputCount++
|
||||
// nolint:errcheck // ignoring error
|
||||
runner.hm.Set(options.Target, nil)
|
||||
runner.hostMap.Set(options.Target, nil)
|
||||
}
|
||||
|
||||
// Handle stdin
|
||||
|
@ -115,20 +88,16 @@ func New(options *Options) (*Runner, error) {
|
|||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
url := strings.TrimSpace(scanner.Text())
|
||||
// skip empty lines
|
||||
if url == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// skip dupes
|
||||
if _, ok := runner.hm.Get(url); ok {
|
||||
if _, ok := runner.hostMap.Get(url); ok {
|
||||
dupeCount++
|
||||
continue
|
||||
}
|
||||
|
||||
runner.inputCount++
|
||||
// nolint:errcheck // ignoring error
|
||||
runner.hm.Set(url, nil)
|
||||
runner.hostMap.Set(url, nil)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -136,41 +105,35 @@ func New(options *Options) (*Runner, error) {
|
|||
if options.Targets != "" {
|
||||
input, err := os.Open(options.Targets)
|
||||
if err != nil {
|
||||
gologger.Fatalf("Could not open targets file '%s': %s\n", options.Targets, err)
|
||||
gologger.Fatal().Msgf("Could not open targets file '%s': %s\n", options.Targets, err)
|
||||
}
|
||||
scanner := bufio.NewScanner(input)
|
||||
for scanner.Scan() {
|
||||
url := strings.TrimSpace(scanner.Text())
|
||||
// skip empty lines
|
||||
if url == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// skip dupes
|
||||
if _, ok := runner.hm.Get(url); ok {
|
||||
if _, ok := runner.hostMap.Get(url); ok {
|
||||
dupeCount++
|
||||
continue
|
||||
}
|
||||
|
||||
runner.inputCount++
|
||||
// nolint:errcheck // ignoring error
|
||||
runner.hm.Set(url, nil)
|
||||
runner.hostMap.Set(url, nil)
|
||||
}
|
||||
input.Close()
|
||||
}
|
||||
|
||||
if dupeCount > 0 {
|
||||
gologger.Labelf("Supplied input was automatically deduplicated (%d removed).", dupeCount)
|
||||
gologger.Info().Msgf("Supplied input was automatically deduplicated (%d removed).", dupeCount)
|
||||
}
|
||||
|
||||
// Create the output file if asked
|
||||
if options.Output != "" {
|
||||
output, errBufWriter := bufwriter.New(options.Output)
|
||||
if errBufWriter != nil {
|
||||
gologger.Fatalf("Could not create output file '%s': %s\n", options.Output, errBufWriter)
|
||||
output, err := output.NewStandardWriter(!options.NoColor, options.NoMeta, options.JSON, options.Output, options.TraceLogFile)
|
||||
if err != nil {
|
||||
gologger.Fatal().Msgf("Could not create output file '%s': %s\n", options.Output, err)
|
||||
}
|
||||
runner.output = output
|
||||
}
|
||||
|
||||
// Creates the progress tracking object
|
||||
var progressErr error
|
||||
|
@ -182,7 +145,7 @@ func New(options *Options) (*Runner, error) {
|
|||
// create project file if requested or load existing one
|
||||
if options.Project {
|
||||
var projectFileErr error
|
||||
runner.pf, projectFileErr = projectfile.New(&projectfile.Options{Path: options.ProjectPath, Cleanup: options.ProjectPath == ""})
|
||||
runner.projectFile, projectFileErr = projectfile.New(&projectfile.Options{Path: options.ProjectPath, Cleanup: options.ProjectPath == ""})
|
||||
if projectFileErr != nil {
|
||||
return nil, projectFileErr
|
||||
}
|
||||
|
@ -193,19 +156,11 @@ func New(options *Options) (*Runner, error) {
|
|||
collaborator.DefaultCollaborator.Collab.AddBIID(options.BurpCollaboratorBiid)
|
||||
}
|
||||
|
||||
// Create Dialer
|
||||
var err error
|
||||
runner.dialer, err = fastdialer.NewDialer(fastdialer.DefaultOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if options.RateLimit > 0 {
|
||||
runner.ratelimiter = ratelimit.New(options.RateLimit)
|
||||
} else {
|
||||
runner.ratelimiter = ratelimit.NewUnlimited()
|
||||
}
|
||||
|
||||
return runner, nil
|
||||
}
|
||||
|
||||
|
@ -214,9 +169,9 @@ func (r *Runner) Close() {
|
|||
if r.output != nil {
|
||||
r.output.Close()
|
||||
}
|
||||
r.hm.Close()
|
||||
if r.pf != nil {
|
||||
r.pf.Close()
|
||||
r.hostMap.Close()
|
||||
if r.projectFile != nil {
|
||||
r.projectFile.Close()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -224,8 +179,8 @@ func (r *Runner) Close() {
|
|||
// binary and runs the actual enumeration
|
||||
func (r *Runner) RunEnumeration() {
|
||||
// resolves input templates definitions and any optional exclusion
|
||||
includedTemplates := r.getTemplatesFor(r.options.Templates)
|
||||
excludedTemplates := r.getTemplatesFor(r.options.ExcludedTemplates)
|
||||
includedTemplates := r.catalogue.GetTemplatesPath(r.options.Templates)
|
||||
excludedTemplates := r.catalogue.GetTemplatesPath(r.options.ExcludedTemplates)
|
||||
// defaults to all templates
|
||||
allTemplates := includedTemplates
|
||||
|
||||
|
@ -241,79 +196,107 @@ func (r *Runner) RunEnumeration() {
|
|||
if _, found := excludedMap[incl]; !found {
|
||||
allTemplates = append(allTemplates, incl)
|
||||
} else {
|
||||
gologger.Warningf("Excluding '%s'", incl)
|
||||
gologger.Warning().Msgf("Excluding '%s'", incl)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
executerOpts := &protocols.ExecuterOptions{
|
||||
Output: r.output,
|
||||
Options: r.options,
|
||||
Progress: r.progress,
|
||||
Catalogue: r.catalogue,
|
||||
RateLimiter: r.ratelimiter,
|
||||
ProjectFile: r.projectFile,
|
||||
}
|
||||
// pre-parse all the templates, apply filters
|
||||
finalTemplates := []*templates.Template{}
|
||||
availableTemplates, workflowCount := r.getParsedTemplatesFor(allTemplates, r.options.Severity)
|
||||
templateCount := len(availableTemplates)
|
||||
|
||||
var unclusteredRequests int64 = 0
|
||||
for _, template := range availableTemplates {
|
||||
// workflows will dynamically adjust the totals while running, as
|
||||
// it can't be know in advance which requests will be called
|
||||
if len(template.Workflows) > 0 {
|
||||
continue
|
||||
}
|
||||
unclusteredRequests += int64(template.TotalRequests) * r.inputCount
|
||||
}
|
||||
|
||||
originalTemplatesCount := len(availableTemplates)
|
||||
clusterCount := 0
|
||||
clusters := clusterer.Cluster(availableTemplates)
|
||||
for _, cluster := range clusters {
|
||||
if len(cluster) > 1 {
|
||||
clusterID := fmt.Sprintf("cluster-%s", xid.New().String())
|
||||
|
||||
finalTemplates = append(finalTemplates, &templates.Template{
|
||||
ID: clusterID,
|
||||
RequestsHTTP: cluster[0].RequestsHTTP,
|
||||
Executer: clusterer.NewExecuter(cluster, executerOpts),
|
||||
TotalRequests: len(cluster[0].RequestsHTTP),
|
||||
})
|
||||
clusterCount++
|
||||
} else {
|
||||
finalTemplates = append(finalTemplates, cluster[0])
|
||||
}
|
||||
}
|
||||
|
||||
var totalRequests int64 = 0
|
||||
for _, t := range finalTemplates {
|
||||
if len(t.Workflows) > 0 {
|
||||
continue
|
||||
}
|
||||
totalRequests += int64(t.TotalRequests) * r.inputCount
|
||||
}
|
||||
if totalRequests < unclusteredRequests {
|
||||
gologger.Info().Msgf("Reduced %d requests to %d (%d templates clustered)", unclusteredRequests, totalRequests, clusterCount)
|
||||
}
|
||||
templateCount := originalTemplatesCount
|
||||
hasWorkflows := workflowCount > 0
|
||||
|
||||
// 0 matches means no templates were found in directory
|
||||
if templateCount == 0 {
|
||||
gologger.Fatalf("Error, no templates were found.\n")
|
||||
gologger.Fatal().Msgf("Error, no templates were found.\n")
|
||||
}
|
||||
|
||||
gologger.Infof("Using %s rules (%s templates, %s workflows)",
|
||||
r.colorizer.Colorizer.Bold(templateCount).String(),
|
||||
r.colorizer.Colorizer.Bold(templateCount-workflowCount).String(),
|
||||
r.colorizer.Colorizer.Bold(workflowCount).String())
|
||||
gologger.Info().Msgf("Using %s rules (%s templates, %s workflows)",
|
||||
r.colorizer.Bold(templateCount).String(),
|
||||
r.colorizer.Bold(templateCount-workflowCount).String(),
|
||||
r.colorizer.Bold(workflowCount).String())
|
||||
|
||||
// precompute total request count
|
||||
var totalRequests int64 = 0
|
||||
|
||||
for _, t := range availableTemplates {
|
||||
switch av := t.(type) {
|
||||
case *templates.Template:
|
||||
totalRequests += (av.GetHTTPRequestCount() + av.GetDNSRequestCount()) * r.inputCount
|
||||
case *workflows.Workflow:
|
||||
// workflows will dynamically adjust the totals while running, as
|
||||
// it can't be know in advance which requests will be called
|
||||
} // nolint:wsl // comment
|
||||
}
|
||||
|
||||
results := atomicboolean.New()
|
||||
results := &atomic.Bool{}
|
||||
wgtemplates := sizedwaitgroup.New(r.options.TemplateThreads)
|
||||
// Starts polling or ignore
|
||||
collaborator.DefaultCollaborator.Poll()
|
||||
|
||||
if r.inputCount == 0 {
|
||||
gologger.Errorf("Could not find any valid input URLs.")
|
||||
gologger.Error().Msgf("Could not find any valid input URLs.")
|
||||
} else if totalRequests > 0 || hasWorkflows {
|
||||
// tracks global progress and captures stdout/stderr until p.Wait finishes
|
||||
p := r.progress
|
||||
p.Init(r.inputCount, templateCount, totalRequests)
|
||||
r.progress.Init(r.inputCount, templateCount, totalRequests)
|
||||
|
||||
for _, t := range availableTemplates {
|
||||
for _, t := range finalTemplates {
|
||||
wgtemplates.Add()
|
||||
go func(template interface{}) {
|
||||
go func(template *templates.Template) {
|
||||
defer wgtemplates.Done()
|
||||
switch tt := template.(type) {
|
||||
case *templates.Template:
|
||||
for _, request := range tt.RequestsDNS {
|
||||
results.Or(r.processTemplateWithList(p, tt, request))
|
||||
}
|
||||
for _, request := range tt.BulkRequestsHTTP {
|
||||
results.Or(r.processTemplateWithList(p, tt, request))
|
||||
}
|
||||
case *workflows.Workflow:
|
||||
results.Or(r.processWorkflowWithList(p, template.(*workflows.Workflow)))
|
||||
|
||||
if len(template.Workflows) > 0 {
|
||||
results.CAS(false, r.processWorkflowWithList(template))
|
||||
} else {
|
||||
results.CAS(false, r.processTemplateWithList(template))
|
||||
}
|
||||
}(t)
|
||||
}
|
||||
|
||||
wgtemplates.Wait()
|
||||
p.Stop()
|
||||
r.progress.Stop()
|
||||
}
|
||||
|
||||
if !results.Get() {
|
||||
if !results.Load() {
|
||||
if r.output != nil {
|
||||
r.output.Close()
|
||||
os.Remove(r.options.Output)
|
||||
}
|
||||
|
||||
gologger.Infof("No results found. Happy hacking!")
|
||||
gologger.Info().Msgf("No results found. Better luck next time!")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,209 +1,80 @@
|
|||
package runner
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/karrick/godirwalk"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/templates"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/workflows"
|
||||
)
|
||||
|
||||
// getTemplatesFor parses the specified input template definitions and returns a list of unique, absolute template paths.
|
||||
func (r *Runner) getTemplatesFor(definitions []string) []string {
|
||||
// keeps track of processed dirs and files
|
||||
processed := make(map[string]bool)
|
||||
allTemplates := []string{}
|
||||
|
||||
// parses user input, handle file/directory cases and produce a list of unique templates
|
||||
for _, t := range definitions {
|
||||
var absPath string
|
||||
|
||||
var err error
|
||||
|
||||
if strings.Contains(t, "*") {
|
||||
dirs := strings.Split(t, "/")
|
||||
priorDir := strings.Join(dirs[:len(dirs)-1], "/")
|
||||
absPath, err = r.resolvePathIfRelative(priorDir)
|
||||
absPath += "/" + dirs[len(dirs)-1]
|
||||
} else {
|
||||
// resolve and convert relative to absolute path
|
||||
absPath, err = r.resolvePathIfRelative(t)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
gologger.Errorf("Could not find template file '%s': %s\n", t, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Template input includes a wildcard
|
||||
if strings.Contains(absPath, "*") {
|
||||
var matches []string
|
||||
matches, err = filepath.Glob(absPath)
|
||||
|
||||
if err != nil {
|
||||
gologger.Labelf("Wildcard found, but unable to glob '%s': %s\n", absPath, err)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// couldn't find templates in directory
|
||||
if len(matches) == 0 {
|
||||
gologger.Labelf("Error, no templates were found with '%s'.\n", absPath)
|
||||
continue
|
||||
} else {
|
||||
gologger.Labelf("Identified %d templates\n", len(matches))
|
||||
}
|
||||
|
||||
for _, match := range matches {
|
||||
if !r.checkIfInNucleiIgnore(match) {
|
||||
processed[match] = true
|
||||
|
||||
allTemplates = append(allTemplates, match)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// determine file/directory
|
||||
isFile, err := isFilePath(absPath)
|
||||
if err != nil {
|
||||
gologger.Errorf("Could not stat '%s': %s\n", absPath, err)
|
||||
continue
|
||||
}
|
||||
// test for uniqueness
|
||||
if !isNewPath(absPath, processed) {
|
||||
continue
|
||||
}
|
||||
// mark this absolute path as processed
|
||||
// - if it's a file, we'll never process it again
|
||||
// - if it's a dir, we'll never walk it again
|
||||
processed[absPath] = true
|
||||
|
||||
if isFile {
|
||||
allTemplates = append(allTemplates, absPath)
|
||||
} else {
|
||||
matches := []string{}
|
||||
|
||||
// Recursively walk down the Templates directory and run all the template file checks
|
||||
err := directoryWalker(
|
||||
absPath,
|
||||
func(path string, d *godirwalk.Dirent) error {
|
||||
if !d.IsDir() && strings.HasSuffix(path, ".yaml") {
|
||||
if !r.checkIfInNucleiIgnore(path) && isNewPath(path, processed) {
|
||||
matches = append(matches, path)
|
||||
processed[path] = true
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
)
|
||||
|
||||
// directory couldn't be walked
|
||||
if err != nil {
|
||||
gologger.Labelf("Could not find templates in directory '%s': %s\n", absPath, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// couldn't find templates in directory
|
||||
if len(matches) == 0 {
|
||||
gologger.Labelf("Error, no templates were found in '%s'.\n", absPath)
|
||||
continue
|
||||
}
|
||||
|
||||
allTemplates = append(allTemplates, matches...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return allTemplates
|
||||
}
|
||||
|
||||
// getParsedTemplatesFor parse the specified templates and returns a slice of the parsable ones, optionally filtered
|
||||
// by severity, along with a flag indicating if workflows are present.
|
||||
func (r *Runner) getParsedTemplatesFor(templatePaths []string, severities string) (parsedTemplates []interface{}, workflowCount int) {
|
||||
workflowCount = 0
|
||||
severities = strings.ToLower(severities)
|
||||
allSeverities := strings.Split(severities, ",")
|
||||
func (r *Runner) getParsedTemplatesFor(templatePaths []string, severities []string) (map[string]*templates.Template, int) {
|
||||
workflowCount := 0
|
||||
filterBySeverity := len(severities) > 0
|
||||
|
||||
gologger.Infof("Loading templates...")
|
||||
gologger.Info().Msgf("Loading templates...")
|
||||
|
||||
parsedTemplates := make(map[string]*templates.Template)
|
||||
for _, match := range templatePaths {
|
||||
t, err := r.parseTemplateFile(match)
|
||||
switch tp := t.(type) {
|
||||
case *templates.Template:
|
||||
// only include if severity matches or no severity filtering
|
||||
sev := strings.ToLower(tp.Info["severity"])
|
||||
if !filterBySeverity || hasMatchingSeverity(sev, allSeverities) {
|
||||
parsedTemplates = append(parsedTemplates, tp)
|
||||
gologger.Infof("%s\n", r.templateLogMsg(tp.ID, tp.Info["name"], tp.Info["author"], tp.Info["severity"]))
|
||||
} else {
|
||||
gologger.Warningf("Excluding template %s due to severity filter (%s not in [%s])", tp.ID, sev, severities)
|
||||
if err != nil {
|
||||
gologger.Error().Msgf("Could not parse file '%s': %s\n", match, err)
|
||||
continue
|
||||
}
|
||||
case *workflows.Workflow:
|
||||
parsedTemplates = append(parsedTemplates, tp)
|
||||
gologger.Infof("%s\n", r.templateLogMsg(tp.ID, tp.Info["name"], tp.Info["author"], tp.Info["severity"]))
|
||||
if len(t.Workflows) > 0 {
|
||||
workflowCount++
|
||||
default:
|
||||
gologger.Errorf("Could not parse file '%s': %s\n", match, err)
|
||||
}
|
||||
sev := strings.ToLower(t.Info["severity"])
|
||||
if !filterBySeverity || hasMatchingSeverity(sev, severities) {
|
||||
parsedTemplates[t.ID] = t
|
||||
gologger.Info().Msgf("%s\n", r.templateLogMsg(t.ID, t.Info["name"], t.Info["author"], t.Info["severity"]))
|
||||
} else {
|
||||
gologger.Error().Msgf("Excluding template %s due to severity filter (%s not in [%s])", t.ID, sev, severities)
|
||||
}
|
||||
}
|
||||
|
||||
return parsedTemplates, workflowCount
|
||||
}
|
||||
|
||||
func (r *Runner) parseTemplateFile(file string) (interface{}, error) {
|
||||
// check if it's a template
|
||||
template, errTemplate := templates.Parse(file)
|
||||
if errTemplate == nil {
|
||||
// parseTemplateFile returns the parsed template file
|
||||
func (r *Runner) parseTemplateFile(file string) (*templates.Template, error) {
|
||||
executerOpts := &protocols.ExecuterOptions{
|
||||
Output: r.output,
|
||||
Options: r.options,
|
||||
Progress: r.progress,
|
||||
Catalogue: r.catalogue,
|
||||
RateLimiter: r.ratelimiter,
|
||||
ProjectFile: r.projectFile,
|
||||
}
|
||||
template, err := templates.Parse(file, executerOpts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return template, nil
|
||||
}
|
||||
|
||||
// check if it's a workflow
|
||||
workflow, errWorkflow := workflows.Parse(file)
|
||||
if errWorkflow == nil {
|
||||
return workflow, nil
|
||||
}
|
||||
|
||||
if errTemplate != nil {
|
||||
return nil, errTemplate
|
||||
}
|
||||
|
||||
if errWorkflow != nil {
|
||||
return nil, errWorkflow
|
||||
}
|
||||
|
||||
return nil, errors.New("unknown error occurred")
|
||||
}
|
||||
|
||||
func (r *Runner) templateLogMsg(id, name, author, severity string) string {
|
||||
// Display the message for the template
|
||||
message := fmt.Sprintf("[%s] %s (%s)",
|
||||
r.colorizer.Colorizer.BrightBlue(id).String(),
|
||||
r.colorizer.Colorizer.Bold(name).String(),
|
||||
r.colorizer.Colorizer.BrightYellow("@"+author).String())
|
||||
|
||||
r.colorizer.BrightBlue(id).String(),
|
||||
r.colorizer.Bold(name).String(),
|
||||
r.colorizer.BrightYellow("@"+author).String())
|
||||
if severity != "" {
|
||||
message += " [" + r.colorizer.GetColorizedSeverity(severity) + "]"
|
||||
message += " [" + r.severityColors.Data[severity] + "]"
|
||||
}
|
||||
|
||||
return message
|
||||
}
|
||||
|
||||
func (r *Runner) logAvailableTemplate(tplPath string) {
|
||||
t, err := r.parseTemplateFile(tplPath)
|
||||
if t != nil {
|
||||
switch tp := t.(type) {
|
||||
case *templates.Template:
|
||||
gologger.Silentf("%s\n", r.templateLogMsg(tp.ID, tp.Info["name"], tp.Info["author"], tp.Info["severity"]))
|
||||
case *workflows.Workflow:
|
||||
gologger.Silentf("%s\n", r.templateLogMsg(tp.ID, tp.Info["name"], tp.Info["author"], tp.Info["severity"]))
|
||||
default:
|
||||
gologger.Errorf("Could not parse file '%s': %s\n", tplPath, err)
|
||||
}
|
||||
if err != nil {
|
||||
gologger.Error().Msgf("Could not parse file '%s': %s\n", tplPath, err)
|
||||
} else {
|
||||
gologger.Print().Msgf("%s\n", r.templateLogMsg(t.ID, t.Info["name"], t.Info["author"], t.Info["severity"]))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -214,11 +85,11 @@ func (r *Runner) listAvailableTemplates() {
|
|||
}
|
||||
|
||||
if _, err := os.Stat(r.templatesConfig.TemplatesDirectory); os.IsNotExist(err) {
|
||||
gologger.Errorf("%s does not exists", r.templatesConfig.TemplatesDirectory)
|
||||
gologger.Error().Msgf("%s does not exists", r.templatesConfig.TemplatesDirectory)
|
||||
return
|
||||
}
|
||||
|
||||
gologger.Silentf(
|
||||
gologger.Print().Msgf(
|
||||
"\nListing available v.%s nuclei templates for %s",
|
||||
r.templatesConfig.CurrentVersion,
|
||||
r.templatesConfig.TemplatesDirectory,
|
||||
|
@ -227,42 +98,26 @@ func (r *Runner) listAvailableTemplates() {
|
|||
r.templatesConfig.TemplatesDirectory,
|
||||
func(path string, d *godirwalk.Dirent) error {
|
||||
if d.IsDir() && path != r.templatesConfig.TemplatesDirectory {
|
||||
gologger.Silentf("\n%s:\n\n", r.colorizer.Colorizer.Bold(r.colorizer.Colorizer.BgBrightBlue(d.Name())).String())
|
||||
gologger.Print().Msgf("\n%s:\n\n", r.colorizer.Bold(r.colorizer.BgBrightBlue(d.Name())).String())
|
||||
} else if strings.HasSuffix(path, ".yaml") {
|
||||
r.logAvailableTemplate(path)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
)
|
||||
|
||||
// directory couldn't be walked
|
||||
if err != nil {
|
||||
gologger.Labelf("Could not find templates in directory '%s': %s\n", r.templatesConfig.TemplatesDirectory, err)
|
||||
gologger.Error().Msgf("Could not find templates in directory '%s': %s\n", r.templatesConfig.TemplatesDirectory, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Runner) resolvePathIfRelative(filePath string) (string, error) {
|
||||
if isRelative(filePath) {
|
||||
newPath, err := r.resolvePath(filePath)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return newPath, nil
|
||||
}
|
||||
|
||||
return filePath, nil
|
||||
}
|
||||
|
||||
func hasMatchingSeverity(templateSeverity string, allowedSeverities []string) bool {
|
||||
for _, s := range allowedSeverities {
|
||||
s = strings.ToLower(s)
|
||||
if s != "" && strings.HasPrefix(templateSeverity, s) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -294,9 +149,8 @@ func isFilePath(filePath string) (bool, error) {
|
|||
|
||||
func isNewPath(filePath string, pathMap map[string]bool) bool {
|
||||
if _, already := pathMap[filePath]; already {
|
||||
gologger.Warningf("Skipping already specified path '%s'", filePath)
|
||||
gologger.Warning().Msgf("Skipping already specified path '%s'", filePath)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
|
|
@ -2,8 +2,11 @@ package runner
|
|||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -12,11 +15,13 @@ import (
|
|||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/google/go-github/v32/github"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
)
|
||||
|
||||
|
@ -47,42 +52,36 @@ func (r *Runner) updateTemplates() error {
|
|||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if r.templatesConfig == nil || (r.options.TemplatesDirectory != "" && r.templatesConfig.TemplatesDirectory != r.options.TemplatesDirectory) {
|
||||
if !r.options.UpdateTemplates {
|
||||
gologger.Labelf("nuclei-templates are not installed, use update-templates flag.\n")
|
||||
gologger.Warning().Msgf("nuclei-templates are not installed, use update-templates flag.\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Use custom location if user has given a template directory
|
||||
if r.options.TemplatesDirectory != "" {
|
||||
home = r.options.TemplatesDirectory
|
||||
}
|
||||
|
||||
r.templatesConfig = &nucleiConfig{TemplatesDirectory: path.Join(home, "nuclei-templates")}
|
||||
if r.options.TemplatesDirectory != "" && r.options.TemplatesDirectory != path.Join(home, "nuclei-templates") {
|
||||
r.templatesConfig.TemplatesDirectory = r.options.TemplatesDirectory
|
||||
}
|
||||
|
||||
// Download the repository and also write the revision to a HEAD file.
|
||||
version, asset, getErr := r.getLatestReleaseFromGithub()
|
||||
if getErr != nil {
|
||||
return getErr
|
||||
}
|
||||
gologger.Verbose().Msgf("Downloading nuclei-templates (v%s) to %s\n", version.String(), r.templatesConfig.TemplatesDirectory)
|
||||
|
||||
gologger.Verbosef("Downloading nuclei-templates (v%s) to %s\n", "update-templates", version.String(), r.templatesConfig.TemplatesDirectory)
|
||||
|
||||
err = r.downloadReleaseAndUnzip(ctx, asset.GetZipballURL())
|
||||
err = r.downloadReleaseAndUnzip(ctx, version.String(), asset.GetZipballURL())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.templatesConfig.CurrentVersion = version.String()
|
||||
|
||||
err = r.writeConfiguration(r.templatesConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gologger.Infof("Successfully downloaded nuclei-templates (v%s). Enjoy!\n", version.String())
|
||||
|
||||
gologger.Info().Msgf("Successfully downloaded nuclei-templates (v%s). Enjoy!\n", version.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -95,17 +94,14 @@ func (r *Runner) updateTemplates() error {
|
|||
// Get the configuration currently on disk.
|
||||
verText := r.templatesConfig.CurrentVersion
|
||||
indices := reVersion.FindStringIndex(verText)
|
||||
|
||||
if indices == nil {
|
||||
return fmt.Errorf("invalid release found with tag %s", err)
|
||||
}
|
||||
|
||||
if indices[0] > 0 {
|
||||
verText = verText[indices[0]:]
|
||||
}
|
||||
|
||||
oldVersion, err := semver.Make(verText)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -116,26 +112,23 @@ func (r *Runner) updateTemplates() error {
|
|||
}
|
||||
|
||||
if version.EQ(oldVersion) {
|
||||
gologger.Infof("Your nuclei-templates are up to date: v%s\n", oldVersion.String())
|
||||
gologger.Info().Msgf("Your nuclei-templates are up to date: v%s\n", oldVersion.String())
|
||||
return r.writeConfiguration(r.templatesConfig)
|
||||
}
|
||||
|
||||
if version.GT(oldVersion) {
|
||||
if !r.options.UpdateTemplates {
|
||||
gologger.Labelf("Your current nuclei-templates v%s are outdated. Latest is v%s\n", oldVersion, version.String())
|
||||
gologger.Warning().Msgf("Your current nuclei-templates v%s are outdated. Latest is v%s\n", oldVersion, version.String())
|
||||
return r.writeConfiguration(r.templatesConfig)
|
||||
}
|
||||
|
||||
if r.options.TemplatesDirectory != "" {
|
||||
home = r.options.TemplatesDirectory
|
||||
r.templatesConfig.TemplatesDirectory = path.Join(home, "nuclei-templates")
|
||||
r.templatesConfig.TemplatesDirectory = r.options.TemplatesDirectory
|
||||
}
|
||||
|
||||
r.templatesConfig.CurrentVersion = version.String()
|
||||
|
||||
gologger.Verbosef("Downloading nuclei-templates (v%s) to %s\n", "update-templates", version.String(), r.templatesConfig.TemplatesDirectory)
|
||||
|
||||
err = r.downloadReleaseAndUnzip(ctx, asset.GetZipballURL())
|
||||
gologger.Verbose().Msgf("Downloading nuclei-templates (v%s) to %s\n", version.String(), r.templatesConfig.TemplatesDirectory)
|
||||
err = r.downloadReleaseAndUnzip(ctx, version.String(), asset.GetZipballURL())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -144,10 +137,8 @@ func (r *Runner) updateTemplates() error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gologger.Infof("Successfully updated nuclei-templates (v%s). Enjoy!\n", version.String())
|
||||
gologger.Info().Msgf("Successfully updated nuclei-templates (v%s). Enjoy!\n", version.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -162,17 +153,13 @@ func (r *Runner) getLatestReleaseFromGithub() (semver.Version, *github.Repositor
|
|||
|
||||
// Find the most recent version based on semantic versioning.
|
||||
var latestRelease semver.Version
|
||||
|
||||
var latestPublish *github.RepositoryRelease
|
||||
|
||||
for _, release := range rels {
|
||||
verText := release.GetTagName()
|
||||
indices := reVersion.FindStringIndex(verText)
|
||||
|
||||
if indices == nil {
|
||||
return semver.Version{}, nil, fmt.Errorf("invalid release found with tag %s", err)
|
||||
}
|
||||
|
||||
if indices[0] > 0 {
|
||||
verText = verText[indices[0]:]
|
||||
}
|
||||
|
@ -187,16 +174,14 @@ func (r *Runner) getLatestReleaseFromGithub() (semver.Version, *github.Repositor
|
|||
latestPublish = release
|
||||
}
|
||||
}
|
||||
|
||||
if latestPublish == nil {
|
||||
return semver.Version{}, nil, errors.New("no version found for the templates")
|
||||
}
|
||||
|
||||
return latestRelease, latestPublish, nil
|
||||
}
|
||||
|
||||
// downloadReleaseAndUnzip downloads and unzips the release in a directory
|
||||
func (r *Runner) downloadReleaseAndUnzip(ctx context.Context, downloadURL string) error {
|
||||
func (r *Runner) downloadReleaseAndUnzip(ctx context.Context, version, downloadURL string) error {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create HTTP request to %s: %s", downloadURL, err)
|
||||
|
@ -207,7 +192,6 @@ func (r *Runner) downloadReleaseAndUnzip(ctx context.Context, downloadURL string
|
|||
return fmt.Errorf("failed to download a release file from %s: %s", downloadURL, err)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("failed to download a release file from %s: Not successful status %d", downloadURL, res.StatusCode)
|
||||
}
|
||||
|
@ -219,7 +203,6 @@ func (r *Runner) downloadReleaseAndUnzip(ctx context.Context, downloadURL string
|
|||
|
||||
reader := bytes.NewReader(buf)
|
||||
z, err := zip.NewReader(reader, reader.Size())
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to uncompress zip file: %s", err)
|
||||
}
|
||||
|
@ -230,23 +213,41 @@ func (r *Runner) downloadReleaseAndUnzip(ctx context.Context, downloadURL string
|
|||
return fmt.Errorf("failed to create template base folder: %s", err)
|
||||
}
|
||||
|
||||
totalCount := 0
|
||||
additions, deletions, modifications := []string{}, []string{}, []string{}
|
||||
// We use file-checksums that are md5 hashes to store the list of files->hashes
|
||||
// that have been downloaded previously.
|
||||
// If the path isn't found in new update after being read from the previous checksum,
|
||||
// it is removed. This allows us fine-grained control over the download process
|
||||
// as well as solves a long problem with nuclei-template updates.
|
||||
checksumFile := path.Join(r.templatesConfig.TemplatesDirectory, ".checksum")
|
||||
previousChecksum := readPreviousTemplatesChecksum(checksumFile)
|
||||
checksums := make(map[string]string)
|
||||
for _, file := range z.File {
|
||||
directory, name := filepath.Split(file.Name)
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
paths := strings.Split(directory, "/")
|
||||
finalPath := strings.Join(paths[1:], "/")
|
||||
|
||||
if strings.HasPrefix(name, ".") || strings.HasPrefix(finalPath, ".") || strings.EqualFold(name, "README.md") {
|
||||
continue
|
||||
}
|
||||
totalCount++
|
||||
templateDirectory := path.Join(r.templatesConfig.TemplatesDirectory, finalPath)
|
||||
err = os.MkdirAll(templateDirectory, os.ModePerm)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create template folder %s : %s", templateDirectory, err)
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(path.Join(templateDirectory, name), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0777)
|
||||
templatePath := path.Join(templateDirectory, name)
|
||||
|
||||
isAddition := false
|
||||
if _, err := os.Stat(templatePath); os.IsNotExist(err) {
|
||||
isAddition = true
|
||||
}
|
||||
f, err := os.OpenFile(templatePath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0777)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return fmt.Errorf("could not create uncompressed file: %s", err)
|
||||
|
@ -257,15 +258,129 @@ func (r *Runner) downloadReleaseAndUnzip(ctx context.Context, downloadURL string
|
|||
f.Close()
|
||||
return fmt.Errorf("could not open archive to extract file: %s", err)
|
||||
}
|
||||
hasher := md5.New()
|
||||
|
||||
_, err = io.Copy(f, reader)
|
||||
// Save file and also read into hasher for md5
|
||||
_, err = io.Copy(f, io.TeeReader(reader, hasher))
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return fmt.Errorf("could not write template file: %s", err)
|
||||
}
|
||||
|
||||
f.Close()
|
||||
|
||||
if isAddition {
|
||||
additions = append(additions, path.Join(finalPath, name))
|
||||
} else {
|
||||
modifications = append(modifications, path.Join(finalPath, name))
|
||||
}
|
||||
checksums[templatePath] = hex.EncodeToString(hasher.Sum(nil))
|
||||
}
|
||||
|
||||
// If we don't find a previous file in new download and it hasn't been
|
||||
// changed on the disk, delete it.
|
||||
if previousChecksum != nil {
|
||||
for k, v := range previousChecksum {
|
||||
_, ok := checksums[k]
|
||||
if !ok && v[0] == v[1] {
|
||||
os.Remove(k)
|
||||
deletions = append(deletions, strings.TrimPrefix(strings.TrimPrefix(k, r.templatesConfig.TemplatesDirectory), "/"))
|
||||
}
|
||||
}
|
||||
}
|
||||
r.printUpdateChangelog(additions, modifications, deletions, version, totalCount)
|
||||
return writeTemplatesChecksum(checksumFile, checksums)
|
||||
}
|
||||
|
||||
// readPreviousTemplatesChecksum reads the previous checksum file from the disk.
|
||||
//
|
||||
// It reads two checksums, the first checksum is what we expect and the second is
|
||||
// the actual checksum of the file on disk currently.
|
||||
func readPreviousTemplatesChecksum(file string) map[string][2]string {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
defer f.Close()
|
||||
scanner := bufio.NewScanner(f)
|
||||
|
||||
checksum := make(map[string][2]string)
|
||||
for scanner.Scan() {
|
||||
text := scanner.Text()
|
||||
if text == "" {
|
||||
continue
|
||||
}
|
||||
parts := strings.Split(text, ",")
|
||||
if len(parts) < 2 {
|
||||
continue
|
||||
}
|
||||
values := [2]string{parts[1]}
|
||||
|
||||
f, err := os.Open(parts[0])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
hasher := md5.New()
|
||||
if _, err := io.Copy(hasher, f); err != nil {
|
||||
f.Close()
|
||||
continue
|
||||
}
|
||||
f.Close()
|
||||
|
||||
values[1] = hex.EncodeToString(hasher.Sum(nil))
|
||||
checksum[parts[0]] = values
|
||||
}
|
||||
return checksum
|
||||
}
|
||||
|
||||
// writeTemplatesChecksum writes the nuclei-templates checksum data to disk.
|
||||
func writeTemplatesChecksum(file string, checksum map[string]string) error {
|
||||
f, err := os.Create(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
for k, v := range checksum {
|
||||
f.WriteString(k)
|
||||
f.WriteString(",")
|
||||
f.WriteString(v)
|
||||
f.WriteString("\n")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Runner) printUpdateChangelog(additions, modifications, deletions []string, version string, totalCount int) {
|
||||
if len(additions) > 0 {
|
||||
gologger.Print().Msgf("\nNew additions: \n\n")
|
||||
|
||||
for _, addition := range additions {
|
||||
gologger.Print().Msgf("%s", addition)
|
||||
}
|
||||
}
|
||||
if len(modifications) > 0 {
|
||||
gologger.Print().Msgf("\nModifications: \n\n")
|
||||
|
||||
for _, modification := range modifications {
|
||||
gologger.Print().Msgf("%s", modification)
|
||||
}
|
||||
}
|
||||
if len(deletions) > 0 {
|
||||
gologger.Print().Msgf("\nDeletions: \n\n")
|
||||
|
||||
for _, deletion := range deletions {
|
||||
gologger.Print().Msgf("%s", deletion)
|
||||
}
|
||||
}
|
||||
|
||||
gologger.Print().Msgf("\nNuclei Templates v%s Changelog\n", version)
|
||||
data := [][]string{
|
||||
{strconv.Itoa(totalCount), strconv.Itoa(len(additions)), strconv.Itoa(len(modifications)), strconv.Itoa(len(deletions))},
|
||||
}
|
||||
table := tablewriter.NewWriter(os.Stdout)
|
||||
table.SetHeader([]string{"Total", "New", "Modifications", "Deletions"})
|
||||
for _, v := range data {
|
||||
table.Append(v)
|
||||
}
|
||||
table.Render()
|
||||
}
|
||||
|
|
|
@ -1,76 +0,0 @@
|
|||
package tracelog
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
// Log is an interface for logging trace log of all the requests
|
||||
type Log interface {
|
||||
// Close closes the log interface flushing data
|
||||
Close()
|
||||
// Request writes a log the requests trace log
|
||||
Request(templateID, url, requestType string, err error)
|
||||
}
|
||||
|
||||
// NoopLogger is a noop logger that simply does nothing
|
||||
type NoopLogger struct{}
|
||||
|
||||
// Close closes the log interface flushing data
|
||||
func (n *NoopLogger) Close() {}
|
||||
|
||||
// Request writes a log the requests trace log
|
||||
func (n *NoopLogger) Request(templateID, url, requestType string, err error) {}
|
||||
|
||||
// FileLogger is a trace logger that writes request logs to a file.
|
||||
type FileLogger struct {
|
||||
encoder *jsoniter.Encoder
|
||||
file *os.File
|
||||
mutex *sync.Mutex
|
||||
}
|
||||
|
||||
// NewFileLogger creates a new file logger structure
|
||||
func NewFileLogger(path string) (*FileLogger, error) {
|
||||
file, err := os.Create(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FileLogger{file: file, encoder: jsoniter.NewEncoder(file), mutex: &sync.Mutex{}}, nil
|
||||
}
|
||||
|
||||
// Close closes the log interface flushing data
|
||||
func (f *FileLogger) Close() {
|
||||
f.mutex.Lock()
|
||||
defer f.mutex.Unlock()
|
||||
|
||||
f.file.Close()
|
||||
}
|
||||
|
||||
// JSONRequest is a trace log request written to file
|
||||
type JSONRequest struct {
|
||||
ID string `json:"id"`
|
||||
URL string `json:"url"`
|
||||
Error string `json:"error"`
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
// Request writes a log the requests trace log
|
||||
func (f *FileLogger) Request(templateID, url, requestType string, err error) {
|
||||
request := &JSONRequest{
|
||||
ID: templateID,
|
||||
URL: url,
|
||||
Type: requestType,
|
||||
}
|
||||
if err != nil {
|
||||
request.Error = err.Error()
|
||||
} else {
|
||||
request.Error = "none"
|
||||
}
|
||||
|
||||
f.mutex.Lock()
|
||||
defer f.mutex.Unlock()
|
||||
//nolint:errcheck // We don't need to do anything here
|
||||
f.encoder.Encode(request)
|
||||
}
|
|
@ -1,42 +0,0 @@
|
|||
package atomicboolean
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type AtomBool struct {
|
||||
sync.RWMutex
|
||||
flag bool
|
||||
}
|
||||
|
||||
func New() *AtomBool {
|
||||
return &AtomBool{}
|
||||
}
|
||||
|
||||
func (b *AtomBool) Or(value bool) {
|
||||
b.Lock()
|
||||
defer b.Unlock()
|
||||
|
||||
b.flag = b.flag || value
|
||||
}
|
||||
|
||||
func (b *AtomBool) And(value bool) {
|
||||
b.Lock()
|
||||
defer b.Unlock()
|
||||
|
||||
b.flag = b.flag && value
|
||||
}
|
||||
|
||||
func (b *AtomBool) Set(value bool) {
|
||||
b.Lock()
|
||||
defer b.Unlock()
|
||||
|
||||
b.flag = value
|
||||
}
|
||||
|
||||
func (b *AtomBool) Get() bool {
|
||||
b.RLock()
|
||||
defer b.RUnlock() //nolint
|
||||
|
||||
return b.flag
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
package catalogue
|
||||
|
||||
// Catalogue is a template catalouge helper implementation
|
||||
type Catalogue struct {
|
||||
ignoreFiles []string
|
||||
templatesDirectory string
|
||||
}
|
||||
|
||||
// New creates a new catalogue structure using provided input items
|
||||
func New(directory string) *Catalogue {
|
||||
catalogue := &Catalogue{templatesDirectory: directory}
|
||||
catalogue.readNucleiIgnoreFile()
|
||||
return catalogue
|
||||
}
|
|
@ -0,0 +1,155 @@
|
|||
package catalogue
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/karrick/godirwalk"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
)
|
||||
|
||||
// GetTemplatesPath returns a list of absolute paths for the provided template list.
|
||||
func (c *Catalogue) GetTemplatesPath(definitions []string) []string {
|
||||
// keeps track of processed dirs and files
|
||||
processed := make(map[string]bool)
|
||||
allTemplates := []string{}
|
||||
|
||||
for _, t := range definitions {
|
||||
paths, err := c.GetTemplatePath(t)
|
||||
if err != nil {
|
||||
gologger.Error().Msgf("Could not find template '%s': %s\n", t, err)
|
||||
}
|
||||
for _, path := range paths {
|
||||
if _, ok := processed[path]; !ok {
|
||||
processed[path] = true
|
||||
allTemplates = append(allTemplates, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(allTemplates) > 0 {
|
||||
gologger.Verbose().Msgf("Identified %d templates", len(allTemplates))
|
||||
}
|
||||
return allTemplates
|
||||
}
|
||||
|
||||
// GetTemplatePath parses the specified input template path and returns a compiled
|
||||
// list of finished absolute paths to the templates evaluating any glob patterns
|
||||
// or folders provided as in.
|
||||
func (c *Catalogue) GetTemplatePath(target string) ([]string, error) {
|
||||
processed := make(map[string]struct{})
|
||||
|
||||
absPath, err := c.convertPathToAbsolute(target)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not find template file")
|
||||
}
|
||||
|
||||
// Template input includes a wildcard
|
||||
if strings.Contains(absPath, "*") {
|
||||
matches, err := c.findGlobPathMatches(absPath, processed)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not find glob matches")
|
||||
}
|
||||
if len(matches) == 0 {
|
||||
return nil, errors.Errorf("no templates found for path")
|
||||
}
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// Template input is either a file or a directory
|
||||
match, file, err := c.findFileMatches(absPath, processed)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not find file")
|
||||
}
|
||||
if file {
|
||||
if match != "" {
|
||||
return []string{match}, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Recursively walk down the Templates directory and run all
|
||||
// the template file checks
|
||||
matches, err := c.findDirectoryMatches(absPath, processed)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not find directory matches")
|
||||
}
|
||||
if len(matches) == 0 {
|
||||
return nil, errors.Errorf("no templates found in path")
|
||||
}
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// convertPathToAbsolute resolves the paths provided to absolute paths
|
||||
// before doing any operations on them regardless of them being blob, folders, files, etc.
|
||||
func (c *Catalogue) convertPathToAbsolute(t string) (string, error) {
|
||||
if strings.Contains(t, "*") {
|
||||
file := path.Base(t)
|
||||
absPath, err := c.ResolvePath(path.Dir(t), "")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return path.Join(absPath, file), nil
|
||||
}
|
||||
return c.ResolvePath(t, "")
|
||||
}
|
||||
|
||||
// findGlobPathMatches returns the matched files from a glob path
|
||||
func (c *Catalogue) findGlobPathMatches(absPath string, processed map[string]struct{}) ([]string, error) {
|
||||
matches, err := filepath.Glob(absPath)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("wildcard found, but unable to glob: %s\n", err)
|
||||
}
|
||||
results := make([]string, 0, len(matches))
|
||||
for _, match := range matches {
|
||||
if _, ok := processed[match]; !ok {
|
||||
processed[match] = struct{}{}
|
||||
results = append(results, match)
|
||||
}
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// findFileMatches finds if a path is an absolute file. If the path
|
||||
// is a file, it returns true otherwise false with no errors.
|
||||
func (c *Catalogue) findFileMatches(absPath string, processed map[string]struct{}) (string, bool, error) {
|
||||
info, err := os.Stat(absPath)
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
if !info.Mode().IsRegular() {
|
||||
return "", false, nil
|
||||
}
|
||||
if _, ok := processed[absPath]; !ok {
|
||||
processed[absPath] = struct{}{}
|
||||
return absPath, true, nil
|
||||
}
|
||||
return "", true, nil
|
||||
}
|
||||
|
||||
// findDirectoryMatches finds matches for templates from a directory
|
||||
func (c *Catalogue) findDirectoryMatches(absPath string, processed map[string]struct{}) ([]string, error) {
|
||||
var results []string
|
||||
err := godirwalk.Walk(absPath, &godirwalk.Options{
|
||||
Unsorted: true,
|
||||
ErrorCallback: func(fsPath string, err error) godirwalk.ErrorAction {
|
||||
return godirwalk.SkipNode
|
||||
},
|
||||
Callback: func(path string, d *godirwalk.Dirent) error {
|
||||
if !d.IsDir() && strings.HasSuffix(path, ".yaml") {
|
||||
if c.checkIfInNucleiIgnore(path) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, ok := processed[path]; !ok {
|
||||
results = append(results, path)
|
||||
processed[path] = struct{}{}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
})
|
||||
return results, err
|
||||
}
|
|
@ -0,0 +1,81 @@
|
|||
package catalogue
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/projectdiscovery/gologger"
|
||||
)
|
||||
|
||||
const nucleiIgnoreFile = ".nuclei-ignore"
|
||||
|
||||
// readNucleiIgnoreFile reads the nuclei ignore file marking it in map
|
||||
func (c *Catalogue) readNucleiIgnoreFile() {
|
||||
file, err := os.Open(path.Join(c.templatesDirectory, nucleiIgnoreFile))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
text := scanner.Text()
|
||||
if text == "" {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(text, "#") {
|
||||
continue
|
||||
}
|
||||
c.ignoreFiles = append(c.ignoreFiles, text)
|
||||
}
|
||||
}
|
||||
|
||||
// checkIfInNucleiIgnore checks if a path falls under nuclei-ignore rules.
|
||||
func (c *Catalogue) checkIfInNucleiIgnore(item string) bool {
|
||||
if c.templatesDirectory == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, paths := range c.ignoreFiles {
|
||||
dir := path.Dir(item)
|
||||
|
||||
if strings.EqualFold(dir, paths) {
|
||||
gologger.Error().Msgf("Excluding %s due to nuclei-ignore filter", item)
|
||||
return true
|
||||
}
|
||||
if strings.HasSuffix(paths, ".yaml") && strings.HasSuffix(item, paths) {
|
||||
gologger.Error().Msgf("Excluding %s due to nuclei-ignore filter", item)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ignoreFilesWithExcludes ignores results with exclude paths
|
||||
func (c *Catalogue) ignoreFilesWithExcludes(results, excluded []string) []string {
|
||||
var templates []string
|
||||
|
||||
for _, result := range results {
|
||||
matched := false
|
||||
for _, paths := range excluded {
|
||||
dir := path.Dir(result)
|
||||
|
||||
if strings.EqualFold(dir, paths) {
|
||||
matched = true
|
||||
break
|
||||
}
|
||||
if strings.HasSuffix(paths, ".yaml") && strings.HasSuffix(result, paths) {
|
||||
matched = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !matched {
|
||||
templates = append(templates, result)
|
||||
} else {
|
||||
gologger.Error().Msgf("Excluding %s due to excludes filter", result)
|
||||
}
|
||||
}
|
||||
return templates
|
||||
}
|
|
@ -0,0 +1,39 @@
|
|||
package catalogue
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestIgnoreFilesIgnore(t *testing.T) {
|
||||
c := &Catalogue{
|
||||
ignoreFiles: []string{"workflows/", "cves/2020/cve-2020-5432.yaml"},
|
||||
templatesDirectory: "test",
|
||||
}
|
||||
tests := []struct {
|
||||
path string
|
||||
ignore bool
|
||||
}{
|
||||
{"workflows/", true},
|
||||
{"misc", false},
|
||||
{"cves/", false},
|
||||
{"cves/2020/cve-2020-5432.yaml", true},
|
||||
{"/Users/test/nuclei-templates/workflows/", true},
|
||||
{"/Users/test/nuclei-templates/misc", false},
|
||||
{"/Users/test/nuclei-templates/cves/", false},
|
||||
{"/Users/test/nuclei-templates/cves/2020/cve-2020-5432.yaml", true},
|
||||
}
|
||||
for _, test := range tests {
|
||||
require.Equal(t, test.ignore, c.checkIfInNucleiIgnore(test.path), "could not ignore file correctly")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExcludeFilesIgnore(t *testing.T) {
|
||||
c := &Catalogue{}
|
||||
excludes := []string{"workflows/", "cves/2020/cve-2020-5432.yaml"}
|
||||
paths := []string{"/Users/test/nuclei-templates/workflows/", "/Users/test/nuclei-templates/cves/2020/cve-2020-5432.yaml", "/Users/test/nuclei-templates/workflows/test-workflow.yaml", "/Users/test/nuclei-templates/cves/"}
|
||||
|
||||
data := c.ignoreFilesWithExcludes(paths, excludes)
|
||||
require.Equal(t, []string{"/Users/test/nuclei-templates/workflows/test-workflow.yaml", "/Users/test/nuclei-templates/cves/"}, data, "could not exclude correct files")
|
||||
}
|
|
@ -0,0 +1,45 @@
|
|||
package catalogue
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ResolvePath resolves the path to an absolute one in various ways.
|
||||
//
|
||||
// It checks if the filename is an absolute path, looks in the current directory
|
||||
// or checking the nuclei templates directory. If a second path is given,
|
||||
// it also tries to find paths relative to that second path.
|
||||
func (c *Catalogue) ResolvePath(templateName, second string) (string, error) {
|
||||
if strings.HasPrefix(templateName, "/") || strings.Contains(templateName, ":\\") {
|
||||
return templateName, nil
|
||||
}
|
||||
|
||||
if second != "" {
|
||||
secondBasePath := path.Join(filepath.Dir(second), templateName)
|
||||
if _, err := os.Stat(secondBasePath); !os.IsNotExist(err) {
|
||||
return secondBasePath, nil
|
||||
}
|
||||
}
|
||||
|
||||
curDirectory, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
templatePath := path.Join(curDirectory, templateName)
|
||||
if _, err := os.Stat(templatePath); !os.IsNotExist(err) {
|
||||
return templatePath, nil
|
||||
}
|
||||
|
||||
if c.templatesDirectory != "" {
|
||||
templatePath := path.Join(c.templatesDirectory, templateName)
|
||||
if _, err := os.Stat(templatePath); !os.IsNotExist(err) {
|
||||
return templatePath, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("no such path found: %s", templateName)
|
||||
}
|
|
@ -1,64 +0,0 @@
|
|||
package collaborator
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/projectdiscovery/collaborator"
|
||||
)
|
||||
|
||||
const (
|
||||
PollSeconds = 5
|
||||
DefaultMaxBufferLimit = 150
|
||||
)
|
||||
|
||||
var DefaultPollInterval time.Duration = time.Second * time.Duration(PollSeconds)
|
||||
|
||||
var DefaultCollaborator BurpCollaborator = BurpCollaborator{Collab: collaborator.NewBurpCollaborator()}
|
||||
|
||||
type BurpCollaborator struct {
|
||||
sync.RWMutex
|
||||
options *Options // unused
|
||||
Collab *collaborator.BurpCollaborator
|
||||
}
|
||||
|
||||
type Options struct {
|
||||
BIID string
|
||||
PollInterval time.Duration
|
||||
MaxBufferLimit int
|
||||
}
|
||||
|
||||
func New(options *Options) *BurpCollaborator {
|
||||
collab := collaborator.NewBurpCollaborator()
|
||||
collab.AddBIID(options.BIID)
|
||||
collab.MaxBufferLimit = options.MaxBufferLimit
|
||||
return &BurpCollaborator{Collab: collab, options: options}
|
||||
}
|
||||
|
||||
func (b *BurpCollaborator) Poll() {
|
||||
// if no valid biids were provided just return
|
||||
if len(b.Collab.BIIDs) > 0 {
|
||||
go b.Collab.PollEach(DefaultPollInterval)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BurpCollaborator) Has(s string) (found bool) {
|
||||
foundAt := 0
|
||||
for _, r := range b.Collab.RespBuffer {
|
||||
for i := 0; i < len(r.Responses); i++ {
|
||||
// search in dns - http - smtp
|
||||
b.RLock()
|
||||
found = strings.Contains(r.Responses[i].Data.RawRequestDecoded, s) || strings.Contains(r.Responses[i].Data.RequestDecoded, s) || strings.Contains(r.Responses[i].Data.MessageDecoded, s)
|
||||
b.RUnlock()
|
||||
if found {
|
||||
b.Lock()
|
||||
r.Responses = removeMatch(r.Responses, foundAt)
|
||||
b.Unlock()
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -1,9 +0,0 @@
|
|||
package collaborator
|
||||
|
||||
import (
|
||||
"github.com/projectdiscovery/collaborator"
|
||||
)
|
||||
|
||||
func removeMatch(responses []collaborator.BurpResponse, index int) []collaborator.BurpResponse {
|
||||
return append(responses[:index], responses[index+1:]...)
|
||||
}
|
|
@ -1,42 +0,0 @@
|
|||
package colorizer
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/logrusorgru/aurora"
|
||||
)
|
||||
|
||||
const (
|
||||
fgOrange uint8 = 208
|
||||
undefined string = "undefined"
|
||||
)
|
||||
|
||||
// NucleiColorizer contains the severity color mapping
|
||||
type NucleiColorizer struct {
|
||||
Colorizer aurora.Aurora
|
||||
SeverityMap map[string]string
|
||||
}
|
||||
|
||||
// NewNucleiColorizer initializes the new nuclei colorizer
|
||||
func NewNucleiColorizer(colorizer aurora.Aurora) *NucleiColorizer {
|
||||
return &NucleiColorizer{
|
||||
Colorizer: colorizer,
|
||||
SeverityMap: map[string]string{
|
||||
"info": colorizer.Blue("info").String(),
|
||||
"low": colorizer.Green("low").String(),
|
||||
"medium": colorizer.Yellow("medium").String(),
|
||||
"high": colorizer.Index(fgOrange, "high").String(),
|
||||
"critical": colorizer.Red("critical").String(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetColorizedSeverity returns the colorized severity string
|
||||
func (r *NucleiColorizer) GetColorizedSeverity(severity string) string {
|
||||
sev := r.SeverityMap[strings.ToLower(severity)]
|
||||
if sev == "" {
|
||||
return undefined
|
||||
}
|
||||
|
||||
return sev
|
||||
}
|
|
@ -1,184 +0,0 @@
|
|||
package executer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/bufwriter"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/progress"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/tracelog"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/colorizer"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/matchers"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/requests"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/templates"
|
||||
retryabledns "github.com/projectdiscovery/retryabledns"
|
||||
"go.uber.org/ratelimit"
|
||||
)
|
||||
|
||||
// DNSExecuter is a client for performing a DNS request
|
||||
// for a template.
|
||||
type DNSExecuter struct {
|
||||
// hm *hybrid.HybridMap // Unused
|
||||
coloredOutput bool
|
||||
debug bool
|
||||
jsonOutput bool
|
||||
jsonRequest bool
|
||||
noMeta bool
|
||||
Results bool
|
||||
vhost bool
|
||||
traceLog tracelog.Log
|
||||
dnsClient *retryabledns.Client
|
||||
template *templates.Template
|
||||
dnsRequest *requests.DNSRequest
|
||||
writer *bufwriter.Writer
|
||||
ratelimiter ratelimit.Limiter
|
||||
|
||||
colorizer colorizer.NucleiColorizer
|
||||
decolorizer *regexp.Regexp
|
||||
}
|
||||
|
||||
// DefaultResolvers contains the list of resolvers known to be trusted.
|
||||
var DefaultResolvers = []string{
|
||||
"1.1.1.1:53", // Cloudflare
|
||||
"1.0.0.1:53", // Cloudflare
|
||||
"8.8.8.8:53", // Google
|
||||
"8.8.4.4:53", // Google
|
||||
}
|
||||
|
||||
// DNSOptions contains configuration options for the DNS executer.
|
||||
type DNSOptions struct {
|
||||
ColoredOutput bool
|
||||
Debug bool
|
||||
JSON bool
|
||||
JSONRequests bool
|
||||
NoMeta bool
|
||||
VHost bool
|
||||
TraceLog tracelog.Log
|
||||
Template *templates.Template
|
||||
DNSRequest *requests.DNSRequest
|
||||
Writer *bufwriter.Writer
|
||||
|
||||
Colorizer colorizer.NucleiColorizer
|
||||
Decolorizer *regexp.Regexp
|
||||
RateLimiter ratelimit.Limiter
|
||||
}
|
||||
|
||||
// NewDNSExecuter creates a new DNS executer from a template
|
||||
// and a DNS request query.
|
||||
func NewDNSExecuter(options *DNSOptions) *DNSExecuter {
|
||||
dnsClient := retryabledns.New(DefaultResolvers, options.DNSRequest.Retries)
|
||||
|
||||
executer := &DNSExecuter{
|
||||
debug: options.Debug,
|
||||
noMeta: options.NoMeta,
|
||||
jsonOutput: options.JSON,
|
||||
traceLog: options.TraceLog,
|
||||
jsonRequest: options.JSONRequests,
|
||||
dnsClient: dnsClient,
|
||||
vhost: options.VHost,
|
||||
template: options.Template,
|
||||
dnsRequest: options.DNSRequest,
|
||||
writer: options.Writer,
|
||||
coloredOutput: options.ColoredOutput,
|
||||
colorizer: options.Colorizer,
|
||||
decolorizer: options.Decolorizer,
|
||||
ratelimiter: options.RateLimiter,
|
||||
}
|
||||
return executer
|
||||
}
|
||||
|
||||
// ExecuteDNS executes the DNS request on a URL
|
||||
func (e *DNSExecuter) ExecuteDNS(p *progress.Progress, reqURL string) *Result {
|
||||
result := &Result{}
|
||||
if e.vhost {
|
||||
parts := strings.Split(reqURL, ",")
|
||||
reqURL = parts[0]
|
||||
}
|
||||
|
||||
// Parse the URL and return domain if URL.
|
||||
var domain string
|
||||
if isURL(reqURL) {
|
||||
domain = extractDomain(reqURL)
|
||||
} else {
|
||||
domain = reqURL
|
||||
}
|
||||
|
||||
// Compile each request for the template based on the URL
|
||||
compiledRequest, err := e.dnsRequest.MakeDNSRequest(domain)
|
||||
if err != nil {
|
||||
e.traceLog.Request(e.template.ID, domain, "dns", err)
|
||||
result.Error = errors.Wrap(err, "could not make dns request")
|
||||
p.Drop(1)
|
||||
return result
|
||||
}
|
||||
e.traceLog.Request(e.template.ID, domain, "dns", nil)
|
||||
|
||||
if e.debug {
|
||||
gologger.Infof("Dumped DNS request for %s (%s)\n\n", reqURL, e.template.ID)
|
||||
fmt.Fprintf(os.Stderr, "%s\n", compiledRequest.String())
|
||||
}
|
||||
|
||||
// Send the request to the target servers
|
||||
resp, err := e.dnsClient.Do(compiledRequest)
|
||||
if err != nil {
|
||||
result.Error = errors.Wrap(err, "could not send dns request")
|
||||
p.Drop(1)
|
||||
return result
|
||||
}
|
||||
p.Update()
|
||||
|
||||
gologger.Verbosef("Sent for [%s] to %s\n", "dns-request", e.template.ID, reqURL)
|
||||
|
||||
if e.debug {
|
||||
gologger.Infof("Dumped DNS response for %s (%s)\n\n", reqURL, e.template.ID)
|
||||
fmt.Fprintf(os.Stderr, "%s\n", resp.String())
|
||||
}
|
||||
|
||||
matcherCondition := e.dnsRequest.GetMatchersCondition()
|
||||
|
||||
for _, matcher := range e.dnsRequest.Matchers {
|
||||
// Check if the matcher matched
|
||||
if !matcher.MatchDNS(resp) {
|
||||
// If the condition is AND we haven't matched, return.
|
||||
if matcherCondition == matchers.ANDCondition {
|
||||
return result
|
||||
}
|
||||
} else {
|
||||
// If the matcher has matched, and its an OR
|
||||
// write the first output then move to next matcher.
|
||||
if matcherCondition == matchers.ORCondition && len(e.dnsRequest.Extractors) == 0 {
|
||||
e.writeOutputDNS(domain, compiledRequest, resp, matcher, nil)
|
||||
result.GotResults = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// All matchers have successfully completed so now start with the
|
||||
// next task which is extraction of input from matchers.
|
||||
var extractorResults []string
|
||||
|
||||
for _, extractor := range e.dnsRequest.Extractors {
|
||||
for match := range extractor.ExtractDNS(resp) {
|
||||
if !extractor.Internal {
|
||||
extractorResults = append(extractorResults, match)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Write a final string of output if matcher type is
|
||||
// AND or if we have extractors for the mechanism too.
|
||||
if len(e.dnsRequest.Extractors) > 0 || matcherCondition == matchers.ANDCondition {
|
||||
e.writeOutputDNS(domain, compiledRequest, resp, nil, extractorResults)
|
||||
|
||||
result.GotResults = true
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Close closes the dns executer for a template.
|
||||
func (e *DNSExecuter) Close() {}
|
|
@ -1,778 +0,0 @@
|
|||
package executer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/cookiejar"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/corpix/uarand"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/fastdialer/fastdialer"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/bufwriter"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/progress"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/tracelog"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/colorizer"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/generators"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/matchers"
|
||||
projetctfile "github.com/projectdiscovery/nuclei/v2/pkg/projectfile"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/requests"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/templates"
|
||||
"github.com/projectdiscovery/rawhttp"
|
||||
"github.com/projectdiscovery/retryablehttp-go"
|
||||
"github.com/remeh/sizedwaitgroup"
|
||||
"go.uber.org/ratelimit"
|
||||
"golang.org/x/net/proxy"
|
||||
)
|
||||
|
||||
const (
|
||||
two = 2
|
||||
ten = 10
|
||||
defaultMaxWorkers = 150
|
||||
defaultMaxHistorydata = 150
|
||||
)
|
||||
|
||||
// HTTPExecuter is client for performing HTTP requests
// for a template.
type HTTPExecuter struct {
	pf               *projetctfile.ProjectFile   // optional request/response cache ("nuclei-project")
	customHeaders    requests.CustomHeaders      // user-supplied "Name: value" headers added to every request
	colorizer        colorizer.NucleiColorizer   // colored terminal output helper
	httpClient       *retryablehttp.Client       // standard client with retry support
	rawHTTPClient    *rawhttp.Client             // raw client used for unsafe/pipelined requests
	template         *templates.Template         // template this executer runs
	bulkHTTPRequest  *requests.BulkHTTPRequest   // request generator (payloads, matchers, extractors)
	writer           *bufwriter.Writer           // optional output file writer
	CookieJar        *cookiejar.Jar              // shared cookie jar, when cookie reuse is enabled
	traceLog         tracelog.Log                // per-request trace logging
	decolorizer      *regexp.Regexp              // strips ANSI colors before writing to file
	randomAgent      bool                        // inject a random User-Agent per request
	vhost            bool                        // targets carry a custom Host ("url,hostname")
	coloredOutput    bool                        // terminal output is colorized
	debug            bool                        // dump requests/responses to stderr
	Results          bool
	jsonOutput       bool // emit JSON lines instead of text
	jsonRequest      bool // include raw request/response in JSON output
	noMeta           bool // suppress template/matcher metadata in output
	stopAtFirstMatch bool // stop the payload loop after the first hit
	ratelimiter      ratelimit.Limiter // global requests-per-second limiter
}
|
||||
|
||||
// HTTPOptions contains configuration options for the HTTP executer.
// It is consumed once by NewHTTPExecuter; fields mirror the executer's
// internal state (see HTTPExecuter for per-field meaning).
type HTTPOptions struct {
	RandomAgent      bool // inject a random User-Agent per request
	Debug            bool // dump requests/responses to stderr
	JSON             bool // emit JSON lines instead of text
	JSONRequests     bool // include raw request/response in JSON output
	NoMeta           bool // suppress template/matcher metadata in output
	CookieReuse      bool // create a shared cookie jar when CookieJar is nil
	ColoredOutput    bool
	StopAtFirstMatch bool
	Vhost            bool
	Timeout          int // per-request timeout, seconds
	Retries          int // retryablehttp retry count
	ProxyURL         string // HTTP proxy, parsed by NewHTTPExecuter
	ProxySocksURL    string // SOCKS5 proxy, wired into the transport dialer
	Template         *templates.Template
	BulkHTTPRequest  *requests.BulkHTTPRequest
	Writer           *bufwriter.Writer
	CustomHeaders    requests.CustomHeaders
	CookieJar        *cookiejar.Jar
	Colorizer        *colorizer.NucleiColorizer
	Decolorizer      *regexp.Regexp
	TraceLog         tracelog.Log
	PF               *projetctfile.ProjectFile
	RateLimiter      ratelimit.Limiter
	Dialer           *fastdialer.Dialer // fast DNS-caching dialer used by the transport
}
|
||||
|
||||
// NewHTTPExecuter creates a new HTTP executer from a template
|
||||
// and a HTTP request query.
|
||||
func NewHTTPExecuter(options *HTTPOptions) (*HTTPExecuter, error) {
|
||||
var (
|
||||
proxyURL *url.URL
|
||||
err error
|
||||
)
|
||||
|
||||
if options.ProxyURL != "" {
|
||||
proxyURL, err = url.Parse(options.ProxyURL)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create the HTTP Client
|
||||
client := makeHTTPClient(proxyURL, options)
|
||||
// nolint:bodyclose // false positive there is no body to close yet
|
||||
client.CheckRetry = retryablehttp.HostSprayRetryPolicy()
|
||||
|
||||
if options.CookieJar != nil {
|
||||
client.HTTPClient.Jar = options.CookieJar
|
||||
} else if options.CookieReuse {
|
||||
jar, err := cookiejar.New(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.HTTPClient.Jar = jar
|
||||
}
|
||||
|
||||
// initiate raw http client
|
||||
rawClient := rawhttp.NewClient(rawhttp.DefaultOptions)
|
||||
|
||||
executer := &HTTPExecuter{
|
||||
debug: options.Debug,
|
||||
jsonOutput: options.JSON,
|
||||
jsonRequest: options.JSONRequests,
|
||||
noMeta: options.NoMeta,
|
||||
httpClient: client,
|
||||
rawHTTPClient: rawClient,
|
||||
traceLog: options.TraceLog,
|
||||
template: options.Template,
|
||||
bulkHTTPRequest: options.BulkHTTPRequest,
|
||||
writer: options.Writer,
|
||||
randomAgent: options.RandomAgent,
|
||||
customHeaders: options.CustomHeaders,
|
||||
CookieJar: options.CookieJar,
|
||||
coloredOutput: options.ColoredOutput,
|
||||
colorizer: *options.Colorizer,
|
||||
decolorizer: options.Decolorizer,
|
||||
stopAtFirstMatch: options.StopAtFirstMatch,
|
||||
pf: options.PF,
|
||||
vhost: options.Vhost,
|
||||
ratelimiter: options.RateLimiter,
|
||||
}
|
||||
return executer, nil
|
||||
}
|
||||
|
||||
func (e *HTTPExecuter) ExecuteRaceRequest(reqURL string) *Result {
|
||||
result := &Result{
|
||||
Matches: make(map[string]interface{}),
|
||||
Extractions: make(map[string]interface{}),
|
||||
}
|
||||
|
||||
dynamicvalues := make(map[string]interface{})
|
||||
|
||||
// verify if the URL is already being processed
|
||||
if e.bulkHTTPRequest.HasGenerator(reqURL) {
|
||||
return result
|
||||
}
|
||||
|
||||
e.bulkHTTPRequest.CreateGenerator(reqURL)
|
||||
|
||||
// Workers that keeps enqueuing new requests
|
||||
maxWorkers := e.bulkHTTPRequest.RaceNumberRequests
|
||||
swg := sizedwaitgroup.New(maxWorkers)
|
||||
for i := 0; i < e.bulkHTTPRequest.RaceNumberRequests; i++ {
|
||||
swg.Add()
|
||||
// base request
|
||||
result.Lock()
|
||||
request, err := e.bulkHTTPRequest.MakeHTTPRequest(reqURL, dynamicvalues, e.bulkHTTPRequest.Current(reqURL))
|
||||
payloads, _ := e.bulkHTTPRequest.GetPayloadsValues(reqURL)
|
||||
result.Unlock()
|
||||
// ignore the error due to the base request having null paylods
|
||||
if err == requests.ErrNoPayload {
|
||||
// pass through
|
||||
} else if err != nil {
|
||||
result.Error = err
|
||||
}
|
||||
go func(httpRequest *requests.HTTPRequest) {
|
||||
defer swg.Done()
|
||||
|
||||
// If the request was built correctly then execute it
|
||||
err = e.handleHTTP(reqURL, httpRequest, dynamicvalues, result, payloads, "")
|
||||
if err != nil {
|
||||
result.Error = errors.Wrap(err, "could not handle http request")
|
||||
}
|
||||
}(request)
|
||||
}
|
||||
|
||||
swg.Wait()
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (e *HTTPExecuter) ExecuteParallelHTTP(p *progress.Progress, reqURL string) *Result {
|
||||
result := &Result{
|
||||
Matches: make(map[string]interface{}),
|
||||
Extractions: make(map[string]interface{}),
|
||||
}
|
||||
|
||||
dynamicvalues := make(map[string]interface{})
|
||||
|
||||
// verify if the URL is already being processed
|
||||
if e.bulkHTTPRequest.HasGenerator(reqURL) {
|
||||
return result
|
||||
}
|
||||
|
||||
remaining := e.bulkHTTPRequest.GetRequestCount()
|
||||
e.bulkHTTPRequest.CreateGenerator(reqURL)
|
||||
|
||||
// Workers that keeps enqueuing new requests
|
||||
maxWorkers := e.bulkHTTPRequest.Threads
|
||||
swg := sizedwaitgroup.New(maxWorkers)
|
||||
for e.bulkHTTPRequest.Next(reqURL) {
|
||||
result.Lock()
|
||||
request, err := e.bulkHTTPRequest.MakeHTTPRequest(reqURL, dynamicvalues, e.bulkHTTPRequest.Current(reqURL))
|
||||
payloads, _ := e.bulkHTTPRequest.GetPayloadsValues(reqURL)
|
||||
result.Unlock()
|
||||
// ignore the error due to the base request having null paylods
|
||||
if err == requests.ErrNoPayload {
|
||||
// pass through
|
||||
} else if err != nil {
|
||||
result.Error = err
|
||||
p.Drop(remaining)
|
||||
} else {
|
||||
swg.Add()
|
||||
go func(httpRequest *requests.HTTPRequest) {
|
||||
defer swg.Done()
|
||||
|
||||
e.ratelimiter.Take()
|
||||
|
||||
// If the request was built correctly then execute it
|
||||
err = e.handleHTTP(reqURL, httpRequest, dynamicvalues, result, payloads, "")
|
||||
if err != nil {
|
||||
e.traceLog.Request(e.template.ID, reqURL, "http", err)
|
||||
result.Error = errors.Wrap(err, "could not handle http request")
|
||||
p.Drop(remaining)
|
||||
} else {
|
||||
e.traceLog.Request(e.template.ID, reqURL, "http", nil)
|
||||
}
|
||||
}(request)
|
||||
}
|
||||
p.Update()
|
||||
e.bulkHTTPRequest.Increment(reqURL)
|
||||
}
|
||||
swg.Wait()
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (e *HTTPExecuter) ExecuteTurboHTTP(reqURL string) *Result {
|
||||
result := &Result{
|
||||
Matches: make(map[string]interface{}),
|
||||
Extractions: make(map[string]interface{}),
|
||||
}
|
||||
|
||||
dynamicvalues := make(map[string]interface{})
|
||||
|
||||
// verify if the URL is already being processed
|
||||
if e.bulkHTTPRequest.HasGenerator(reqURL) {
|
||||
return result
|
||||
}
|
||||
|
||||
e.bulkHTTPRequest.CreateGenerator(reqURL)
|
||||
|
||||
// need to extract the target from the url
|
||||
URL, err := url.Parse(reqURL)
|
||||
if err != nil {
|
||||
return result
|
||||
}
|
||||
|
||||
pipeOptions := rawhttp.DefaultPipelineOptions
|
||||
pipeOptions.Host = URL.Host
|
||||
pipeOptions.MaxConnections = 1
|
||||
if e.bulkHTTPRequest.PipelineConcurrentConnections > 0 {
|
||||
pipeOptions.MaxConnections = e.bulkHTTPRequest.PipelineConcurrentConnections
|
||||
}
|
||||
if e.bulkHTTPRequest.PipelineRequestsPerConnection > 0 {
|
||||
pipeOptions.MaxPendingRequests = e.bulkHTTPRequest.PipelineRequestsPerConnection
|
||||
}
|
||||
pipeclient := rawhttp.NewPipelineClient(pipeOptions)
|
||||
|
||||
// defaultMaxWorkers should be a sufficient value to keep queues always full
|
||||
maxWorkers := defaultMaxWorkers
|
||||
// in case the queue is bigger increase the workers
|
||||
if pipeOptions.MaxPendingRequests > maxWorkers {
|
||||
maxWorkers = pipeOptions.MaxPendingRequests
|
||||
}
|
||||
swg := sizedwaitgroup.New(maxWorkers)
|
||||
for e.bulkHTTPRequest.Next(reqURL) {
|
||||
result.Lock()
|
||||
request, err := e.bulkHTTPRequest.MakeHTTPRequest(reqURL, dynamicvalues, e.bulkHTTPRequest.Current(reqURL))
|
||||
payloads, _ := e.bulkHTTPRequest.GetPayloadsValues(reqURL)
|
||||
result.Unlock()
|
||||
// ignore the error due to the base request having null paylods
|
||||
if err == requests.ErrNoPayload {
|
||||
// pass through
|
||||
} else if err != nil {
|
||||
result.Error = err
|
||||
} else {
|
||||
swg.Add()
|
||||
go func(httpRequest *requests.HTTPRequest) {
|
||||
defer swg.Done()
|
||||
|
||||
// HTTP pipelining ignores rate limit
|
||||
// If the request was built correctly then execute it
|
||||
request.Pipeline = true
|
||||
request.PipelineClient = pipeclient
|
||||
err = e.handleHTTP(reqURL, httpRequest, dynamicvalues, result, payloads, "")
|
||||
if err != nil {
|
||||
e.traceLog.Request(e.template.ID, reqURL, "http", err)
|
||||
result.Error = errors.Wrap(err, "could not handle http request")
|
||||
} else {
|
||||
e.traceLog.Request(e.template.ID, reqURL, "http", nil)
|
||||
}
|
||||
request.PipelineClient = nil
|
||||
}(request)
|
||||
}
|
||||
|
||||
e.bulkHTTPRequest.Increment(reqURL)
|
||||
}
|
||||
swg.Wait()
|
||||
return result
|
||||
}
|
||||
|
||||
// ExecuteHTTP executes the HTTP request on a URL
|
||||
func (e *HTTPExecuter) ExecuteHTTP(p *progress.Progress, reqURL string) *Result {
|
||||
var customHost string
|
||||
if e.vhost {
|
||||
parts := strings.Split(reqURL, ",")
|
||||
reqURL = parts[0]
|
||||
customHost = parts[1]
|
||||
}
|
||||
|
||||
// verify if pipeline was requested
|
||||
if e.bulkHTTPRequest.Pipeline {
|
||||
return e.ExecuteTurboHTTP(reqURL)
|
||||
}
|
||||
|
||||
// verify if a basic race condition was requested
|
||||
if e.bulkHTTPRequest.Race && e.bulkHTTPRequest.RaceNumberRequests > 0 {
|
||||
return e.ExecuteRaceRequest(reqURL)
|
||||
}
|
||||
|
||||
// verify if parallel elaboration was requested
|
||||
if e.bulkHTTPRequest.Threads > 0 {
|
||||
return e.ExecuteParallelHTTP(p, reqURL)
|
||||
}
|
||||
|
||||
var requestNumber int
|
||||
|
||||
result := &Result{
|
||||
Matches: make(map[string]interface{}),
|
||||
Extractions: make(map[string]interface{}),
|
||||
historyData: make(map[string]interface{}),
|
||||
}
|
||||
|
||||
dynamicvalues := make(map[string]interface{})
|
||||
|
||||
// verify if the URL is already being processed
|
||||
if e.bulkHTTPRequest.HasGenerator(reqURL) {
|
||||
return result
|
||||
}
|
||||
|
||||
remaining := e.bulkHTTPRequest.GetRequestCount()
|
||||
e.bulkHTTPRequest.CreateGenerator(reqURL)
|
||||
|
||||
for e.bulkHTTPRequest.Next(reqURL) {
|
||||
requestNumber++
|
||||
result.Lock()
|
||||
httpRequest, err := e.bulkHTTPRequest.MakeHTTPRequest(reqURL, dynamicvalues, e.bulkHTTPRequest.Current(reqURL))
|
||||
payloads, _ := e.bulkHTTPRequest.GetPayloadsValues(reqURL)
|
||||
result.Unlock()
|
||||
// ignore the error due to the base request having null paylods
|
||||
if err == requests.ErrNoPayload {
|
||||
// pass through
|
||||
} else if err != nil {
|
||||
result.Error = err
|
||||
p.Drop(remaining)
|
||||
} else {
|
||||
if e.vhost {
|
||||
if httpRequest.Request != nil {
|
||||
httpRequest.Request.Host = customHost
|
||||
}
|
||||
if httpRequest.RawRequest != nil && httpRequest.RawRequest.Headers != nil {
|
||||
httpRequest.RawRequest.Headers["Host"] = customHost
|
||||
}
|
||||
}
|
||||
|
||||
e.ratelimiter.Take()
|
||||
// If the request was built correctly then execute it
|
||||
format := "%s_" + strconv.Itoa(requestNumber)
|
||||
err = e.handleHTTP(reqURL, httpRequest, dynamicvalues, result, payloads, format)
|
||||
if err != nil {
|
||||
result.Error = errors.Wrap(err, "could not handle http request")
|
||||
p.Drop(remaining)
|
||||
e.traceLog.Request(e.template.ID, reqURL, "http", err)
|
||||
} else {
|
||||
e.traceLog.Request(e.template.ID, reqURL, "http", nil)
|
||||
}
|
||||
}
|
||||
p.Update()
|
||||
|
||||
// Check if has to stop processing at first valid result
|
||||
if e.stopAtFirstMatch && result.GotResults {
|
||||
p.Drop(remaining)
|
||||
break
|
||||
}
|
||||
|
||||
// move always forward with requests
|
||||
e.bulkHTTPRequest.Increment(reqURL)
|
||||
remaining--
|
||||
}
|
||||
gologger.Verbosef("Sent for [%s] to %s\n", "http-request", e.template.ID, reqURL)
|
||||
return result
|
||||
}
|
||||
|
||||
// handleHTTP executes one prepared request against reqURL and runs the
// template's matchers and extractors over the response.
//
// Flow: inject custom/random-agent headers -> send via one of three paths
// (pipeline client, raw "unsafe" client, or retryablehttp with optional
// project-file cache) -> read and decompress the body -> update
// result.historyData for DSL matchers -> evaluate matchers (OR matches are
// written immediately; a failed matcher under AND aborts) -> run extractors
// (feeding dynamicvalues for later requests) -> emit the final output line.
//
// A non-nil return means the request itself failed; matcher misses return nil.
func (e *HTTPExecuter) handleHTTP(reqURL string, request *requests.HTTPRequest, dynamicvalues map[string]interface{}, result *Result, payloads map[string]interface{}, format string) error {
	// Add User-Agent value randomly to the customHeaders slice if `random-agent` flag is given
	if e.randomAgent {
		// nolint:errcheck // ignoring error
		e.customHeaders.Set("User-Agent: " + uarand.GetRandom())
	}

	e.setCustomHeaders(request)

	var (
		resp          *http.Response
		err           error
		dumpedRequest []byte
		fromcache     bool // true when the response came from the project-file cache
	)

	// The dumped request is needed both for debug output and as the
	// project-file cache key.
	if e.debug || e.pf != nil {
		dumpedRequest, err = requests.Dump(request, reqURL)
		if err != nil {
			return err
		}
	}

	if e.debug {
		gologger.Infof("Dumped HTTP request for %s (%s)\n\n", reqURL, e.template.ID)
		fmt.Fprintf(os.Stderr, "%s", string(dumpedRequest))
	}

	timeStart := time.Now()

	if request.Pipeline {
		// Pipelined raw request over the shared pipeline client.
		resp, err = request.PipelineClient.DoRaw(request.RawRequest.Method, reqURL, request.RawRequest.Path, requests.ExpandMapValues(request.RawRequest.Headers), ioutil.NopCloser(strings.NewReader(request.RawRequest.Data)))
		if err != nil {
			if resp != nil {
				resp.Body.Close()
			}
			e.traceLog.Request(e.template.ID, reqURL, "http", err)
			return err
		}
		e.traceLog.Request(e.template.ID, reqURL, "http", nil)
	} else if request.Unsafe {
		// rawhttp
		// burp uses "\r\n" as new line character
		request.RawRequest.Data = strings.ReplaceAll(request.RawRequest.Data, "\n", "\r\n")
		options := e.rawHTTPClient.Options
		options.AutomaticContentLength = request.AutomaticContentLengthHeader
		options.AutomaticHostHeader = request.AutomaticHostHeader
		options.FollowRedirects = request.FollowRedirects
		resp, err = e.rawHTTPClient.DoRawWithOptions(request.RawRequest.Method, reqURL, request.RawRequest.Path, requests.ExpandMapValues(request.RawRequest.Headers), ioutil.NopCloser(strings.NewReader(request.RawRequest.Data)), options)
		if err != nil {
			if resp != nil {
				resp.Body.Close()
			}
			e.traceLog.Request(e.template.ID, reqURL, "http", err)
			return err
		}
		e.traceLog.Request(e.template.ID, reqURL, "http", nil)
	} else {
		// if nuclei-project is available check if the request was already sent previously
		if e.pf != nil {
			// if unavailable fail silently
			fromcache = true
			// nolint:bodyclose // false positive the response is generated at runtime
			resp, err = e.pf.Get(dumpedRequest)
			if err != nil {
				fromcache = false
			}
		}

		// retryablehttp — only hit the network on a cache miss.
		if resp == nil {
			resp, err = e.httpClient.Do(request.Request)
			if err != nil {
				if resp != nil {
					resp.Body.Close()
				}
				e.traceLog.Request(e.template.ID, reqURL, "http", err)
				return err
			}
			e.traceLog.Request(e.template.ID, reqURL, "http", nil)
		}
	}

	// duration feeds duration-based matchers and the DSL history data.
	duration := time.Since(timeStart)

	// Dump response - Step 1 - Decompression not yet handled
	var dumpedResponse []byte
	if e.debug {
		var dumpErr error
		dumpedResponse, dumpErr = httputil.DumpResponse(resp, true)
		if dumpErr != nil {
			return errors.Wrap(dumpErr, "could not dump http response")
		}
	}

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Drain the remainder so the connection can be reused, then close.
		_, copyErr := io.Copy(ioutil.Discard, resp.Body)
		if copyErr != nil {
			resp.Body.Close()
			return copyErr
		}

		resp.Body.Close()

		return errors.Wrap(err, "could not read http body")
	}

	resp.Body.Close()

	// net/http doesn't automatically decompress the response body if an encoding has been specified by the user in the request
	// so in case we have to manually do it
	dataOrig := data
	data, err = requests.HandleDecompression(request, data)
	if err != nil {
		return errors.Wrap(err, "could not decompress http body")
	}

	// Dump response - step 2 - replace gzip body with deflated one or with itself (NOP operation)
	if e.debug {
		dumpedResponse = bytes.ReplaceAll(dumpedResponse, dataOrig, data)
		gologger.Infof("Dumped HTTP response for %s (%s)\n\n", reqURL, e.template.ID)
		fmt.Fprintf(os.Stderr, "%s\n", string(dumpedResponse))
	}

	// if nuclei-project is enabled store the response if not previously done
	if e.pf != nil && !fromcache {
		err := e.pf.Set(dumpedRequest, resp, data)
		if err != nil {
			return errors.Wrap(err, "could not store in project file")
		}
	}

	// Convert response body from []byte to string with zero copy
	body := unsafeToString(data)

	headers := headersToString(resp.Header)

	var matchData map[string]interface{}
	if payloads != nil {
		matchData = generators.MergeMaps(result.historyData, payloads)
	}

	// store for internal purposes the DSL matcher data
	// hardcode stopping storing data after defaultMaxHistorydata items
	if len(result.historyData) < defaultMaxHistorydata {
		result.Lock()
		// update history data with current reqURL and hostname
		result.historyData["reqURL"] = reqURL
		if parsed, err := url.Parse(reqURL); err == nil {
			result.historyData["Hostname"] = parsed.Host
		}
		result.historyData = generators.MergeMaps(result.historyData, matchers.HTTPToMap(resp, body, headers, duration, format))
		if payloads == nil {
			// merge them to history data
			// NOTE(review): this merges payloads only when it is nil (a
			// no-op); the condition looks inverted vs. the matchData branch
			// above — confirm intent before changing.
			result.historyData = generators.MergeMaps(result.historyData, payloads)
		}
		result.historyData = generators.MergeMaps(result.historyData, dynamicvalues)

		// complement match data with new one if necessary
		matchData = generators.MergeMaps(matchData, result.historyData)
		result.Unlock()
	}

	matcherCondition := e.bulkHTTPRequest.GetMatchersCondition()
	for _, matcher := range e.bulkHTTPRequest.Matchers {
		// Check if the matcher matched
		if !matcher.Match(resp, body, headers, duration, matchData) {
			// If the condition is AND we haven't matched, try next request.
			if matcherCondition == matchers.ANDCondition {
				return nil
			}
		} else {
			// If the matcher has matched, and its an OR
			// write the first output then move to next matcher.
			if matcherCondition == matchers.ORCondition {
				result.Lock()
				result.Matches[matcher.Name] = nil
				// probably redundant but ensures we snapshot current payload values when matchers are valid
				result.Meta = request.Meta
				result.GotResults = true
				result.Unlock()
				e.writeOutputHTTP(request, resp, body, matcher, nil, request.Meta, reqURL)
			}
		}
	}

	// All matchers have successfully completed so now start with the
	// next task which is extraction of input from matchers.
	var extractorResults, outputExtractorResults []string

	for _, extractor := range e.bulkHTTPRequest.Extractors {
		for match := range extractor.Extract(resp, body, headers) {
			// First extraction wins: later requests can reference it via
			// dynamicvalues.
			if _, ok := dynamicvalues[extractor.Name]; !ok {
				dynamicvalues[extractor.Name] = match
			}

			extractorResults = append(extractorResults, match)

			// Internal extractors feed dynamicvalues only; they are not printed.
			if !extractor.Internal {
				outputExtractorResults = append(outputExtractorResults, match)
			}
		}
		// probably redundant but ensures we snapshot current payload values when extractors are valid
		result.Lock()
		result.Meta = request.Meta
		result.Extractions[extractor.Name] = extractorResults
		result.Unlock()
	}

	// Write a final string of output if matcher type is
	// AND or if we have extractors for the mechanism too.
	if len(outputExtractorResults) > 0 || matcherCondition == matchers.ANDCondition {
		e.writeOutputHTTP(request, resp, body, nil, outputExtractorResults, request.Meta, reqURL)
		result.Lock()
		result.GotResults = true
		result.Unlock()
	}

	return nil
}
|
||||
|
||||
// Close closes the http executer for a template.
// It is a no-op: underlying HTTP clients manage their own connection
// pools; the method exists to satisfy the executer interface.
func (e *HTTPExecuter) Close() {}
|
||||
|
||||
// makeHTTPClient creates a http client
|
||||
func makeHTTPClient(proxyURL *url.URL, options *HTTPOptions) *retryablehttp.Client {
|
||||
// Multiple Host
|
||||
retryablehttpOptions := retryablehttp.DefaultOptionsSpraying
|
||||
disableKeepAlives := true
|
||||
maxIdleConns := 0
|
||||
maxConnsPerHost := 0
|
||||
maxIdleConnsPerHost := -1
|
||||
|
||||
if options.BulkHTTPRequest.Threads > 0 {
|
||||
// Single host
|
||||
retryablehttpOptions = retryablehttp.DefaultOptionsSingle
|
||||
disableKeepAlives = false
|
||||
maxIdleConnsPerHost = 500
|
||||
maxConnsPerHost = 500
|
||||
}
|
||||
|
||||
retryablehttpOptions.RetryWaitMax = 10 * time.Second
|
||||
retryablehttpOptions.RetryMax = options.Retries
|
||||
followRedirects := options.BulkHTTPRequest.Redirects
|
||||
maxRedirects := options.BulkHTTPRequest.MaxRedirects
|
||||
|
||||
transport := &http.Transport{
|
||||
DialContext: options.Dialer.Dial,
|
||||
MaxIdleConns: maxIdleConns,
|
||||
MaxIdleConnsPerHost: maxIdleConnsPerHost,
|
||||
MaxConnsPerHost: maxConnsPerHost,
|
||||
TLSClientConfig: &tls.Config{
|
||||
Renegotiation: tls.RenegotiateOnceAsClient,
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
DisableKeepAlives: disableKeepAlives,
|
||||
}
|
||||
|
||||
// Attempts to overwrite the dial function with the socks proxied version
|
||||
if options.ProxySocksURL != "" {
|
||||
var proxyAuth *proxy.Auth
|
||||
|
||||
socksURL, err := url.Parse(options.ProxySocksURL)
|
||||
|
||||
if err == nil {
|
||||
proxyAuth = &proxy.Auth{}
|
||||
proxyAuth.User = socksURL.User.Username()
|
||||
proxyAuth.Password, _ = socksURL.User.Password()
|
||||
}
|
||||
|
||||
dialer, err := proxy.SOCKS5("tcp", fmt.Sprintf("%s:%s", socksURL.Hostname(), socksURL.Port()), proxyAuth, proxy.Direct)
|
||||
dc := dialer.(interface {
|
||||
DialContext(ctx context.Context, network, addr string) (net.Conn, error)
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
transport.DialContext = dc.DialContext
|
||||
}
|
||||
}
|
||||
|
||||
if proxyURL != nil {
|
||||
transport.Proxy = http.ProxyURL(proxyURL)
|
||||
}
|
||||
|
||||
return retryablehttp.NewWithHTTPClient(&http.Client{
|
||||
Transport: transport,
|
||||
Timeout: time.Duration(options.Timeout) * time.Second,
|
||||
CheckRedirect: makeCheckRedirectFunc(followRedirects, maxRedirects),
|
||||
}, retryablehttpOptions)
|
||||
}
|
||||
|
||||
type checkRedirectFunc func(_ *http.Request, requests []*http.Request) error
|
||||
|
||||
func makeCheckRedirectFunc(followRedirects bool, maxRedirects int) checkRedirectFunc {
|
||||
return func(_ *http.Request, requests []*http.Request) error {
|
||||
if !followRedirects {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
|
||||
if maxRedirects == 0 {
|
||||
if len(requests) > ten {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(requests) > maxRedirects {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (e *HTTPExecuter) setCustomHeaders(r *requests.HTTPRequest) {
|
||||
for _, customHeader := range e.customHeaders {
|
||||
// This should be pre-computed somewhere and done only once
|
||||
tokens := strings.SplitN(customHeader, ":", two)
|
||||
// if it's an invalid header skip it
|
||||
if len(tokens) < two {
|
||||
continue
|
||||
}
|
||||
|
||||
headerName, headerValue := tokens[0], strings.Join(tokens[1:], "")
|
||||
if r.RawRequest != nil {
|
||||
// rawhttp
|
||||
r.RawRequest.Headers[headerName] = headerValue
|
||||
} else {
|
||||
// retryablehttp
|
||||
headerName = strings.TrimSpace(headerName)
|
||||
headerValue = strings.TrimSpace(headerValue)
|
||||
r.Request.Header[headerName] = []string{headerValue}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Result collects the outcome of executing a template request against one
// target: matcher hits, extractor output, and any terminal error. The
// embedded mutex guards concurrent writes from parallel worker goroutines.
type Result struct {
	sync.Mutex
	GotResults  bool                   // at least one matcher/extractor produced output
	Meta        map[string]interface{} // payload metadata snapshot from the matched request
	Matches     map[string]interface{} // matcher name -> match marker
	Extractions map[string]interface{} // extractor name -> extracted values
	historyData map[string]interface{} // internal DSL matcher data, capped at defaultMaxHistorydata
	Error       error                  // last build/handling error encountered
}
|
|
@ -1,106 +0,0 @@
|
|||
package executer
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/matchers"
|
||||
)
|
||||
|
||||
// writeOutputDNS writes dns output to streams: a single JSON line when
// jsonOutput is set, otherwise a colorized text line of the form
// "[template:matcher] [dns] [severity] domain [extractions]". Metadata is
// suppressed under noMeta. Output goes to the terminal and, when a writer
// is configured, to the output file (decolorized if colors are enabled).
// nolint:interfacer // dns.Msg is out of current scope
func (e *DNSExecuter) writeOutputDNS(domain string, req, resp *dns.Msg, matcher *matchers.Matcher, extractorResults []string) {
	if e.jsonOutput {
		output := make(jsonOutput)
		output["matched"] = domain

		if !e.noMeta {
			output["template"] = e.template.ID
			output["type"] = "dns"
			output["host"] = domain
			for k, v := range e.template.Info {
				output[k] = v
			}
			if matcher != nil && len(matcher.Name) > 0 {
				output["matcher_name"] = matcher.Name
			}
			if len(extractorResults) > 0 {
				output["extracted_results"] = extractorResults
			}
			if e.jsonRequest {
				output["request"] = req.String()
				output["response"] = resp.String()
			}
		}

		data, err := jsoniter.Marshal(output)
		if err != nil {
			gologger.Warningf("Could not marshal json output: %s\n", err)
		}
		gologger.Silentf("%s", string(data))
		if e.writer != nil {
			if err := e.writer.Write(data); err != nil {
				gologger.Errorf("Could not write output data: %s\n", err)
				return
			}
		}
		return
	}

	// Text output path.
	builder := &strings.Builder{}
	colorizer := e.colorizer

	if !e.noMeta {
		builder.WriteRune('[')
		builder.WriteString(colorizer.Colorizer.BrightGreen(e.template.ID).String())

		if matcher != nil && len(matcher.Name) > 0 {
			builder.WriteString(":")
			builder.WriteString(colorizer.Colorizer.BrightGreen(matcher.Name).Bold().String())
		}

		builder.WriteString("] [")
		builder.WriteString(colorizer.Colorizer.BrightBlue("dns").String())
		builder.WriteString("] ")

		if e.template.Info["severity"] != "" {
			builder.WriteString("[")
			builder.WriteString(colorizer.GetColorizedSeverity(e.template.Info["severity"]))
			builder.WriteString("] ")
		}
	}
	builder.WriteString(domain)

	// If any extractors, write the results
	if len(extractorResults) > 0 && !e.noMeta {
		builder.WriteString(" [")

		for i, result := range extractorResults {
			builder.WriteString(colorizer.Colorizer.BrightCyan(result).String())

			if i != len(extractorResults)-1 {
				builder.WriteRune(',')
			}
		}
		builder.WriteString("]")
	}
	builder.WriteRune('\n')

	// Write output to screen as well as any output file
	message := builder.String()
	gologger.Silentf("%s", message)

	if e.writer != nil {
		// File output must not contain ANSI escape codes.
		if e.coloredOutput {
			message = e.decolorizer.ReplaceAllString(message, "")
		}

		if err := e.writer.WriteString(message); err != nil {
			gologger.Errorf("Could not write output data: %s\n", err)
			return
		}
	}
}
|
|
@ -1,147 +0,0 @@
|
|||
package executer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"strings"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/matchers"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/requests"
|
||||
)
|
||||
|
||||
// writeOutputHTTP writes http output to streams: a single JSON line when
// jsonOutput is set, otherwise a colorized text line of the form
// "[template:matcher] [http] [severity] URL [extractions] [meta]".
// The matched URL is taken from the raw request when present, else from
// the retryablehttp request. Metadata is suppressed under noMeta. Output
// goes to the terminal and, when a writer is configured, to the output
// file (decolorized if colors are enabled).
func (e *HTTPExecuter) writeOutputHTTP(req *requests.HTTPRequest, resp *http.Response, body string, matcher *matchers.Matcher, extractorResults []string, meta map[string]interface{}, reqURL string) {
	var URL string
	if req.RawRequest != nil {
		URL = req.RawRequest.FullURL
	}
	if req.Request != nil {
		URL = req.Request.URL.String()
	}

	if e.jsonOutput {
		output := make(jsonOutput)

		output["matched"] = URL
		if !e.noMeta {
			output["template"] = e.template.ID
			output["type"] = "http"
			output["host"] = reqURL
			if len(meta) > 0 {
				output["meta"] = meta
			}
			for k, v := range e.template.Info {
				output[k] = v
			}
			if matcher != nil && len(matcher.Name) > 0 {
				output["matcher_name"] = matcher.Name
			}
			if len(extractorResults) > 0 {
				output["extracted_results"] = extractorResults
			}

			// TODO: URL should be an argument
			if e.jsonRequest {
				dumpedRequest, err := requests.Dump(req, URL)
				if err != nil {
					gologger.Warningf("could not dump request: %s\n", err)
				} else {
					output["request"] = string(dumpedRequest)
				}

				// Headers only; the (possibly decompressed) body is appended separately.
				dumpedResponse, err := httputil.DumpResponse(resp, false)
				if err != nil {
					gologger.Warningf("could not dump response: %s\n", err)
				} else {
					output["response"] = string(dumpedResponse) + body
				}
			}
		}

		data, err := jsoniter.Marshal(output)
		if err != nil {
			gologger.Warningf("Could not marshal json output: %s\n", err)
		}
		gologger.Silentf("%s", string(data))

		if e.writer != nil {
			if err := e.writer.Write(data); err != nil {
				gologger.Errorf("Could not write output data: %s\n", err)
				return
			}
		}
		return
	}

	// Text output path.
	builder := &strings.Builder{}
	colorizer := e.colorizer

	if !e.noMeta {
		builder.WriteRune('[')
		builder.WriteString(colorizer.Colorizer.BrightGreen(e.template.ID).String())

		if matcher != nil && len(matcher.Name) > 0 {
			builder.WriteString(":")
			builder.WriteString(colorizer.Colorizer.BrightGreen(matcher.Name).Bold().String())
		}

		builder.WriteString("] [")
		builder.WriteString(colorizer.Colorizer.BrightBlue("http").String())
		builder.WriteString("] ")

		if e.template.Info["severity"] != "" {
			builder.WriteString("[")
			builder.WriteString(colorizer.GetColorizedSeverity(e.template.Info["severity"]))
			builder.WriteString("] ")
		}
	}
	builder.WriteString(URL)

	// If any extractors, write the results
	if len(extractorResults) > 0 && !e.noMeta {
		builder.WriteString(" [")

		for i, result := range extractorResults {
			builder.WriteString(colorizer.Colorizer.BrightCyan(result).String())

			if i != len(extractorResults)-1 {
				builder.WriteRune(',')
			}
		}

		builder.WriteString("]")
	}

	// write meta if any
	if len(req.Meta) > 0 && !e.noMeta {
		builder.WriteString(" [")

		var metas []string
		for name, value := range req.Meta {
			metas = append(metas, colorizer.Colorizer.BrightYellow(name).Bold().String()+"="+colorizer.Colorizer.BrightYellow(fmt.Sprint(value)).String())
		}

		builder.WriteString(strings.Join(metas, ","))
		builder.WriteString("]")
	}

	builder.WriteRune('\n')

	// Write output to screen as well as any output file
	message := builder.String()
	gologger.Silentf("%s", message)

	if e.writer != nil {
		// File output must not contain ANSI escape codes.
		if e.coloredOutput {
			message = e.decolorizer.ReplaceAllString(message, "")
		}

		if err := e.writer.WriteString(message); err != nil {
			gologger.Errorf("Could not write output data: %s\n", err)
			return
		}
	}
}
|
|
@ -1,59 +0,0 @@
|
|||
package executer
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type jsonOutput map[string]interface{}
|
||||
|
||||
// unsafeToString converts byte slice to string with zero allocations
// by reinterpreting the slice header as a string header.
//
// The returned string aliases bs's backing array: the caller must not
// mutate bs afterwards, since strings are assumed immutable.
func unsafeToString(bs []byte) string {
	return *(*string)(unsafe.Pointer(&bs))
}
|
||||
|
||||
// headersToString converts http headers to string
|
||||
func headersToString(headers http.Header) string {
|
||||
builder := &strings.Builder{}
|
||||
|
||||
for header, values := range headers {
|
||||
builder.WriteString(header)
|
||||
builder.WriteString(": ")
|
||||
|
||||
for i, value := range values {
|
||||
builder.WriteString(value)
|
||||
|
||||
if i != len(values)-1 {
|
||||
builder.WriteRune('\n')
|
||||
builder.WriteString(header)
|
||||
builder.WriteString(": ")
|
||||
}
|
||||
}
|
||||
builder.WriteRune('\n')
|
||||
}
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// isURL tests a string to determine if it is a well-structured url or not.
// It requires an absolute request URI that also carries both a scheme
// and a host component.
func isURL(toTest string) bool {
	if _, err := url.ParseRequestURI(toTest); err != nil {
		return false
	}
	parsed, err := url.Parse(toTest)
	return err == nil && parsed.Scheme != "" && parsed.Host != ""
}
|
||||
|
||||
// extractDomain extracts the domain name of a URL.
// An unparsable URL yields the empty string.
func extractDomain(theURL string) string {
	parsed, err := url.Parse(theURL)
	if err != nil {
		return ""
	}
	return parsed.Hostname()
}
|
|
@ -1,95 +0,0 @@
|
|||
package extractors
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
// Extract extracts response from the parts of request using a regex
|
||||
func (e *Extractor) Extract(resp *http.Response, body, headers string) map[string]struct{} {
|
||||
switch e.extractorType {
|
||||
case RegexExtractor:
|
||||
if e.part == BodyPart {
|
||||
return e.extractRegex(body)
|
||||
} else if e.part == HeaderPart {
|
||||
return e.extractRegex(headers)
|
||||
} else {
|
||||
matches := e.extractRegex(headers)
|
||||
if len(matches) > 0 {
|
||||
return matches
|
||||
}
|
||||
return e.extractRegex(body)
|
||||
}
|
||||
case KValExtractor:
|
||||
if e.part == HeaderPart {
|
||||
return e.extractKVal(resp)
|
||||
}
|
||||
|
||||
matches := e.extractKVal(resp)
|
||||
|
||||
if len(matches) > 0 {
|
||||
return matches
|
||||
}
|
||||
|
||||
return e.extractCookieKVal(resp)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExtractDNS extracts response from dns message using a regex
|
||||
// nolint:interfacer // dns.Msg is out of current scope
|
||||
func (e *Extractor) ExtractDNS(msg *dns.Msg) map[string]struct{} {
|
||||
switch e.extractorType {
|
||||
case RegexExtractor:
|
||||
return e.extractRegex(msg.String())
|
||||
case KValExtractor:
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractRegex extracts text from a corpus and returns it
|
||||
func (e *Extractor) extractRegex(corpus string) map[string]struct{} {
|
||||
results := make(map[string]struct{})
|
||||
|
||||
groupPlusOne := e.RegexGroup + 1
|
||||
for _, regex := range e.regexCompiled {
|
||||
matches := regex.FindAllStringSubmatch(corpus, -1)
|
||||
for _, match := range matches {
|
||||
if len(match) >= groupPlusOne {
|
||||
results[match[e.RegexGroup]] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
// extractKVal extracts text from http response
|
||||
func (e *Extractor) extractKVal(r *http.Response) map[string]struct{} {
|
||||
results := make(map[string]struct{})
|
||||
|
||||
for _, k := range e.KVal {
|
||||
for _, v := range r.Header.Values(k) {
|
||||
results[v] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
// extractCookieKVal extracts text from cookies
|
||||
func (e *Extractor) extractCookieKVal(r *http.Response) map[string]struct{} {
|
||||
results := make(map[string]struct{})
|
||||
|
||||
for _, k := range e.KVal {
|
||||
for _, cookie := range r.Cookies() {
|
||||
if cookie.Name == k {
|
||||
results[cookie.Value] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
package generators
|
||||
|
||||
// Type is the type of attack performed during payload substitution.
type Type int

const (
	// Sniper attack - each variable replaced with values at a time
	Sniper Type = iota + 1
	// PitchFork attack - Each variable replaced with positional value in multiple wordlists
	PitchFork
	// ClusterBomb attack - Generate all possible combinations of values
	ClusterBomb
)

// AttackTypes is a table for conversion of attack type from string.
var AttackTypes = map[string]Type{
	"sniper":      Sniper,
	"pitchfork":   PitchFork,
	"clusterbomb": ClusterBomb,
}
|
|
@ -1,53 +0,0 @@
|
|||
package generators
|
||||
|
||||
// ClusterbombGenerator Attack - Generate all possible combinations from an input map with all values listed
// as slices of the same size
func ClusterbombGenerator(payloads map[string][]string) (out chan map[string]interface{}) {
	out = make(chan map[string]interface{})

	// generator
	go func() {
		defer close(out)

		// order pins one iteration order for the map keys so that the
		// parallel slices below (parts, at) stay aligned by index.
		var order []string

		var parts [][]string

		for name, wordlist := range payloads {
			order = append(order, name)
			parts = append(parts, wordlist)
		}

		// Product of all wordlist sizes — the expected number of
		// combinations. NOTE(review): n is never read after this loop;
		// looks like leftover bookkeeping — confirm before removing.
		var n = 1
		for _, ar := range parts {
			n *= len(ar)
		}

		// at is an odometer: at[i] is the current index into parts[i].
		var at = make([]int, len(parts))
	loop:
		for {
			// increment position counters
			for i := len(parts) - 1; i >= 0; i-- {
				if at[i] > 0 && at[i] >= len(parts[i]) {
					// Overflowed wheel i: terminate when the leftmost
					// wheel can no longer absorb the carry, otherwise
					// reset this wheel and carry into the next one left.
					if i == 0 || (i == 1 && at[i-1] == len(parts[0])-1) {
						break loop
					}
					at[i] = 0
					at[i-1]++
				}
			}
			// construct permutation
			item := make(map[string]interface{})
			for i, ar := range parts {
				var p = at[i]
				if p >= 0 && p < len(ar) {
					item[order[i]] = ar[p]
				}
			}
			out <- item
			// Advance the fastest (rightmost) wheel for the next round.
			at[len(parts)-1]++
		}
	}()

	return out
}
|
|
@ -1,38 +0,0 @@
|
|||
package generators
|
||||
|
||||
// PitchforkGenerator Attack - Generate positional combinations from an input map with all values listed
// as slices of the same size
func PitchforkGenerator(payloads map[string][]string) (out chan map[string]interface{}) {
	out = make(chan map[string]interface{})

	// All wordlists must share a single length; any mismatch disables
	// generation entirely (zero combinations are emitted).
	count := 0
	for _, list := range payloads {
		if count == 0 {
			count = len(list)
		}
		if len(list) != count {
			count = 0
			break
		}
	}

	go func() {
		defer close(out)

		// Emit one combination per position: every variable takes the
		// value at the same index of its wordlist.
		for idx := 0; idx < count; idx++ {
			combo := make(map[string]interface{})
			for key, list := range payloads {
				combo[key] = list[idx]
			}
			out <- combo
		}
	}()

	return out
}
|
|
@ -1,21 +0,0 @@
|
|||
package generators
|
||||
|
||||
// SniperGenerator Attack - Generate sequential combinations
|
||||
func SniperGenerator(payloads map[string][]string) (out chan map[string]interface{}) {
|
||||
out = make(chan map[string]interface{})
|
||||
|
||||
// generator
|
||||
go func() {
|
||||
defer close(out)
|
||||
|
||||
for name, wordlist := range payloads {
|
||||
for _, value := range wordlist {
|
||||
element := CopyMapWithDefaultValue(payloads, "")
|
||||
element[name] = value
|
||||
out <- element
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return out
|
||||
}
|
|
@ -1,205 +0,0 @@
|
|||
package generators
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const two = 2
|
||||
|
||||
// LoadPayloads creating proper data structure
|
||||
func LoadPayloads(payloads map[string]interface{}) map[string][]string {
|
||||
loadedPayloads := make(map[string][]string)
|
||||
// load all wordlists
|
||||
for name, payload := range payloads {
|
||||
switch pt := payload.(type) {
|
||||
case string:
|
||||
elements := strings.Split(pt, "\n")
|
||||
if len(elements) >= two {
|
||||
loadedPayloads[name] = elements
|
||||
} else {
|
||||
loadedPayloads[name] = LoadFile(pt)
|
||||
}
|
||||
case []interface{}, interface{}:
|
||||
vv := payload.([]interface{})
|
||||
|
||||
var v []string
|
||||
|
||||
for _, vvv := range vv {
|
||||
v = append(v, fmt.Sprintf("%v", vvv))
|
||||
}
|
||||
|
||||
loadedPayloads[name] = v
|
||||
}
|
||||
}
|
||||
|
||||
return loadedPayloads
|
||||
}
|
||||
|
||||
// LoadFile into slice of strings
|
||||
func LoadFile(filepath string) (lines []string) {
|
||||
for line := range StreamFile(filepath) {
|
||||
lines = append(lines, line)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// StreamFile streams a file's lines over a channel.
// The channel is closed when the file ends or cannot be opened;
// read and scan errors are deliberately swallowed — callers only
// ever see lines.
func StreamFile(filepath string) (content chan string) {
	content = make(chan string)

	go func() {
		defer close(content)

		handle, openErr := os.Open(filepath)
		if openErr != nil {
			return
		}
		defer handle.Close()

		scanner := bufio.NewScanner(handle)
		for scanner.Scan() {
			content <- scanner.Text()
		}
	}()

	return content
}
|
||||
|
||||
// MergeMaps merges m1 and m2 into a freshly allocated map.
// On duplicate keys, values from m2 win.
func MergeMaps(m1, m2 map[string]interface{}) (m map[string]interface{}) {
	m = make(map[string]interface{}, len(m1)+len(m2))

	for key, value := range m1 {
		m[key] = value
	}
	for key, value := range m2 {
		m[key] = value
	}
	return m
}
|
||||
|
||||
// MergeMapsWithStrings merges two string maps into a new one.
// On duplicate keys, values from m2 win.
func MergeMapsWithStrings(m1, m2 map[string]string) (m map[string]string) {
	m = make(map[string]string, len(m1)+len(m2))

	for key, value := range m1 {
		m[key] = value
	}
	for key, value := range m2 {
		m[key] = value
	}
	return m
}
|
||||
|
||||
// reverseString returns s with its runes in reverse order, so
// multi-byte UTF-8 characters survive intact.
func reverseString(s string) string {
	src := []rune(s)
	n := len(src)
	dst := make([]rune, n)
	for i, r := range src {
		dst[n-1-i] = r
	}
	return string(dst)
}
|
||||
|
||||
// CopyMap creates a shallow copy of an existing map; mutating the
// returned map does not affect the original.
func CopyMap(originalMap map[string]interface{}) map[string]interface{} {
	duplicate := make(map[string]interface{}, len(originalMap))
	for key, value := range originalMap {
		duplicate[key] = value
	}
	return duplicate
}
|
||||
|
||||
// CopyMapWithDefaultValue builds a new map with the same keys as
// originalMap but every value set to defaultValue.
func CopyMapWithDefaultValue(originalMap map[string][]string, defaultValue interface{}) map[string]interface{} {
	duplicate := make(map[string]interface{}, len(originalMap))
	for key := range originalMap {
		duplicate[key] = defaultValue
	}
	return duplicate
}
|
||||
|
||||
// StringContainsAnyMapItem reports whether s contains any KEY of m
// as a substring. (Only keys are inspected; values are ignored.)
func StringContainsAnyMapItem(m map[string]interface{}, s string) bool {
	for candidate := range m {
		if strings.Contains(s, candidate) {
			return true
		}
	}
	return false
}
|
||||
|
||||
// TrimDelimiters strips a single surrounding {{ }} template-marker
// pair from s, if present.
func TrimDelimiters(s string) string {
	s = strings.TrimPrefix(s, "{{")
	s = strings.TrimSuffix(s, "}}")
	return s
}
|
||||
|
||||
// FileExists checks if a file exists and is not a directory.
//
// Any stat error (missing file, permission denied, invalid path) is
// reported as false. The original only checked os.IsNotExist, so any
// other stat error left info nil and info.IsDir() panicked.
func FileExists(filename string) bool {
	info, err := os.Stat(filename)
	if err != nil {
		return false
	}
	return !info.IsDir()
}
|
||||
|
||||
// SliceContins reports whether k is present in s.
// (The misspelled name is kept: it is exported and callers depend on it.
// The previous doc comment was a copy-paste from TrimDelimiters.)
func SliceContins(s []string, k string) bool {
	for _, candidate := range s {
		if candidate == k {
			return true
		}
	}
	return false
}
|
||||
|
||||
// TrimAll removes every occurrence of each rune in cutset from s.
//
// Implemented as a single strings.Map pass; the original ran one full
// strings.ReplaceAll scan per cutset rune.
func TrimAll(s, cutset string) string {
	if cutset == "" {
		return s
	}
	return strings.Map(func(r rune) rune {
		if strings.ContainsRune(cutset, r) {
			return -1 // negative return drops the rune
		}
		return r
	}, s)
}
|
||||
|
||||
func RandSeq(base string, n int) string {
|
||||
b := make([]rune, n)
|
||||
for i := range b {
|
||||
b[i] = rune(base[rand.Intn(len(base))])
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// insertInto writes sep after every interval characters of s and
// always appends one trailing separator.
func insertInto(s string, interval int, sep rune) string {
	var out bytes.Buffer
	last := len(s) - 1
	for i, ch := range s {
		out.WriteRune(ch)
		// (i+1) % interval == 0 is the original i % interval == interval-1.
		if (i+1)%interval == 0 && i != last {
			out.WriteRune(sep)
		}
	}
	out.WriteRune(sep)
	return out.String()
}
|
||||
|
||||
// toString renders an arbitrary value with default %v formatting.
func toString(v interface{}) string {
	return fmt.Sprintf("%v", v)
}
|
|
@ -1,85 +0,0 @@
|
|||
package matchers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
const defaultFormat = "%s"
|
||||
|
||||
// HTTPToMap Converts HTTP to Matcher Map
|
||||
func HTTPToMap(resp *http.Response, body, headers string, duration time.Duration, format string) (m map[string]interface{}) {
|
||||
m = make(map[string]interface{})
|
||||
|
||||
if format == "" {
|
||||
format = defaultFormat
|
||||
}
|
||||
|
||||
m[fmt.Sprintf(format, "content_length")] = resp.ContentLength
|
||||
m[fmt.Sprintf(format, "status_code")] = resp.StatusCode
|
||||
|
||||
for k, v := range resp.Header {
|
||||
k = strings.ToLower(strings.TrimSpace(strings.ReplaceAll(k, "-", "_")))
|
||||
m[fmt.Sprintf(format, k)] = strings.Join(v, " ")
|
||||
}
|
||||
|
||||
m[fmt.Sprintf(format, "all_headers")] = headers
|
||||
m[fmt.Sprintf(format, "body")] = body
|
||||
|
||||
if r, err := httputil.DumpResponse(resp, true); err == nil {
|
||||
m[fmt.Sprintf(format, "raw")] = string(r)
|
||||
}
|
||||
|
||||
// Converts duration to seconds (floating point) for DSL syntax
|
||||
m[fmt.Sprintf(format, "duration")] = duration.Seconds()
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// DNSToMap Converts DNS to Matcher Map
|
||||
func DNSToMap(msg *dns.Msg, format string) (m map[string]interface{}) {
|
||||
m = make(map[string]interface{})
|
||||
|
||||
if format == "" {
|
||||
format = defaultFormat
|
||||
}
|
||||
|
||||
m[fmt.Sprintf(format, "rcode")] = msg.Rcode
|
||||
|
||||
var qs string
|
||||
|
||||
for _, question := range msg.Question {
|
||||
qs += fmt.Sprintln(question.String())
|
||||
}
|
||||
|
||||
m[fmt.Sprintf(format, "question")] = qs
|
||||
|
||||
var exs string
|
||||
for _, extra := range msg.Extra {
|
||||
exs += fmt.Sprintln(extra.String())
|
||||
}
|
||||
|
||||
m[fmt.Sprintf(format, "extra")] = exs
|
||||
|
||||
var ans string
|
||||
for _, answer := range msg.Answer {
|
||||
ans += fmt.Sprintln(answer.String())
|
||||
}
|
||||
|
||||
m[fmt.Sprintf(format, "answer")] = ans
|
||||
|
||||
var nss string
|
||||
for _, ns := range msg.Ns {
|
||||
nss += fmt.Sprintln(ns.String())
|
||||
}
|
||||
|
||||
m[fmt.Sprintf(format, "ns")] = nss
|
||||
m[fmt.Sprintf(format, "raw")] = msg.String()
|
||||
|
||||
return m
|
||||
}
|
|
@ -1,6 +1,7 @@
|
|||
package generators
|
||||
package dsl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
|
@ -16,166 +17,160 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/Knetic/govaluate"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/collaborator"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/collaborator"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
"github.com/spaolacci/murmur3"
|
||||
)
|
||||
|
||||
const (
|
||||
numbers = "1234567890"
|
||||
letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
withCutSetArgsSize = 2
|
||||
withMaxRandArgsSize = withCutSetArgsSize
|
||||
withBaseRandArgsSize = 3
|
||||
withMaxRandArgsSize = withCutSetArgsSize
|
||||
)
|
||||
|
||||
var letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
var numbers = "1234567890"
|
||||
// HelperFunctions contains the dsl helper functions
|
||||
func HelperFunctions() map[string]govaluate.ExpressionFunction {
|
||||
functions := make(map[string]govaluate.ExpressionFunction)
|
||||
|
||||
// HelperFunctions contains the dsl functions
|
||||
func HelperFunctions() (functions map[string]govaluate.ExpressionFunction) {
|
||||
functions = make(map[string]govaluate.ExpressionFunction)
|
||||
|
||||
// strings
|
||||
functions["len"] = func(args ...interface{}) (interface{}, error) {
|
||||
length := len(toString(args[0]))
|
||||
|
||||
length := len(types.ToString(args[0]))
|
||||
return float64(length), nil
|
||||
}
|
||||
|
||||
functions["toupper"] = func(args ...interface{}) (interface{}, error) {
|
||||
return strings.ToUpper(toString(args[0])), nil
|
||||
return strings.ToUpper(types.ToString(args[0])), nil
|
||||
}
|
||||
|
||||
functions["tolower"] = func(args ...interface{}) (interface{}, error) {
|
||||
return strings.ToLower(toString(args[0])), nil
|
||||
return strings.ToLower(types.ToString(args[0])), nil
|
||||
}
|
||||
|
||||
functions["replace"] = func(args ...interface{}) (interface{}, error) {
|
||||
return strings.ReplaceAll(toString(args[0]), toString(args[1]), toString(args[2])), nil
|
||||
return strings.ReplaceAll(types.ToString(args[0]), types.ToString(args[1]), types.ToString(args[2])), nil
|
||||
}
|
||||
|
||||
functions["replace_regex"] = func(args ...interface{}) (interface{}, error) {
|
||||
compiled, err := regexp.Compile(toString(args[1]))
|
||||
compiled, err := regexp.Compile(types.ToString(args[1]))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return compiled.ReplaceAllString(toString(args[0]), toString(args[2])), nil
|
||||
return compiled.ReplaceAllString(types.ToString(args[0]), types.ToString(args[2])), nil
|
||||
}
|
||||
|
||||
functions["trim"] = func(args ...interface{}) (interface{}, error) {
|
||||
return strings.Trim(toString(args[0]), toString(args[2])), nil
|
||||
return strings.Trim(types.ToString(args[0]), types.ToString(args[2])), nil
|
||||
}
|
||||
|
||||
functions["trimleft"] = func(args ...interface{}) (interface{}, error) {
|
||||
return strings.TrimLeft(toString(args[0]), toString(args[1])), nil
|
||||
return strings.TrimLeft(types.ToString(args[0]), types.ToString(args[1])), nil
|
||||
}
|
||||
|
||||
functions["trimright"] = func(args ...interface{}) (interface{}, error) {
|
||||
return strings.TrimRight(toString(args[0]), toString(args[1])), nil
|
||||
return strings.TrimRight(types.ToString(args[0]), types.ToString(args[1])), nil
|
||||
}
|
||||
|
||||
functions["trimspace"] = func(args ...interface{}) (interface{}, error) {
|
||||
return strings.TrimSpace(toString(args[0])), nil
|
||||
return strings.TrimSpace(types.ToString(args[0])), nil
|
||||
}
|
||||
|
||||
functions["trimprefix"] = func(args ...interface{}) (interface{}, error) {
|
||||
return strings.TrimPrefix(toString(args[0]), toString(args[1])), nil
|
||||
return strings.TrimPrefix(types.ToString(args[0]), types.ToString(args[1])), nil
|
||||
}
|
||||
|
||||
functions["trimsuffix"] = func(args ...interface{}) (interface{}, error) {
|
||||
return strings.TrimSuffix(toString(args[0]), toString(args[1])), nil
|
||||
return strings.TrimSuffix(types.ToString(args[0]), types.ToString(args[1])), nil
|
||||
}
|
||||
|
||||
functions["reverse"] = func(args ...interface{}) (interface{}, error) {
|
||||
return reverseString(toString(args[0])), nil
|
||||
return reverseString(types.ToString(args[0])), nil
|
||||
}
|
||||
|
||||
// encoding
|
||||
functions["base64"] = func(args ...interface{}) (interface{}, error) {
|
||||
sEnc := base64.StdEncoding.EncodeToString([]byte(toString(args[0])))
|
||||
sEnc := base64.StdEncoding.EncodeToString([]byte(types.ToString(args[0])))
|
||||
|
||||
return sEnc, nil
|
||||
}
|
||||
|
||||
// python encodes to base64 with lines of 76 bytes terminated by new line "\n"
|
||||
functions["base64_py"] = func(args ...interface{}) (interface{}, error) {
|
||||
sEnc := base64.StdEncoding.EncodeToString([]byte(toString(args[0])))
|
||||
|
||||
sEnc := base64.StdEncoding.EncodeToString([]byte(types.ToString(args[0])))
|
||||
return insertInto(sEnc, 76, '\n'), nil
|
||||
}
|
||||
|
||||
functions["base64_decode"] = func(args ...interface{}) (interface{}, error) {
|
||||
return base64.StdEncoding.DecodeString(toString(args[0]))
|
||||
return base64.StdEncoding.DecodeString(types.ToString(args[0]))
|
||||
}
|
||||
|
||||
functions["url_encode"] = func(args ...interface{}) (interface{}, error) {
|
||||
return url.PathEscape(toString(args[0])), nil
|
||||
return url.PathEscape(types.ToString(args[0])), nil
|
||||
}
|
||||
|
||||
functions["url_decode"] = func(args ...interface{}) (interface{}, error) {
|
||||
return url.PathUnescape(toString(args[0]))
|
||||
return url.PathUnescape(types.ToString(args[0]))
|
||||
}
|
||||
|
||||
functions["hex_encode"] = func(args ...interface{}) (interface{}, error) {
|
||||
return hex.EncodeToString([]byte(toString(args[0]))), nil
|
||||
return hex.EncodeToString([]byte(types.ToString(args[0]))), nil
|
||||
}
|
||||
|
||||
functions["hex_decode"] = func(args ...interface{}) (interface{}, error) {
|
||||
hx, _ := hex.DecodeString(toString(args[0]))
|
||||
hx, _ := hex.DecodeString(types.ToString(args[0]))
|
||||
return string(hx), nil
|
||||
}
|
||||
|
||||
functions["html_escape"] = func(args ...interface{}) (interface{}, error) {
|
||||
return html.EscapeString(toString(args[0])), nil
|
||||
return html.EscapeString(types.ToString(args[0])), nil
|
||||
}
|
||||
|
||||
functions["html_unescape"] = func(args ...interface{}) (interface{}, error) {
|
||||
return html.UnescapeString(toString(args[0])), nil
|
||||
return html.UnescapeString(types.ToString(args[0])), nil
|
||||
}
|
||||
|
||||
// hashing
|
||||
functions["md5"] = func(args ...interface{}) (interface{}, error) {
|
||||
hash := md5.Sum([]byte(toString(args[0])))
|
||||
hash := md5.Sum([]byte(types.ToString(args[0])))
|
||||
|
||||
return hex.EncodeToString(hash[:]), nil
|
||||
}
|
||||
|
||||
functions["sha256"] = func(args ...interface{}) (interface{}, error) {
|
||||
h := sha256.New()
|
||||
_, err := h.Write([]byte(toString(args[0])))
|
||||
_, err := h.Write([]byte(types.ToString(args[0])))
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
}
|
||||
|
||||
functions["sha1"] = func(args ...interface{}) (interface{}, error) {
|
||||
h := sha1.New()
|
||||
_, err := h.Write([]byte(toString(args[0])))
|
||||
_, err := h.Write([]byte(types.ToString(args[0])))
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
}
|
||||
|
||||
functions["mmh3"] = func(args ...interface{}) (interface{}, error) {
|
||||
return fmt.Sprintf("%d", int32(murmur3.Sum32WithSeed([]byte(toString(args[0])), 0))), nil
|
||||
return fmt.Sprintf("%d", int32(murmur3.Sum32WithSeed([]byte(types.ToString(args[0])), 0))), nil
|
||||
}
|
||||
|
||||
// search
|
||||
functions["contains"] = func(args ...interface{}) (interface{}, error) {
|
||||
return strings.Contains(toString(args[0]), toString(args[1])), nil
|
||||
return strings.Contains(types.ToString(args[0]), types.ToString(args[1])), nil
|
||||
}
|
||||
|
||||
functions["regex"] = func(args ...interface{}) (interface{}, error) {
|
||||
compiled, err := regexp.Compile(toString(args[0]))
|
||||
compiled, err := regexp.Compile(types.ToString(args[0]))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return compiled.MatchString(toString(args[1])), nil
|
||||
return compiled.MatchString(types.ToString(args[1])), nil
|
||||
}
|
||||
|
||||
// random generators
|
||||
|
@ -183,14 +178,12 @@ func HelperFunctions() (functions map[string]govaluate.ExpressionFunction) {
|
|||
chars := letters + numbers
|
||||
bad := ""
|
||||
if len(args) >= 1 {
|
||||
chars = toString(args[0])
|
||||
chars = types.ToString(args[0])
|
||||
}
|
||||
if len(args) >= withCutSetArgsSize {
|
||||
bad = toString(args[1])
|
||||
bad = types.ToString(args[1])
|
||||
}
|
||||
|
||||
chars = TrimAll(chars, bad)
|
||||
|
||||
chars = trimAll(chars, bad)
|
||||
return chars[rand.Intn(len(chars))], nil
|
||||
}
|
||||
|
||||
|
@ -203,15 +196,13 @@ func HelperFunctions() (functions map[string]govaluate.ExpressionFunction) {
|
|||
l = args[0].(int)
|
||||
}
|
||||
if len(args) >= withCutSetArgsSize {
|
||||
bad = toString(args[1])
|
||||
bad = types.ToString(args[1])
|
||||
}
|
||||
if len(args) >= withBaseRandArgsSize {
|
||||
base = toString(args[2])
|
||||
base = types.ToString(args[2])
|
||||
}
|
||||
|
||||
base = TrimAll(base, bad)
|
||||
|
||||
return RandSeq(base, l), nil
|
||||
base = trimAll(base, bad)
|
||||
return randSeq(base, l), nil
|
||||
}
|
||||
|
||||
functions["rand_text_alphanumeric"] = func(args ...interface{}) (interface{}, error) {
|
||||
|
@ -223,12 +214,10 @@ func HelperFunctions() (functions map[string]govaluate.ExpressionFunction) {
|
|||
l = args[0].(int)
|
||||
}
|
||||
if len(args) >= withCutSetArgsSize {
|
||||
bad = toString(args[1])
|
||||
bad = types.ToString(args[1])
|
||||
}
|
||||
|
||||
chars = TrimAll(chars, bad)
|
||||
|
||||
return RandSeq(chars, l), nil
|
||||
chars = trimAll(chars, bad)
|
||||
return randSeq(chars, l), nil
|
||||
}
|
||||
|
||||
functions["rand_text_alpha"] = func(args ...interface{}) (interface{}, error) {
|
||||
|
@ -240,12 +229,10 @@ func HelperFunctions() (functions map[string]govaluate.ExpressionFunction) {
|
|||
l = args[0].(int)
|
||||
}
|
||||
if len(args) >= withCutSetArgsSize {
|
||||
bad = toString(args[1])
|
||||
bad = types.ToString(args[1])
|
||||
}
|
||||
|
||||
chars = TrimAll(chars, bad)
|
||||
|
||||
return RandSeq(chars, l), nil
|
||||
chars = trimAll(chars, bad)
|
||||
return randSeq(chars, l), nil
|
||||
}
|
||||
|
||||
functions["rand_text_numeric"] = func(args ...interface{}) (interface{}, error) {
|
||||
|
@ -257,12 +244,10 @@ func HelperFunctions() (functions map[string]govaluate.ExpressionFunction) {
|
|||
l = args[0].(int)
|
||||
}
|
||||
if len(args) >= withCutSetArgsSize {
|
||||
bad = toString(args[1])
|
||||
bad = types.ToString(args[1])
|
||||
}
|
||||
|
||||
chars = TrimAll(chars, bad)
|
||||
|
||||
return RandSeq(chars, l), nil
|
||||
chars = trimAll(chars, bad)
|
||||
return randSeq(chars, l), nil
|
||||
}
|
||||
|
||||
functions["rand_int"] = func(args ...interface{}) (interface{}, error) {
|
||||
|
@ -275,7 +260,6 @@ func HelperFunctions() (functions map[string]govaluate.ExpressionFunction) {
|
|||
if len(args) >= withMaxRandArgsSize {
|
||||
max = args[1].(int)
|
||||
}
|
||||
|
||||
return rand.Intn(max-min) + min, nil
|
||||
}
|
||||
|
||||
|
@ -289,8 +273,44 @@ func HelperFunctions() (functions map[string]govaluate.ExpressionFunction) {
|
|||
// Collaborator
|
||||
functions["collab"] = func(args ...interface{}) (interface{}, error) {
|
||||
// check if collaborator contains a specific pattern
|
||||
return collaborator.DefaultCollaborator.Has(toString(args[0])), nil
|
||||
return collaborator.DefaultCollaborator.Has(types.ToString(args[0])), nil
|
||||
}
|
||||
|
||||
return functions
|
||||
}
|
||||
|
||||
// reverseString returns s with its runes in reverse order,
// preserving multi-byte UTF-8 characters.
func reverseString(s string) string {
	runes := []rune(s)
	for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 {
		runes[i], runes[j] = runes[j], runes[i]
	}
	return string(runes)
}
|
||||
|
||||
// trimAll removes every occurrence of each rune in cutset from s
// (one ReplaceAll pass per cutset rune).
func trimAll(s, cutset string) string {
	for _, c := range cutset {
		s = strings.ReplaceAll(s, string(c), "")
	}
	return s
}
|
||||
|
||||
// randSeq returns a random string of n characters drawn from base.
// NOTE(review): indexing is byte-based, so base is assumed ASCII, and
// an empty base panics via rand.Intn(0) — confirm callers guard this.
func randSeq(base string, n int) string {
	b := make([]rune, n)
	for i := range b {
		b[i] = rune(base[rand.Intn(len(base))])
	}
	return string(b)
}
|
||||
|
||||
// insertInto inserts sep after every interval characters of s and
// always appends one trailing separator (used for the python-style
// line-wrapped base64 helper).
func insertInto(s string, interval int, sep rune) string {
	var buffer bytes.Buffer
	before := interval - 1
	last := len(s) - 1
	for i, char := range s {
		buffer.WriteRune(char)
		// i % interval == interval-1 marks the end of each group.
		if i%interval == before && i != last {
			buffer.WriteRune(sep)
		}
	}
	buffer.WriteRune(sep)
	return buffer.String()
}
|
|
@ -3,6 +3,7 @@ package extractors
|
|||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CompileExtractors performs the initial setup operation on a extractor
|
||||
|
@ -24,15 +25,13 @@ func (e *Extractor) CompileExtractors() error {
|
|||
e.regexCompiled = append(e.regexCompiled, compiled)
|
||||
}
|
||||
|
||||
// Setup the part of the request to match, if any.
|
||||
if e.Part != "" {
|
||||
e.part, ok = PartTypes[e.Part]
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown matcher part specified: %s", e.Part)
|
||||
}
|
||||
} else {
|
||||
e.part = BodyPart
|
||||
for i, kval := range e.KVal {
|
||||
e.KVal[i] = strings.ToLower(kval)
|
||||
}
|
||||
|
||||
// Setup the part of the request to match, if any.
|
||||
if e.Part == "" {
|
||||
e.Part = "body"
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,44 @@
|
|||
package extractors
|
||||
|
||||
import (
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
)
|
||||
|
||||
// ExtractRegex extracts text from a corpus and returns it
|
||||
func (e *Extractor) ExtractRegex(corpus string) map[string]struct{} {
|
||||
results := make(map[string]struct{})
|
||||
|
||||
groupPlusOne := e.RegexGroup + 1
|
||||
for _, regex := range e.regexCompiled {
|
||||
matches := regex.FindAllStringSubmatch(corpus, -1)
|
||||
|
||||
for _, match := range matches {
|
||||
if len(match) < groupPlusOne {
|
||||
continue
|
||||
}
|
||||
matchString := match[e.RegexGroup]
|
||||
|
||||
if _, ok := results[matchString]; !ok {
|
||||
results[matchString] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
// ExtractKval extracts key value pairs from a data map
|
||||
func (e *Extractor) ExtractKval(data map[string]interface{}) map[string]struct{} {
|
||||
results := make(map[string]struct{})
|
||||
|
||||
for _, k := range e.KVal {
|
||||
item, ok := data[k]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
itemString := types.ToString(item)
|
||||
if _, ok := results[itemString]; !ok {
|
||||
results[itemString] = struct{}{}
|
||||
}
|
||||
}
|
||||
return results
|
||||
}
|
|
@ -25,8 +25,6 @@ type Extractor struct {
|
|||
//
|
||||
// By default, matching is performed in request body.
|
||||
Part string `yaml:"part,omitempty"`
|
||||
// part is the part of the request to match
|
||||
part Part
|
||||
// Internal defines if this is used internally
|
||||
Internal bool `yaml:"internal,omitempty"`
|
||||
}
|
||||
|
@ -47,26 +45,7 @@ var ExtractorTypes = map[string]ExtractorType{
|
|||
"kval": KValExtractor,
|
||||
}
|
||||
|
||||
// Part is the part of the request to match
|
||||
type Part int
|
||||
|
||||
const (
|
||||
// BodyPart matches body of the response.
|
||||
BodyPart Part = iota + 1
|
||||
// HeaderPart matches headers of the response.
|
||||
HeaderPart
|
||||
// AllPart matches both response body and headers of the response.
|
||||
AllPart
|
||||
)
|
||||
|
||||
// PartTypes is an table for conversion of part type from string.
|
||||
var PartTypes = map[string]Part{
|
||||
"body": BodyPart,
|
||||
"header": HeaderPart,
|
||||
"all": AllPart,
|
||||
}
|
||||
|
||||
// GetPart returns the part of the matcher
|
||||
func (e *Extractor) GetPart() Part {
|
||||
return e.part
|
||||
// GetType returns the type of the matcher
|
||||
func (e *Extractor) GetType() ExtractorType {
|
||||
return e.extractorType
|
||||
}
|
|
@ -5,7 +5,7 @@ import (
|
|||
"regexp"
|
||||
|
||||
"github.com/Knetic/govaluate"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/generators"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators/common/dsl"
|
||||
)
|
||||
|
||||
// CompileMatchers performs the initial setup operation on a matcher
|
||||
|
@ -17,6 +17,10 @@ func (m *Matcher) CompileMatchers() error {
|
|||
if !ok {
|
||||
return fmt.Errorf("unknown matcher type specified: %s", m.Type)
|
||||
}
|
||||
// By default, match on body if user hasn't provided any specific items
|
||||
if m.Part == "" {
|
||||
m.Part = "body"
|
||||
}
|
||||
|
||||
// Compile the regexes
|
||||
for _, regex := range m.Regex {
|
||||
|
@ -24,17 +28,15 @@ func (m *Matcher) CompileMatchers() error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("could not compile regex: %s", regex)
|
||||
}
|
||||
|
||||
m.regexCompiled = append(m.regexCompiled, compiled)
|
||||
}
|
||||
|
||||
// Compile the dsl expressions
|
||||
for _, dsl := range m.DSL {
|
||||
compiled, err := govaluate.NewEvaluableExpressionWithFunctions(dsl, generators.HelperFunctions())
|
||||
for _, expr := range m.DSL {
|
||||
compiled, err := govaluate.NewEvaluableExpressionWithFunctions(expr, dsl.HelperFunctions())
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not compile dsl: %s", dsl)
|
||||
return fmt.Errorf("could not compile dsl: %s", expr)
|
||||
}
|
||||
|
||||
m.dslCompiled = append(m.dslCompiled, compiled)
|
||||
}
|
||||
|
||||
|
@ -47,16 +49,5 @@ func (m *Matcher) CompileMatchers() error {
|
|||
} else {
|
||||
m.condition = ORCondition
|
||||
}
|
||||
|
||||
// Setup the part of the request to match, if any.
|
||||
if m.Part != "" {
|
||||
m.part, ok = PartTypes[m.Part]
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown matcher part specified: %s", m.Part)
|
||||
}
|
||||
} else {
|
||||
m.part = BodyPart
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -2,81 +2,11 @@ package matchers
|
|||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/generators"
|
||||
)
|
||||
|
||||
// Match matches a http response again a given matcher
|
||||
func (m *Matcher) Match(resp *http.Response, body, headers string, duration time.Duration, data map[string]interface{}) bool {
|
||||
switch m.matcherType {
|
||||
case StatusMatcher:
|
||||
return m.isNegative(m.matchStatusCode(resp.StatusCode))
|
||||
case SizeMatcher:
|
||||
return m.isNegative(m.matchSizeCode(len(body)))
|
||||
case WordsMatcher:
|
||||
// Match the parts as required for word check
|
||||
if m.part == BodyPart {
|
||||
return m.isNegative(m.matchWords(body))
|
||||
} else if m.part == HeaderPart {
|
||||
return m.isNegative(m.matchWords(headers))
|
||||
} else {
|
||||
return m.isNegative(m.matchWords(headers) || m.matchWords(body))
|
||||
}
|
||||
case RegexMatcher:
|
||||
// Match the parts as required for regex check
|
||||
if m.part == BodyPart {
|
||||
return m.isNegative(m.matchRegex(body))
|
||||
} else if m.part == HeaderPart {
|
||||
return m.isNegative(m.matchRegex(headers))
|
||||
} else {
|
||||
return m.isNegative(m.matchRegex(headers) || m.matchRegex(body))
|
||||
}
|
||||
case BinaryMatcher:
|
||||
// Match the parts as required for binary characters check
|
||||
if m.part == BodyPart {
|
||||
return m.isNegative(m.matchBinary(body))
|
||||
} else if m.part == HeaderPart {
|
||||
return m.isNegative(m.matchBinary(headers))
|
||||
} else {
|
||||
return m.isNegative(m.matchBinary(headers) || m.matchBinary(body))
|
||||
}
|
||||
case DSLMatcher:
|
||||
// Match complex query
|
||||
return m.isNegative(m.matchDSL(generators.MergeMaps(HTTPToMap(resp, body, headers, duration, ""), data)))
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// MatchDNS matches a dns response against a given matcher
|
||||
func (m *Matcher) MatchDNS(msg *dns.Msg) bool {
|
||||
switch m.matcherType {
|
||||
// [WIP] add dns status code matcher
|
||||
case SizeMatcher:
|
||||
return m.matchSizeCode(msg.Len())
|
||||
case WordsMatcher:
|
||||
// Match for word check
|
||||
return m.matchWords(msg.String())
|
||||
case RegexMatcher:
|
||||
// Match regex check
|
||||
return m.matchRegex(msg.String())
|
||||
case BinaryMatcher:
|
||||
// Match binary characters check
|
||||
return m.matchBinary(msg.String())
|
||||
case DSLMatcher:
|
||||
// Match complex query
|
||||
return m.matchDSL(DNSToMap(msg, ""))
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// matchStatusCode matches a status code check against an HTTP Response
|
||||
func (m *Matcher) matchStatusCode(statusCode int) bool {
|
||||
// MatchStatusCode matches a status code check against a corpus
|
||||
func (m *Matcher) MatchStatusCode(statusCode int) bool {
|
||||
// Iterate over all the status codes accepted as valid
|
||||
//
|
||||
// Status codes don't support AND conditions.
|
||||
|
@ -88,12 +18,11 @@ func (m *Matcher) matchStatusCode(statusCode int) bool {
|
|||
// Return on the first match.
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// matchStatusCode matches a size check against an HTTP Response
|
||||
func (m *Matcher) matchSizeCode(length int) bool {
|
||||
// MatchSize matches a size check against a corpus
|
||||
func (m *Matcher) MatchSize(length int) bool {
|
||||
// Iterate over all the sizes accepted as valid
|
||||
//
|
||||
// Sizes codes don't support AND conditions.
|
||||
|
@ -105,12 +34,11 @@ func (m *Matcher) matchSizeCode(length int) bool {
|
|||
// Return on the first match.
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// matchWords matches a word check against an HTTP Response/Headers.
|
||||
func (m *Matcher) matchWords(corpus string) bool {
|
||||
// MatchWords matches a word check against a corpus.
|
||||
func (m *Matcher) MatchWords(corpus string) bool {
|
||||
// Iterate over all the words accepted as valid
|
||||
for i, word := range m.Words {
|
||||
// Continue if the word doesn't match
|
||||
|
@ -134,12 +62,11 @@ func (m *Matcher) matchWords(corpus string) bool {
|
|||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// matchRegex matches a regex check against an HTTP Response/Headers.
|
||||
func (m *Matcher) matchRegex(corpus string) bool {
|
||||
// MatchRegex matches a regex check against a corpus
|
||||
func (m *Matcher) MatchRegex(corpus string) bool {
|
||||
// Iterate over all the regexes accepted as valid
|
||||
for i, regex := range m.regexCompiled {
|
||||
// Continue if the regex doesn't match
|
||||
|
@ -163,12 +90,11 @@ func (m *Matcher) matchRegex(corpus string) bool {
|
|||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// matchWords matches a word check against an HTTP Response/Headers.
|
||||
func (m *Matcher) matchBinary(corpus string) bool {
|
||||
// MatchBinary matches a binary check against a corpus
|
||||
func (m *Matcher) MatchBinary(corpus string) bool {
|
||||
// Iterate over all the words accepted as valid
|
||||
for i, binary := range m.Binary {
|
||||
// Continue if the word doesn't match
|
||||
|
@ -193,15 +119,14 @@ func (m *Matcher) matchBinary(corpus string) bool {
|
|||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// matchDSL matches on a generic map result
|
||||
func (m *Matcher) matchDSL(mp map[string]interface{}) bool {
|
||||
// MatchDSL matches on a generic map result
|
||||
func (m *Matcher) MatchDSL(data map[string]interface{}) bool {
|
||||
// Iterate over all the expressions accepted as valid
|
||||
for i, expression := range m.dslCompiled {
|
||||
result, err := expression.Evaluate(mp)
|
||||
result, err := expression.Evaluate(data)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
@ -230,6 +155,5 @@ func (m *Matcher) matchDSL(mp map[string]interface{}) bool {
|
|||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
|
@ -9,22 +9,22 @@ import (
|
|||
func TestANDCondition(t *testing.T) {
|
||||
m := &Matcher{condition: ANDCondition, Words: []string{"a", "b"}}
|
||||
|
||||
matched := m.matchWords("a b")
|
||||
matched := m.MatchWords("a b")
|
||||
require.True(t, matched, "Could not match valid AND condition")
|
||||
|
||||
matched = m.matchWords("b")
|
||||
matched = m.MatchWords("b")
|
||||
require.False(t, matched, "Could match invalid AND condition")
|
||||
}
|
||||
|
||||
func TestORCondition(t *testing.T) {
|
||||
m := &Matcher{condition: ORCondition, Words: []string{"a", "b"}}
|
||||
|
||||
matched := m.matchWords("a b")
|
||||
matched := m.MatchWords("a b")
|
||||
require.True(t, matched, "Could not match valid OR condition")
|
||||
|
||||
matched = m.matchWords("b")
|
||||
matched = m.MatchWords("b")
|
||||
require.True(t, matched, "Could not match valid OR condition")
|
||||
|
||||
matched = m.matchWords("c")
|
||||
matched = m.MatchWords("c")
|
||||
require.False(t, matched, "Could match invalid OR condition")
|
||||
}
|
|
@ -6,12 +6,21 @@ import (
|
|||
"github.com/Knetic/govaluate"
|
||||
)
|
||||
|
||||
// Matcher is used to identify whether a template was successful.
|
||||
// Matcher is used to match a part in the output from a protocol.
|
||||
type Matcher struct {
|
||||
// Type is the type of the matcher
|
||||
Type string `yaml:"type"`
|
||||
// matcherType is the internal type of the matcher
|
||||
matcherType MatcherType
|
||||
// Condition is the optional condition between two matcher variables
|
||||
//
|
||||
// By default, the condition is assumed to be OR.
|
||||
Condition string `yaml:"condition,omitempty"`
|
||||
|
||||
// Part is the part of the data to match
|
||||
Part string `yaml:"part,omitempty"`
|
||||
|
||||
// Negative specifies if the match should be reversed
|
||||
// It will only match if the condition is not true.
|
||||
Negative bool `yaml:"negative,omitempty"`
|
||||
|
||||
// Name is matcher Name
|
||||
Name string `yaml:"name,omitempty"`
|
||||
|
@ -23,32 +32,16 @@ type Matcher struct {
|
|||
Words []string `yaml:"words,omitempty"`
|
||||
// Regex are the regex pattern required to be present in the response
|
||||
Regex []string `yaml:"regex,omitempty"`
|
||||
// regexCompiled is the compiled variant
|
||||
regexCompiled []*regexp.Regexp
|
||||
// Binary are the binary characters required to be present in the response
|
||||
Binary []string `yaml:"binary,omitempty"`
|
||||
// DSL are the dsl queries
|
||||
DSL []string `yaml:"dsl,omitempty"`
|
||||
// dslCompiled is the compiled variant
|
||||
dslCompiled []*govaluate.EvaluableExpression
|
||||
|
||||
// Condition is the optional condition between two matcher variables
|
||||
//
|
||||
// By default, the condition is assumed to be OR.
|
||||
Condition string `yaml:"condition,omitempty"`
|
||||
// condition is the condition of the matcher
|
||||
// cached data for the compiled matcher
|
||||
condition ConditionType
|
||||
|
||||
// Part is the part of the request to match
|
||||
//
|
||||
// By default, matching is performed in request body.
|
||||
Part string `yaml:"part,omitempty"`
|
||||
// part is the part of the request to match
|
||||
part Part
|
||||
|
||||
// Negative specifies if the match should be reversed
|
||||
// It will only match if the condition is not true.
|
||||
Negative bool `yaml:"negative,omitempty"`
|
||||
matcherType MatcherType
|
||||
regexCompiled []*regexp.Regexp
|
||||
dslCompiled []*govaluate.EvaluableExpression
|
||||
}
|
||||
|
||||
// MatcherType is the type of the matcher specified
|
||||
|
@ -95,36 +88,15 @@ var ConditionTypes = map[string]ConditionType{
|
|||
"or": ORCondition,
|
||||
}
|
||||
|
||||
// Part is the part of the request to match
|
||||
type Part int
|
||||
|
||||
const (
|
||||
// BodyPart matches body of the response.
|
||||
BodyPart Part = iota + 1
|
||||
// HeaderPart matches headers of the response.
|
||||
HeaderPart
|
||||
// AllPart matches both response body and headers of the response.
|
||||
AllPart
|
||||
)
|
||||
|
||||
// PartTypes is an table for conversion of part type from string.
|
||||
var PartTypes = map[string]Part{
|
||||
"body": BodyPart,
|
||||
"header": HeaderPart,
|
||||
"all": AllPart,
|
||||
}
|
||||
|
||||
// GetPart returns the part of the matcher
|
||||
func (m *Matcher) GetPart() Part {
|
||||
return m.part
|
||||
}
|
||||
|
||||
// isNegative reverts the results of the match if the matcher
|
||||
// is of type negative.
|
||||
func (m *Matcher) isNegative(data bool) bool {
|
||||
// Result reverts the results of the match if the matcher is of type negative.
|
||||
func (m *Matcher) Result(data bool) bool {
|
||||
if m.Negative {
|
||||
return !data
|
||||
}
|
||||
|
||||
return data
|
||||
}
|
||||
|
||||
// GetType returns the type of the matcher
|
||||
func (m *Matcher) GetType() MatcherType {
|
||||
return m.matcherType
|
||||
}
|
|
@ -0,0 +1,128 @@
|
|||
package operators
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators/extractors"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators/matchers"
|
||||
)
|
||||
|
||||
// Operators contains the operators that can be applied on protocols
|
||||
type Operators struct {
|
||||
// Matchers contains the detection mechanism for the request to identify
|
||||
// whether the request was successful
|
||||
Matchers []*matchers.Matcher `yaml:"matchers"`
|
||||
// Extractors contains the extraction mechanism for the request to identify
|
||||
// and extract parts of the response.
|
||||
Extractors []*extractors.Extractor `yaml:"extractors"`
|
||||
// MatchersCondition is the condition of the matchers
|
||||
// whether to use AND or OR. Default is OR.
|
||||
MatchersCondition string `yaml:"matchers-condition"`
|
||||
// cached variables that may be used along with request.
|
||||
matchersCondition matchers.ConditionType
|
||||
}
|
||||
|
||||
// Compile compiles the operators as well as their corresponding matchers and extractors
|
||||
func (r *Operators) Compile() error {
|
||||
if r.MatchersCondition != "" {
|
||||
r.matchersCondition = matchers.ConditionTypes[r.MatchersCondition]
|
||||
} else {
|
||||
r.matchersCondition = matchers.ORCondition
|
||||
}
|
||||
|
||||
for _, matcher := range r.Matchers {
|
||||
if err := matcher.CompileMatchers(); err != nil {
|
||||
return errors.Wrap(err, "could not compile matcher")
|
||||
}
|
||||
}
|
||||
for _, extractor := range r.Extractors {
|
||||
if err := extractor.CompileExtractors(); err != nil {
|
||||
return errors.Wrap(err, "could not compile extractor")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetMatchersCondition returns the condition for the matchers
|
||||
func (r *Operators) GetMatchersCondition() matchers.ConditionType {
|
||||
return r.matchersCondition
|
||||
}
|
||||
|
||||
// Result is a result structure created from operators running on data.
|
||||
type Result struct {
|
||||
// Matches is a map of matcher names that we matched
|
||||
Matches map[string]struct{}
|
||||
// Extracts contains all the data extracted from inputs
|
||||
Extracts map[string][]string
|
||||
// OutputExtracts is the list of extracts to be displayed on screen.
|
||||
OutputExtracts []string
|
||||
// DynamicValues contains any dynamic values to be templated
|
||||
DynamicValues map[string]interface{}
|
||||
// PayloadValues contains payload values provided by user. (Optional)
|
||||
PayloadValues map[string]interface{}
|
||||
}
|
||||
|
||||
// MatchFunc performs matching operation for a matcher on model and returns true or false.
|
||||
type MatchFunc func(data map[string]interface{}, matcher *matchers.Matcher) bool
|
||||
|
||||
// ExtractFunc performs extracting operation for a extractor on model and returns true or false.
|
||||
type ExtractFunc func(data map[string]interface{}, matcher *extractors.Extractor) map[string]struct{}
|
||||
|
||||
// Execute executes the operators on data and returns a result structure
|
||||
func (r *Operators) Execute(data map[string]interface{}, match MatchFunc, extract ExtractFunc) (*Result, bool) {
|
||||
matcherCondition := r.GetMatchersCondition()
|
||||
|
||||
var matches bool
|
||||
result := &Result{
|
||||
Matches: make(map[string]struct{}),
|
||||
Extracts: make(map[string][]string),
|
||||
DynamicValues: make(map[string]interface{}),
|
||||
}
|
||||
for _, matcher := range r.Matchers {
|
||||
// Check if the matcher matched
|
||||
if !match(data, matcher) {
|
||||
// If the condition is AND we haven't matched, try next request.
|
||||
if matcherCondition == matchers.ANDCondition {
|
||||
return nil, false
|
||||
}
|
||||
} else {
|
||||
// If the matcher has matched, and its an OR
|
||||
// write the first output then move to next matcher.
|
||||
if matcherCondition == matchers.ORCondition && matcher.Name != "" {
|
||||
result.Matches[matcher.Name] = struct{}{}
|
||||
}
|
||||
matches = true
|
||||
}
|
||||
}
|
||||
|
||||
// All matchers have successfully completed so now start with the
|
||||
// next task which is extraction of input from matchers.
|
||||
for _, extractor := range r.Extractors {
|
||||
var extractorResults []string
|
||||
|
||||
for match := range extract(data, extractor) {
|
||||
extractorResults = append(extractorResults, match)
|
||||
|
||||
if extractor.Internal {
|
||||
if _, ok := result.DynamicValues[extractor.Name]; !ok {
|
||||
result.DynamicValues[extractor.Name] = match
|
||||
}
|
||||
} else {
|
||||
result.OutputExtracts = append(result.OutputExtracts, match)
|
||||
}
|
||||
}
|
||||
if len(extractorResults) > 0 && !extractor.Internal && extractor.Name != "" {
|
||||
result.Extracts[extractor.Name] = extractorResults
|
||||
}
|
||||
}
|
||||
|
||||
// Don't print if we have matchers and they have not matched, irregardless of extractor
|
||||
if len(r.Matchers) > 0 && !matches {
|
||||
return nil, false
|
||||
}
|
||||
// Write a final string of output if matcher type is
|
||||
// AND or if we have extractors for the mechanism too.
|
||||
if len(result.Extracts) > 0 || len(result.OutputExtracts) > 0 || matches {
|
||||
return result, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
|
@ -0,0 +1,2 @@
|
|||
// Package output implements output writing interfaces for nuclei.
|
||||
package output
|
|
@ -0,0 +1,39 @@
|
|||
package output
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"os"
|
||||
)
|
||||
|
||||
// fileWriter is a concurrent file based output writer.
|
||||
type fileWriter struct {
|
||||
file *os.File
|
||||
writer *bufio.Writer
|
||||
}
|
||||
|
||||
// NewFileOutputWriter creates a new buffered writer for a file
|
||||
func newFileOutputWriter(file string) (*fileWriter, error) {
|
||||
output, err := os.Create(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &fileWriter{file: output, writer: bufio.NewWriter(output)}, nil
|
||||
}
|
||||
|
||||
// WriteString writes an output to the underlying file
|
||||
func (w *fileWriter) Write(data []byte) error {
|
||||
_, err := w.writer.Write(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.writer.WriteRune('\n')
|
||||
return err
|
||||
}
|
||||
|
||||
// Close closes the underlying writer flushing everything to disk
|
||||
func (w *fileWriter) Close() error {
|
||||
w.writer.Flush()
|
||||
//nolint:errcheck // we don't care whether sync failed or succeeded.
|
||||
w.file.Sync()
|
||||
return w.file.Close()
|
||||
}
|
|
@ -0,0 +1,13 @@
|
|||
package output
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
// formatJSON formats the output for json based formatting
|
||||
func (w *StandardWriter) formatJSON(output *ResultEvent) ([]byte, error) {
|
||||
output.Timestamp = time.Now()
|
||||
return jsoniter.Marshal(output)
|
||||
}
|
|
@ -0,0 +1,64 @@
|
|||
package output
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
)
|
||||
|
||||
// formatScreen formats the output for showing on screen.
|
||||
func (w *StandardWriter) formatScreen(output *ResultEvent) ([]byte, error) {
|
||||
builder := &bytes.Buffer{}
|
||||
|
||||
if !w.noMetadata {
|
||||
builder.WriteRune('[')
|
||||
builder.WriteString(w.aurora.BrightGreen(output.TemplateID).String())
|
||||
|
||||
if output.MatcherName != "" {
|
||||
builder.WriteString(":")
|
||||
builder.WriteString(w.aurora.BrightGreen(output.MatcherName).Bold().String())
|
||||
}
|
||||
|
||||
builder.WriteString("] [")
|
||||
builder.WriteString(w.aurora.BrightBlue(output.Type).String())
|
||||
builder.WriteString("] ")
|
||||
|
||||
builder.WriteString("[")
|
||||
builder.WriteString(w.severityColors.Data[output.Info["severity"]])
|
||||
builder.WriteString("] ")
|
||||
}
|
||||
builder.WriteString(output.Matched)
|
||||
|
||||
// If any extractors, write the results
|
||||
if len(output.ExtractedResults) > 0 {
|
||||
builder.WriteString(" [")
|
||||
|
||||
for i, item := range output.ExtractedResults {
|
||||
builder.WriteString(w.aurora.BrightCyan(item).String())
|
||||
|
||||
if i != len(output.ExtractedResults)-1 {
|
||||
builder.WriteRune(',')
|
||||
}
|
||||
}
|
||||
builder.WriteString("]")
|
||||
}
|
||||
|
||||
// Write meta if any
|
||||
if len(output.Metadata) > 0 {
|
||||
builder.WriteString(" [")
|
||||
|
||||
var first bool = true
|
||||
for name, value := range output.Metadata {
|
||||
if !first {
|
||||
builder.WriteRune(',')
|
||||
}
|
||||
first = false
|
||||
|
||||
builder.WriteString(w.aurora.BrightYellow(name).String())
|
||||
builder.WriteRune('=')
|
||||
builder.WriteString(w.aurora.BrightYellow(types.ToString(value)).String())
|
||||
}
|
||||
builder.WriteString("]")
|
||||
}
|
||||
return builder.Bytes(), nil
|
||||
}
|
|
@ -0,0 +1,188 @@
|
|||
package output
|
||||
|
||||
import (
|
||||
"os"
|
||||
"regexp"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/logrusorgru/aurora"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/colorizer"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators"
|
||||
)
|
||||
|
||||
// Writer is an interface which writes output to somewhere for nuclei events.
|
||||
type Writer interface {
|
||||
// Close closes the output writer interface
|
||||
Close()
|
||||
// Colorizer returns the colorizer instance for writer
|
||||
Colorizer() aurora.Aurora
|
||||
// Write writes the event to file and/or screen.
|
||||
Write(*ResultEvent) error
|
||||
// Request writes a log the requests trace log
|
||||
Request(templateID, url, requestType string, err error)
|
||||
}
|
||||
|
||||
// StandardWriter is a writer writing output to file and screen for results.
|
||||
type StandardWriter struct {
|
||||
json bool
|
||||
noMetadata bool
|
||||
aurora aurora.Aurora
|
||||
outputFile *fileWriter
|
||||
outputMutex *sync.Mutex
|
||||
traceFile *fileWriter
|
||||
traceMutex *sync.Mutex
|
||||
severityColors *colorizer.Colorizer
|
||||
}
|
||||
|
||||
var decolorizerRegex = regexp.MustCompile(`\x1B\[[0-9;]*[a-zA-Z]`)
|
||||
|
||||
// InternalEvent is an internal output generation structure for nuclei.
|
||||
type InternalEvent map[string]interface{}
|
||||
|
||||
// InternalWrappedEvent is a wrapped event with operators result added to it.
|
||||
type InternalWrappedEvent struct {
|
||||
InternalEvent InternalEvent
|
||||
Results []*ResultEvent
|
||||
OperatorsResult *operators.Result
|
||||
}
|
||||
|
||||
// ResultEvent is a wrapped result event for a single nuclei output.
|
||||
type ResultEvent struct {
|
||||
// TemplateID is the ID of the template for the result.
|
||||
TemplateID string `json:"templateID"`
|
||||
// Info contains information block of the template for the result.
|
||||
Info map[string]string `json:"info,inline"`
|
||||
// MatcherName is the name of the matcher matched if any.
|
||||
MatcherName string `json:"matcher_name,omitempty"`
|
||||
// ExtractorName is the name of the extractor matched if any.
|
||||
ExtractorName string `json:"extractor_name,omitempty"`
|
||||
// Type is the type of the result event.
|
||||
Type string `json:"type"`
|
||||
// Host is the host input on which match was found.
|
||||
Host string `json:"host,omitempty"`
|
||||
// Matched contains the matched input in its transformed form.
|
||||
Matched string `json:"matched,omitempty"`
|
||||
// ExtractedResults contains the extraction result from the inputs.
|
||||
ExtractedResults []string `json:"extracted_results,omitempty"`
|
||||
// Request is the optional dumped request for the match.
|
||||
Request string `json:"request,omitempty"`
|
||||
// Response is the optional dumped response for the match.
|
||||
Response string `json:"response,omitempty"`
|
||||
// Metadata contains any optional metadata for the event
|
||||
Metadata map[string]interface{} `json:"meta,omitempty"`
|
||||
// IP is the IP address for the found result event.
|
||||
IP string `json:"ip,omitempty"`
|
||||
// Timestamp is the time the result was found at.
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
}
|
||||
|
||||
// NewStandardWriter creates a new output writer based on user configurations
|
||||
func NewStandardWriter(colors, noMetadata, json bool, file, traceFile string) (*StandardWriter, error) {
|
||||
auroraColorizer := aurora.NewAurora(colors)
|
||||
|
||||
var outputFile *fileWriter
|
||||
if file != "" {
|
||||
output, err := newFileOutputWriter(file)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create output file")
|
||||
}
|
||||
outputFile = output
|
||||
}
|
||||
var traceOutput *fileWriter
|
||||
if traceFile != "" {
|
||||
output, err := newFileOutputWriter(traceFile)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create output file")
|
||||
}
|
||||
traceOutput = output
|
||||
}
|
||||
writer := &StandardWriter{
|
||||
json: json,
|
||||
noMetadata: noMetadata,
|
||||
aurora: auroraColorizer,
|
||||
outputFile: outputFile,
|
||||
outputMutex: &sync.Mutex{},
|
||||
traceFile: traceOutput,
|
||||
traceMutex: &sync.Mutex{},
|
||||
severityColors: colorizer.New(auroraColorizer),
|
||||
}
|
||||
return writer, nil
|
||||
}
|
||||
|
||||
// Write writes the event to file and/or screen.
|
||||
func (w *StandardWriter) Write(event *ResultEvent) error {
|
||||
var data []byte
|
||||
var err error
|
||||
|
||||
if w.json {
|
||||
data, err = w.formatJSON(event)
|
||||
} else {
|
||||
data, err = w.formatScreen(event)
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not format output")
|
||||
}
|
||||
_, _ = os.Stdout.Write(data)
|
||||
_, _ = os.Stdout.Write([]byte("\n"))
|
||||
if w.outputFile != nil {
|
||||
if !w.json {
|
||||
data = decolorizerRegex.ReplaceAll(data, []byte(""))
|
||||
}
|
||||
if writeErr := w.outputFile.Write(data); writeErr != nil {
|
||||
return errors.Wrap(err, "could not write to output")
|
||||
}
|
||||
_ = w.outputFile.Write([]byte("\n"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// JSONTraceRequest is a trace log request written to file
|
||||
type JSONTraceRequest struct {
|
||||
ID string `json:"id"`
|
||||
URL string `json:"url"`
|
||||
Error string `json:"error"`
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
// Request writes a log the requests trace log
|
||||
func (w *StandardWriter) Request(templateID, url, requestType string, err error) {
|
||||
if w.traceFile == nil {
|
||||
return
|
||||
}
|
||||
request := &JSONTraceRequest{
|
||||
ID: templateID,
|
||||
URL: url,
|
||||
Type: requestType,
|
||||
}
|
||||
if err != nil {
|
||||
request.Error = err.Error()
|
||||
} else {
|
||||
request.Error = "none"
|
||||
}
|
||||
|
||||
data, err := jsoniter.Marshal(request)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
w.traceMutex.Lock()
|
||||
_ = w.traceFile.Write(data)
|
||||
w.traceMutex.Unlock()
|
||||
}
|
||||
|
||||
// Colorizer returns the colorizer instance for writer
|
||||
func (w *StandardWriter) Colorizer() aurora.Aurora {
|
||||
return w.aurora
|
||||
}
|
||||
|
||||
// Close closes the output writing interface
|
||||
func (w *StandardWriter) Close() {
|
||||
if w.outputFile != nil {
|
||||
w.outputFile.Close()
|
||||
}
|
||||
if w.traceFile != nil {
|
||||
w.traceFile.Close()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,49 @@
|
|||
package clusterer
|
||||
|
||||
import (
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/templates"
|
||||
)
|
||||
|
||||
// Cluster clusters a list of templates into a lesser number if possible based
|
||||
// on the similarity between the sent requests.
|
||||
//
|
||||
// If the attributes match, multiple requests can be clustered into a single
|
||||
// request which saves time and network resources during execution.
|
||||
func Cluster(list map[string]*templates.Template) [][]*templates.Template {
|
||||
final := [][]*templates.Template{}
|
||||
|
||||
// Each protocol that can be clustered should be handled here.
|
||||
for key, template := range list {
|
||||
// We only cluster http requests as of now.
|
||||
// Take care of requests that can't be clustered first.
|
||||
if len(template.RequestsHTTP) == 0 {
|
||||
delete(list, key)
|
||||
final = append(final, []*templates.Template{template})
|
||||
continue
|
||||
}
|
||||
|
||||
delete(list, key) // delete element first so it's not found later.
|
||||
// Find any/all similar matching request that is identical to
|
||||
// this one and cluster them together for http protocol only.
|
||||
if len(template.RequestsHTTP) == 1 {
|
||||
cluster := []*templates.Template{}
|
||||
|
||||
for otherKey, other := range list {
|
||||
if len(other.RequestsHTTP) == 0 {
|
||||
continue
|
||||
}
|
||||
if template.RequestsHTTP[0].CanCluster(other.RequestsHTTP[0]) {
|
||||
delete(list, otherKey)
|
||||
cluster = append(cluster, other)
|
||||
}
|
||||
}
|
||||
if len(cluster) > 0 {
|
||||
cluster = append(cluster, template)
|
||||
final = append(final, cluster)
|
||||
continue
|
||||
}
|
||||
}
|
||||
final = append(final, []*templates.Template{template})
|
||||
}
|
||||
return final
|
||||
}
|
|
@ -0,0 +1,78 @@
|
|||
package clusterer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"testing"
|
||||
|
||||
"github.com/logrusorgru/aurora"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/catalogue"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/protocolinit"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/templates"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestHTTPRequestsCluster(t *testing.T) {
|
||||
catalogue := catalogue.New("/Users/ice3man/nuclei-templates")
|
||||
templatesList, err := catalogue.GetTemplatePath("/Users/ice3man/nuclei-templates")
|
||||
require.Nil(t, err, "could not get templates")
|
||||
|
||||
protocolinit.Init(&types.Options{})
|
||||
list := make(map[string]*templates.Template)
|
||||
for _, template := range templatesList {
|
||||
executerOpts := &protocols.ExecuterOptions{
|
||||
Output: &mockOutput{},
|
||||
Options: &types.Options{},
|
||||
Progress: nil,
|
||||
Catalogue: catalogue,
|
||||
RateLimiter: nil,
|
||||
ProjectFile: nil,
|
||||
}
|
||||
t, err := templates.Parse(template, executerOpts)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if _, ok := list[t.ID]; !ok {
|
||||
list[t.ID] = t
|
||||
} else {
|
||||
log.Printf("Duplicate template found: %v\n", t)
|
||||
}
|
||||
}
|
||||
|
||||
totalClusterCount := 0
|
||||
totalRequestsSentNew := 0
|
||||
new := Cluster(list)
|
||||
for i, cluster := range new {
|
||||
if len(cluster) == 1 {
|
||||
continue
|
||||
}
|
||||
fmt.Printf("[%d] cluster created:\n", i)
|
||||
for _, request := range cluster {
|
||||
totalClusterCount++
|
||||
fmt.Printf("\t%v\n", request.ID)
|
||||
}
|
||||
totalRequestsSentNew++
|
||||
}
|
||||
fmt.Printf("Reduced %d requests to %d via clustering\n", totalClusterCount, totalRequestsSentNew)
|
||||
}
|
||||
|
||||
// mockOutput is a no-op output writer used by the clustering test so that
// template parsing has a writer to hand without touching the filesystem.
type mockOutput struct{}

// Close closes the output writer interface
func (m *mockOutput) Close() {}

// Colorizer returns the colorizer instance for writer
//
// NOTE(review): returns nil — safe only because this mock is never asked
// to colorize anything. Confirm no code path calls methods on it.
func (m *mockOutput) Colorizer() aurora.Aurora {
	return nil
}

// Write writes the event to file and/or screen.
func (m *mockOutput) Write(*output.ResultEvent) error {
	return nil
}

// Request writes a log the requests trace log
func (m *mockOutput) Request(templateID, url, requestType string, err error) {}
|
|
@ -0,0 +1,101 @@
|
|||
package clusterer
|
||||
|
||||
import (
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/http"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/templates"
|
||||
)
|
||||
|
||||
// Executer executes a group of requests for a protocol for a clustered
// request. It is different from normal executers since the original
// operators are all combined and post processed after making the request.
//
// TODO: We only cluster http requests as of now.
type Executer struct {
	// requests is the single representative HTTP request shared by the
	// whole cluster; it is sent once per input.
	requests *http.Request
	// operators holds every clustered template's matcher/extractor set,
	// each evaluated against the one shared response.
	operators []*clusteredOperator
	options   *protocols.ExecuterOptions
}

// clusteredOperator pairs one template's identity with its compiled
// operators so matches can be attributed to the right template.
type clusteredOperator struct {
	templateID   string
	templateInfo map[string]string
	operator     *operators.Operators
}

// Compile-time check that Executer satisfies protocols.Executer.
var _ protocols.Executer = &Executer{}
|
||||
|
||||
// NewExecuter creates a new request executer for list of requests
|
||||
func NewExecuter(requests []*templates.Template, options *protocols.ExecuterOptions) *Executer {
|
||||
executer := &Executer{
|
||||
options: options,
|
||||
requests: requests[0].RequestsHTTP[0],
|
||||
}
|
||||
for _, req := range requests {
|
||||
executer.operators = append(executer.operators, &clusteredOperator{
|
||||
templateID: req.ID,
|
||||
templateInfo: req.Info,
|
||||
operator: req.RequestsHTTP[0].CompiledOperators,
|
||||
})
|
||||
}
|
||||
return executer
|
||||
}
|
||||
|
||||
// Compile compiles the execution generators preparing any requests possible.
func (e *Executer) Compile() error {
	// Only the single representative request needs compiling; each
	// template's operators arrive pre-compiled via CompiledOperators
	// (see NewExecuter).
	return e.requests.Compile(e.options)
}
|
||||
|
||||
// Requests returns the total number of requests the rule will perform
|
||||
func (e *Executer) Requests() int {
|
||||
var count int
|
||||
count += e.requests.Requests()
|
||||
return count
|
||||
}
|
||||
|
||||
// Execute executes the protocol group and returns true or false if results were found.
//
// The shared request is sent once; every clustered template's operators are
// then evaluated against the same response, and matches are written to the
// configured output, attributed to the template whose operators matched.
func (e *Executer) Execute(input string) (bool, error) {
	var results bool

	dynamicValues := make(map[string]interface{})
	err := e.requests.ExecuteWithResults(input, dynamicValues, func(event *output.InternalWrappedEvent) {
		// Run each template's operators against the single response.
		for _, operator := range e.operators {
			result, matched := operator.operator.Execute(event.InternalEvent, e.requests.Match, e.requests.Extract)
			if matched && result != nil {
				// Re-label the shared event with the matching template's
				// identity before producing result events.
				event.OperatorsResult = result
				event.InternalEvent["template-id"] = operator.templateID
				event.InternalEvent["template-info"] = operator.templateInfo
				event.Results = e.requests.MakeResultEvent(event)
				results = true
				for _, r := range event.Results {
					e.options.Output.Write(r)
					e.options.Progress.IncrementMatched()
				}
			}
		}
	})
	if err != nil {
		return results, err
	}
	return results, nil
}
|
||||
|
||||
// ExecuteWithResults executes the protocol requests and returns results instead of writing them.
|
||||
func (e *Executer) ExecuteWithResults(input string, callback protocols.OutputEventCallback) error {
|
||||
dynamicValues := make(map[string]interface{})
|
||||
_ = e.requests.ExecuteWithResults(input, dynamicValues, func(event *output.InternalWrappedEvent) {
|
||||
for _, operator := range e.operators {
|
||||
result, matched := operator.operator.Execute(event.InternalEvent, e.requests.Match, e.requests.Extract)
|
||||
if matched && result != nil {
|
||||
event.OperatorsResult = result
|
||||
event.InternalEvent["template-id"] = operator.templateID
|
||||
event.InternalEvent["template-info"] = operator.templateInfo
|
||||
event.Results = e.requests.MakeResultEvent(event)
|
||||
callback(event)
|
||||
}
|
||||
}
|
||||
})
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,37 @@
|
|||
package compare
|
||||
|
||||
import "strings"
|
||||
|
||||
// StringSlice compares two string slices for equality
//
// Elements are compared case-insensitively (strings.EqualFold) and in
// order; a nil slice is only equal to another nil slice.
func StringSlice(a, b []string) bool {
	if (a == nil) != (b == nil) {
		return false
	}
	if len(a) != len(b) {
		return false
	}
	for index, first := range a {
		if !strings.EqualFold(first, b[index]) {
			return false
		}
	}
	return true
}
|
||||
|
||||
// StringMap compares two string maps for equality
//
// Values are compared case-insensitively (strings.EqualFold); a nil map
// is only equal to another nil map.
func StringMap(a, b map[string]string) bool {
	if (a == nil) != (b == nil) {
		return false
	}
	if len(a) != len(b) {
		return false
	}
	for key, expected := range a {
		actual, found := b[key]
		if !found || !strings.EqualFold(expected, actual) {
			return false
		}
	}
	return true
}
|
|
@ -0,0 +1,76 @@
|
|||
package executer
|
||||
|
||||
import (
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
)
|
||||
|
||||
// Executer executes a group of requests for a protocol
type Executer struct {
	// requests are the protocol requests, executed in declaration order.
	requests []protocols.Request
	options  *protocols.ExecuterOptions
}

// Compile-time check that Executer satisfies protocols.Executer.
var _ protocols.Executer = &Executer{}
|
||||
|
||||
// NewExecuter creates a new request executer for list of requests
//
// The requests are stored as-is; call Compile before Execute.
func NewExecuter(requests []protocols.Request, options *protocols.ExecuterOptions) *Executer {
	return &Executer{requests: requests, options: options}
}
|
||||
|
||||
// Compile compiles the execution generators preparing any requests possible.
|
||||
func (e *Executer) Compile() error {
|
||||
for _, request := range e.requests {
|
||||
err := request.Compile(e.options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Requests returns the total number of requests the rule will perform
|
||||
func (e *Executer) Requests() int {
|
||||
var count int
|
||||
for _, request := range e.requests {
|
||||
count += request.Requests()
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Execute executes the protocol group and returns true or false if results were found.
//
// Execution is best-effort: an error from one request does not stop the
// remaining requests in the group.
func (e *Executer) Execute(input string) (bool, error) {
	var results bool

	dynamicValues := make(map[string]interface{})
	for _, req := range e.requests {
		err := req.ExecuteWithResults(input, dynamicValues, func(event *output.InternalWrappedEvent) {
			// Only events that matched at least one operator carry results.
			if event.OperatorsResult == nil {
				return
			}
			for _, result := range event.Results {
				results = true
				e.options.Output.Write(result)
				e.options.Progress.IncrementMatched()
			}
		})
		// Deliberately swallow per-request errors so every request runs.
		// (continue at the end of the loop body is a no-op; it only
		// documents the intent.)
		if err != nil {
			continue
		}
	}
	return results, nil
}
|
||||
|
||||
// ExecuteWithResults executes the protocol requests and returns results instead of writing them.
//
// Per-request errors are deliberately ignored so every request in the
// group gets a chance to run; matched events are delivered via callback.
func (e *Executer) ExecuteWithResults(input string, callback protocols.OutputEventCallback) error {
	dynamicValues := make(map[string]interface{})
	for _, req := range e.requests {
		// Error intentionally discarded — best-effort across requests.
		_ = req.ExecuteWithResults(input, dynamicValues, func(event *output.InternalWrappedEvent) {
			// Skip events that matched nothing.
			if event.OperatorsResult == nil {
				return
			}
			callback(event)
		})
	}
	return nil
}
|
|
@ -0,0 +1,240 @@
|
|||
// Inspired from https://github.com/ffuf/ffuf/blob/master/pkg/input/input.go
|
||||
|
||||
package generators
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Generator is the generator struct for generating payloads
type Generator struct {
	// Type selects the attack strategy (Sniper, PitchFork or ClusterBomb).
	Type Type
	// payloads maps each payload keyword to its list of values.
	payloads map[string][]string
}
|
||||
|
||||
// Type is type of attack
//
// Values start at 1 (iota + 1) so the zero value is not a valid type.
type Type int

const (
	// Sniper replaces each variables with values at a time.
	Sniper Type = iota + 1
	// PitchFork replaces variables with positional value from multiple wordlists
	PitchFork
	// ClusterBomb replaces variables with all possible combinations of values
	ClusterBomb
)
|
||||
|
||||
// StringToType is a table for conversion of attack type from string.
//
// Keys are the lowercase names accepted in template definitions.
var StringToType = map[string]Type{
	"sniper":      Sniper,
	"pitchfork":   PitchFork,
	"clusterbomb": ClusterBomb,
}
|
||||
|
||||
// New creates a new generator structure for payload generation
|
||||
func New(payloads map[string]interface{}, Type Type, templatePath string) (*Generator, error) {
|
||||
generator := &Generator{}
|
||||
if err := generator.validate(payloads, templatePath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
compiled, err := loadPayloads(payloads)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
generator.Type = Type
|
||||
generator.payloads = compiled
|
||||
|
||||
// Validate the payload types
|
||||
if Type == PitchFork {
|
||||
var totalLength int
|
||||
for v := range compiled {
|
||||
if totalLength != 0 && totalLength != len(v) {
|
||||
return nil, errors.New("pitchfork payloads must be of equal number")
|
||||
}
|
||||
totalLength = len(v)
|
||||
}
|
||||
}
|
||||
return generator, nil
|
||||
}
|
||||
|
||||
// Iterator is a single instance of an iterator for a generator structure
type Iterator struct {
	// Type mirrors the generator's attack type.
	Type Type
	// position counts how many combinations have been produced so far.
	position int
	// msbIterator is the index of the payload list currently acting as
	// the most-significant "digit" (sniper: the active list).
	msbIterator int
	// total is the number of combinations, computed once in NewIterator.
	total    int
	payloads []*payloadIterator
}
|
||||
|
||||
// NewIterator creates a new iterator for the payloads generator
|
||||
func (g *Generator) NewIterator() *Iterator {
|
||||
var payloads []*payloadIterator
|
||||
|
||||
for name, values := range g.payloads {
|
||||
payloads = append(payloads, &payloadIterator{name: name, values: values})
|
||||
}
|
||||
iterator := &Iterator{
|
||||
Type: g.Type,
|
||||
payloads: payloads,
|
||||
}
|
||||
iterator.total = iterator.Total()
|
||||
return iterator
|
||||
}
|
||||
|
||||
// Reset resets the iterator back to its initial value
|
||||
func (i *Iterator) Reset() {
|
||||
i.position = 0
|
||||
i.msbIterator = 0
|
||||
|
||||
for _, payload := range i.payloads {
|
||||
payload.resetPosition()
|
||||
}
|
||||
}
|
||||
|
||||
// Remaining returns the amount of requests left for the generator.
func (i *Iterator) Remaining() int {
	// total is fixed at construction; position advances with each Value call.
	return i.total - i.position
}
|
||||
|
||||
// Total returns the amount of input combinations available
|
||||
func (i *Iterator) Total() int {
|
||||
count := 0
|
||||
switch i.Type {
|
||||
case Sniper:
|
||||
for _, p := range i.payloads {
|
||||
count += len(p.values)
|
||||
}
|
||||
case PitchFork:
|
||||
count = len(i.payloads[0].values)
|
||||
case ClusterBomb:
|
||||
count = 1
|
||||
for _, p := range i.payloads {
|
||||
count = count * len(p.values)
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Value returns the next value for an iterator
|
||||
func (i *Iterator) Value() (map[string]interface{}, bool) {
|
||||
switch i.Type {
|
||||
case Sniper:
|
||||
return i.sniperValue()
|
||||
case PitchFork:
|
||||
return i.pitchforkValue()
|
||||
case ClusterBomb:
|
||||
return i.clusterbombValue()
|
||||
default:
|
||||
return i.sniperValue()
|
||||
}
|
||||
}
|
||||
|
||||
// sniperValue returns a list of all payloads for the iterator
//
// Sniper mode walks the payload lists one at a time: msbIterator selects
// the active list, and recursion advances to the next list once the
// current one is exhausted. Each returned map holds exactly one pair.
func (i *Iterator) sniperValue() (map[string]interface{}, bool) {
	values := make(map[string]interface{}, 1)

	currentIndex := i.msbIterator
	payload := i.payloads[currentIndex]
	if !payload.next() {
		// Current list exhausted: move to the next one, or finish.
		i.msbIterator++
		if i.msbIterator == len(i.payloads) {
			return nil, false
		}
		// Recursion depth is bounded by len(i.payloads).
		return i.sniperValue()
	}
	values[payload.name] = payload.value()
	payload.incrementPosition()
	i.position++
	return values, true
}
|
||||
|
||||
// pitchforkValue returns a map of keyword:value pairs in same index
|
||||
func (i *Iterator) pitchforkValue() (map[string]interface{}, bool) {
|
||||
values := make(map[string]interface{}, len(i.payloads))
|
||||
|
||||
for _, p := range i.payloads {
|
||||
if !p.next() {
|
||||
return nil, false
|
||||
}
|
||||
values[p.name] = p.value()
|
||||
p.incrementPosition()
|
||||
}
|
||||
i.position++
|
||||
return values, true
|
||||
}
|
||||
|
||||
// clusterbombValue returns a combination of all input pairs in key:value format.
//
// The payload lists behave like digits of an odometer: the first list
// advances on every call, and when a list overflows it signals the next
// one to advance. msbIterator tracks the most-significant list that has
// started cycling; recursion restarts the scan after a carry.
func (i *Iterator) clusterbombValue() (map[string]interface{}, bool) {
	if i.position >= i.total {
		return nil, false
	}
	values := make(map[string]interface{}, len(i.payloads))

	// Should we signal the next InputProvider in the slice to increment
	signalNext := false
	first := true
	for index, p := range i.payloads {
		if signalNext {
			// Carry from the previous (less significant) list.
			p.incrementPosition()
			signalNext = false
		}
		if !p.next() {
			// No more inputs in this inputprovider
			if index == i.msbIterator {
				// Reset all previous wordlists and increment the msb counter
				i.msbIterator++
				i.clusterbombIteratorReset()
				// Start again
				return i.clusterbombValue()
			}
			p.resetPosition()
			signalNext = true
		}
		values[p.name] = p.value()
		if first {
			// Only the least-significant list advances on every call.
			p.incrementPosition()
			first = false
		}
	}
	i.position++
	return values, true
}
|
||||
|
||||
func (i *Iterator) clusterbombIteratorReset() {
|
||||
for index, p := range i.payloads {
|
||||
if index < i.msbIterator {
|
||||
p.resetPosition()
|
||||
}
|
||||
if index == i.msbIterator {
|
||||
p.incrementPosition()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// payloadIterator walks a single named payload wordlist from start to end.
type payloadIterator struct {
	index  int      // current read position within values
	name   string   // payload keyword this list belongs to
	values []string // the wordlist entries
}

// next reports whether at least one unread value remains.
func (p *payloadIterator) next() bool {
	return p.index < len(p.values)
}

// resetPosition rewinds the iterator to the first value.
func (p *payloadIterator) resetPosition() {
	p.index = 0
}

// incrementPosition moves the iterator one value forward.
func (p *payloadIterator) incrementPosition() {
	p.index++
}

// value returns the entry at the current position.
func (p *payloadIterator) value() string {
	return p.values[p.index]
}
|
|
@ -0,0 +1,81 @@
|
|||
package generators
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSniperGenerator(t *testing.T) {
|
||||
usernames := []string{"admin", "password"}
|
||||
moreUsernames := []string{"login", "test"}
|
||||
|
||||
generator, err := New(map[string]interface{}{"username": usernames, "aliases": moreUsernames}, Sniper, "")
|
||||
require.Nil(t, err, "could not create generator")
|
||||
|
||||
iterator := generator.NewIterator()
|
||||
count := 0
|
||||
for {
|
||||
_, ok := iterator.Value()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
count++
|
||||
}
|
||||
require.Equal(t, len(usernames)+len(moreUsernames), count, "could not get correct sniper counts")
|
||||
}
|
||||
|
||||
func TestPitchforkGenerator(t *testing.T) {
|
||||
usernames := []string{"admin", "token"}
|
||||
passwords := []string{"admin", "password"}
|
||||
|
||||
generator, err := New(map[string]interface{}{"username": usernames, "password": passwords}, PitchFork, "")
|
||||
require.Nil(t, err, "could not create generator")
|
||||
|
||||
iterator := generator.NewIterator()
|
||||
count := 0
|
||||
for {
|
||||
value, ok := iterator.Value()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
count++
|
||||
require.Contains(t, usernames, value["username"], "Could not get correct pitchfork username")
|
||||
require.Contains(t, passwords, value["password"], "Could not get correct pitchfork password")
|
||||
}
|
||||
require.Equal(t, len(passwords), count, "could not get correct pitchfork counts")
|
||||
}
|
||||
|
||||
func TestClusterbombGenerator(t *testing.T) {
|
||||
usernames := []string{"admin"}
|
||||
passwords := []string{"admin", "password", "token"}
|
||||
|
||||
generator, err := New(map[string]interface{}{"username": usernames, "password": passwords}, ClusterBomb, "")
|
||||
require.Nil(t, err, "could not create generator")
|
||||
|
||||
iterator := generator.NewIterator()
|
||||
count := 0
|
||||
for {
|
||||
value, ok := iterator.Value()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
count++
|
||||
require.Contains(t, usernames, value["username"], "Could not get correct clusterbomb username")
|
||||
require.Contains(t, passwords, value["password"], "Could not get correct clusterbomb password")
|
||||
}
|
||||
require.Equal(t, 3, count, "could not get correct clusterbomb counts")
|
||||
|
||||
iterator.Reset()
|
||||
count = 0
|
||||
for {
|
||||
value, ok := iterator.Value()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
count++
|
||||
require.Contains(t, usernames, value["username"], "Could not get correct clusterbomb username")
|
||||
require.Contains(t, passwords, value["password"], "Could not get correct clusterbomb password")
|
||||
}
|
||||
require.Equal(t, 3, count, "could not get correct clusterbomb counts")
|
||||
}
|
|
@ -0,0 +1,60 @@
|
|||
package generators
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cast"
|
||||
)
|
||||
|
||||
// loadPayloads loads the input payloads from a map to a data map
//
// Inline multiline strings become value lists directly; single-line
// strings are treated as wordlist file paths and read from disk; any
// other non-nil value is coerced to a string slice.
func loadPayloads(payloads map[string]interface{}) (map[string][]string, error) {
	loadedPayloads := make(map[string][]string)

	for name, payload := range payloads {
		switch pt := payload.(type) {
		case string:
			elements := strings.Split(pt, "\n")
			//nolint:gomnd // this is not a magic number
			if len(elements) >= 2 {
				loadedPayloads[name] = elements
			} else {
				payloads, err := loadPayloadsFromFile(pt)
				if err != nil {
					return nil, errors.Wrap(err, "could not load payloads")
				}
				loadedPayloads[name] = payloads
			}
		case interface{}:
			// Catch-all for non-nil, non-string payloads (e.g. YAML lists).
			loadedPayloads[name] = cast.ToStringSlice(pt)
		}
	}
	return loadedPayloads, nil
}
|
||||
|
||||
// loadPayloadsFromFile loads a file to a string slice
|
||||
func loadPayloadsFromFile(filepath string) ([]string, error) {
|
||||
var lines []string
|
||||
|
||||
file, err := os.Open(filepath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
text := scanner.Text()
|
||||
if text == "" {
|
||||
continue
|
||||
}
|
||||
lines = append(lines, text)
|
||||
}
|
||||
if err := scanner.Err(); err != nil && err != io.EOF {
|
||||
return lines, scanner.Err()
|
||||
}
|
||||
return lines, nil
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
package generators
|
||||
|
||||
import "strings"
|
||||
|
||||
// MergeMaps merges two maps into a new map
//
// Keys present in both maps take their value from m2.
func MergeMaps(m1, m2 map[string]interface{}) map[string]interface{} {
	merged := make(map[string]interface{}, len(m1)+len(m2))
	for _, source := range []map[string]interface{}{m1, m2} {
		for key, value := range source {
			merged[key] = value
		}
	}
	return merged
}
|
||||
|
||||
// ExpandMapValues converts values from flat string to strings slice
//
// Each value becomes a single-element slice under the same key.
func ExpandMapValues(m map[string]string) map[string][]string {
	expanded := make(map[string][]string, len(m))
	for key, value := range m {
		expanded[key] = []string{value}
	}
	return expanded
}
|
||||
|
||||
// CopyMap creates a new copy of an existing map
//
// Only the top level is copied; values are shared with the original.
func CopyMap(originalMap map[string]interface{}) map[string]interface{} {
	// Pre-size to avoid rehashing while copying.
	newMap := make(map[string]interface{}, len(originalMap))
	for key, value := range originalMap {
		newMap[key] = value
	}
	return newMap
}
|
||||
|
||||
// CopyMapWithDefaultValue creates a new copy of an existing map and set a default value
//
// Only the keys are copied; every key maps to defaultValue.
func CopyMapWithDefaultValue(originalMap map[string][]string, defaultValue interface{}) map[string]interface{} {
	// Pre-size to avoid rehashing while copying.
	newMap := make(map[string]interface{}, len(originalMap))
	for key := range originalMap {
		newMap[key] = defaultValue
	}
	return newMap
}
|
||||
|
||||
// TrimDelimiters removes trailing brackets
//
// A leading "{{" and a trailing "}}" are stripped from s when present.
func TrimDelimiters(s string) string {
	trimmed := strings.TrimPrefix(s, "{{")
	return strings.TrimSuffix(trimmed, "}}")
}
|
|
@ -0,0 +1,61 @@
|
|||
package generators
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
)
|
||||
|
||||
// validate validates the payloads if any.
//
// String payloads must name a wordlist file, resolved either directly or
// relative to successive ancestors of the template path; list payloads
// must be non-empty.
func (g *Generator) validate(payloads map[string]interface{}, templatePath string) error {
	for name, payload := range payloads {
		switch pt := payload.(type) {
		case string:
			// check if it's a multiline string list
			// NOTE(review): this rejects inline multiline payload lists
			// even though loadPayloads can parse them — confirm whether
			// multiline inline payloads are meant to be supported.
			if len(strings.Split(pt, "\n")) != 1 {
				return errors.New("invalid number of lines in payload")
			}

			// check if it's a worldlist file and try to load it
			if fileExists(pt) {
				continue
			}

			changed := false
			pathTokens := strings.Split(templatePath, "/")

			// Walk up the template path, trying each ancestor directory
			// as a base for the relative wordlist path.
			for i := range pathTokens {
				tpath := path.Join(strings.Join(pathTokens[:i], "/"), pt)
				if fileExists(tpath) {
					// Rewrite the payload to the resolved path so later
					// loading uses it directly.
					payloads[name] = tpath
					changed = true
					break
				}
			}
			if !changed {
				return fmt.Errorf("the %s file for payload %s does not exist or does not contain enough elements", pt, name)
			}
		case interface{}:
			// Any non-nil, non-string payload is treated as a value list.
			loadedPayloads := types.ToStringSlice(pt)
			if len(loadedPayloads) == 0 {
				return fmt.Errorf("the payload %s does not contain enough elements", name)
			}
		default:
			// Only reachable for a nil payload value.
			return fmt.Errorf("the payload %s has invalid type", name)
		}
	}
	return nil
}
|
||||
|
||||
// fileExists checks if a file exists and is not a directory
|
||||
func fileExists(filename string) bool {
|
||||
info, err := os.Stat(filename)
|
||||
if os.IsNotExist(err) {
|
||||
return false
|
||||
}
|
||||
return !info.IsDir()
|
||||
}
|
|
@ -0,0 +1,22 @@
|
|||
package protocolinit
|
||||
|
||||
import (
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/dns/dnsclientpool"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/http/httpclientpool"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/network/networkclientpool"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
)
|
||||
|
||||
// Init initializes the client pools for the protocols
|
||||
func Init(options *types.Options) error {
|
||||
if err := dnsclientpool.Init(options); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := httpclientpool.Init(options); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := networkclientpool.Init(options); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,32 @@
|
|||
package replacer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Payload marker constants
const (
	// MarkerGeneral delimits a key on both sides: §key§.
	MarkerGeneral = "§"
	// MarkerParenthesisOpen and MarkerParenthesisClose wrap a key: {{key}}.
	MarkerParenthesisOpen  = "{{"
	MarkerParenthesisClose = "}}"
)
|
||||
|
||||
// New creates a new replacer structure for values replacement on the fly.
|
||||
func New(values map[string]interface{}) *strings.Replacer {
|
||||
replacerItems := make([]string, 0, len(values)*4)
|
||||
|
||||
for key, val := range values {
|
||||
valueStr := fmt.Sprintf("%s", val)
|
||||
|
||||
replacerItems = append(replacerItems,
|
||||
fmt.Sprintf("%s%s%s", MarkerParenthesisOpen, key, MarkerParenthesisClose),
|
||||
valueStr,
|
||||
)
|
||||
replacerItems = append(replacerItems,
|
||||
fmt.Sprintf("%s%s%s", MarkerGeneral, key, MarkerGeneral),
|
||||
valueStr,
|
||||
)
|
||||
}
|
||||
return strings.NewReplacer(replacerItems...)
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
package tostring
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// UnsafeToString converts byte slice to string with zero allocations
//
// The returned string aliases the slice's backing array, so the caller
// must not mutate bs afterwards — doing so would mutate the supposedly
// immutable string and break Go's string invariants.
func UnsafeToString(bs []byte) string {
	return *(*string)(unsafe.Pointer(&bs))
}
|
|
@ -0,0 +1,136 @@
|
|||
package dns
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/replacer"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/dns/dnsclientpool"
|
||||
"github.com/projectdiscovery/retryabledns"
|
||||
)
|
||||
|
||||
// Request contains a DNS protocol request to be made from a template
type Request struct {
	// Recursion specifies whether to recurse all the answers.
	Recursion bool `yaml:"recursion"`
	// Name is the hostname to query; it may contain a {{FQDN}} placeholder
	// substituted with the input domain (the original comment incorrectly
	// described this as a path).
	Name string `yaml:"name"`
	// Type is the type of DNS request to make
	Type string `yaml:"type"`
	// Class is the class of the DNS request
	Class string `yaml:"class"`
	// Retries is the number of retries for the DNS request
	Retries int `yaml:"retries"`
	// Raw contains a raw request
	Raw string `yaml:"raw,omitempty"`

	// Operators for the current request go here.
	operators.Operators `yaml:",inline"`
	// CompiledOperators holds the compiled matchers/extractors, set by Compile.
	CompiledOperators *operators.Operators

	// cache any variables that may be needed for operation.
	class     uint16 // numeric form of Class, set by Compile
	question  uint16 // numeric form of Type, set by Compile
	dnsClient *retryabledns.Client
	options   *protocols.ExecuterOptions
}
|
||||
|
||||
// Compile compiles the protocol request for further execution.
//
// It obtains a pooled DNS client honoring the configured retries,
// compiles matchers/extractors when present, and caches the numeric
// class and question type for Make.
func (r *Request) Compile(options *protocols.ExecuterOptions) error {
	// Create a dns client for the class
	client, err := dnsclientpool.Get(options.Options, &dnsclientpool.Configuration{
		Retries: r.Retries,
	})
	if err != nil {
		return errors.Wrap(err, "could not get dns client")
	}
	r.dnsClient = client

	if len(r.Matchers) > 0 || len(r.Extractors) > 0 {
		compiled := &r.Operators
		if err := compiled.Compile(); err != nil {
			return errors.Wrap(err, "could not compile operators")
		}
		r.CompiledOperators = compiled
	}
	r.class = classToInt(r.Class)
	r.options = options
	r.question = questionTypeToInt(r.Type)
	return nil
}
|
||||
|
||||
// Requests returns the total number of requests the YAML rule will perform
func (r *Request) Requests() int {
	// A DNS template issues exactly one query per input.
	return 1
}
|
||||
|
||||
// Make returns the request to be sent for the protocol
|
||||
func (r *Request) Make(domain string) (*dns.Msg, error) {
|
||||
domain = dns.Fqdn(domain)
|
||||
|
||||
// Build a request on the specified URL
|
||||
req := new(dns.Msg)
|
||||
req.Id = dns.Id()
|
||||
req.RecursionDesired = r.Recursion
|
||||
|
||||
var q dns.Question
|
||||
|
||||
replacer := replacer.New(map[string]interface{}{"FQDN": domain})
|
||||
|
||||
q.Name = dns.Fqdn(replacer.Replace(r.Name))
|
||||
q.Qclass = r.class
|
||||
q.Qtype = r.question
|
||||
req.Question = append(req.Question, q)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// questionTypeToInt converts DNS question type to internal representation
|
||||
func questionTypeToInt(Type string) uint16 {
|
||||
Type = strings.TrimSpace(strings.ToUpper(Type))
|
||||
question := dns.TypeA
|
||||
|
||||
switch Type {
|
||||
case "A":
|
||||
question = dns.TypeA
|
||||
case "NS":
|
||||
question = dns.TypeNS
|
||||
case "CNAME":
|
||||
question = dns.TypeCNAME
|
||||
case "SOA":
|
||||
question = dns.TypeSOA
|
||||
case "PTR":
|
||||
question = dns.TypePTR
|
||||
case "MX":
|
||||
question = dns.TypeMX
|
||||
case "TXT":
|
||||
question = dns.TypeTXT
|
||||
case "AAAA":
|
||||
question = dns.TypeAAAA
|
||||
}
|
||||
return uint16(question)
|
||||
}
|
||||
|
||||
// classToInt converts a dns class name to it's internal representation
|
||||
func classToInt(class string) uint16 {
|
||||
class = strings.TrimSpace(strings.ToUpper(class))
|
||||
result := dns.ClassINET
|
||||
|
||||
switch class {
|
||||
case "INET":
|
||||
result = dns.ClassINET
|
||||
case "CSNET":
|
||||
result = dns.ClassCSNET
|
||||
case "CHAOS":
|
||||
result = dns.ClassCHAOS
|
||||
case "HESIOD":
|
||||
result = dns.ClassHESIOD
|
||||
case "NONE":
|
||||
result = dns.ClassNONE
|
||||
case "ANY":
|
||||
result = dns.ClassANY
|
||||
}
|
||||
return uint16(result)
|
||||
}
|
|
@ -0,0 +1,74 @@
|
|||
package dnsclientpool
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
"github.com/projectdiscovery/retryabledns"
|
||||
)
|
||||
|
||||
var (
	// poolMutex guards clientPool.
	poolMutex *sync.RWMutex
	// normalClient is the shared default client (single retry), created by Init.
	normalClient *retryabledns.Client
	// clientPool caches per-configuration clients keyed by Configuration.Hash().
	clientPool map[string]*retryabledns.Client
)

// defaultResolvers contains the list of resolvers known to be trusted.
var defaultResolvers = []string{
	"1.1.1.1:53", // Cloudflare
	"1.0.0.1:53", // Cloudflare
	"8.8.8.8:53", // Google
	"8.8.4.4:53", // Google
}
|
||||
|
||||
// Init initializes the clientpool implementation
//
// NOTE(review): the normalClient != nil check is unsynchronized, so Init
// is only safe if the first call happens from a single goroutine —
// confirm callers honor that.
func Init(options *types.Options) error {
	// Don't create clients if already created in past.
	if normalClient != nil {
		return nil
	}
	poolMutex = &sync.RWMutex{}
	clientPool = make(map[string]*retryabledns.Client)

	normalClient = retryabledns.New(defaultResolvers, 1)
	return nil
}
|
||||
|
||||
// Configuration contains the custom configuration options for a client
type Configuration struct {
	// Retries contains the retries for the dns client
	Retries int
}

// Hash returns the hash of the configuration to allow client pooling
//
// The hash is "r" followed by the retry count, e.g. "r3".
func (c *Configuration) Hash() string {
	return strings.Join([]string{"r", strconv.Itoa(c.Retries)}, "")
}
|
||||
|
||||
// Get creates or gets a client for the protocol based on custom configuration
|
||||
func Get(options *types.Options, configuration *Configuration) (*retryabledns.Client, error) {
|
||||
if !(configuration.Retries > 1) {
|
||||
return normalClient, nil
|
||||
}
|
||||
hash := configuration.Hash()
|
||||
poolMutex.RLock()
|
||||
if client, ok := clientPool[hash]; ok {
|
||||
poolMutex.RUnlock()
|
||||
return client, nil
|
||||
}
|
||||
poolMutex.RUnlock()
|
||||
|
||||
client := retryabledns.New(defaultResolvers, configuration.Retries)
|
||||
|
||||
poolMutex.Lock()
|
||||
clientPool[hash] = client
|
||||
poolMutex.Unlock()
|
||||
return client, nil
|
||||
}
|
|
@ -0,0 +1,160 @@
|
|||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators/extractors"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators/matchers"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
)
|
||||
|
||||
// Match matches a generic data response again a given matcher
|
||||
func (r *Request) Match(data map[string]interface{}, matcher *matchers.Matcher) bool {
|
||||
partString := matcher.Part
|
||||
switch partString {
|
||||
case "body", "all", "":
|
||||
partString = "raw"
|
||||
}
|
||||
|
||||
item, ok := data[partString]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
itemStr := types.ToString(item)
|
||||
|
||||
switch matcher.GetType() {
|
||||
case matchers.StatusMatcher:
|
||||
statusCode, ok := data["rcode"]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return matcher.Result(matcher.MatchStatusCode(statusCode.(int)))
|
||||
case matchers.SizeMatcher:
|
||||
return matcher.Result(matcher.MatchSize(len(itemStr)))
|
||||
case matchers.WordsMatcher:
|
||||
return matcher.Result(matcher.MatchWords(itemStr))
|
||||
case matchers.RegexMatcher:
|
||||
return matcher.Result(matcher.MatchRegex(itemStr))
|
||||
case matchers.BinaryMatcher:
|
||||
return matcher.Result(matcher.MatchBinary(itemStr))
|
||||
case matchers.DSLMatcher:
|
||||
return matcher.Result(matcher.MatchDSL(data))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Extract performs extracting operation for a extractor on model and returns true or false.
|
||||
func (r *Request) Extract(data map[string]interface{}, extractor *extractors.Extractor) map[string]struct{} {
|
||||
part, ok := data[extractor.Part]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
partString := part.(string)
|
||||
|
||||
switch partString {
|
||||
case "body", "all":
|
||||
partString = "raw"
|
||||
}
|
||||
|
||||
item, ok := data[partString]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
itemStr := types.ToString(item)
|
||||
|
||||
switch extractor.GetType() {
|
||||
case extractors.RegexExtractor:
|
||||
return extractor.ExtractRegex(itemStr)
|
||||
case extractors.KValExtractor:
|
||||
return extractor.ExtractKval(data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// responseToDSLMap converts a DNS request/response pair to a map for use in
// DSL matching. Keys: host, matched, rcode, question, extra, answer, ns, raw,
// template-id, template-info, plus request when JSON request output is on.
func (r *Request) responseToDSLMap(req, resp *dns.Msg, host, matched string) output.InternalEvent {
	data := make(output.InternalEvent, 8)

	// Some data regarding the request metadata
	data["host"] = host
	data["matched"] = matched

	// The textual request is only stored when JSON request output was requested.
	if r.options.Options.JSONRequests {
		data["request"] = req.String()
	}

	data["rcode"] = resp.Rcode
	// Each message section is flattened to one concatenated string; the same
	// buffer is reused between sections (Reset keeps its capacity).
	buffer := &bytes.Buffer{}
	for _, question := range resp.Question {
		buffer.WriteString(question.String())
	}
	data["question"] = buffer.String()
	buffer.Reset()

	for _, extra := range resp.Extra {
		buffer.WriteString(extra.String())
	}
	data["extra"] = buffer.String()
	buffer.Reset()

	for _, answer := range resp.Answer {
		buffer.WriteString(answer.String())
	}
	data["answer"] = buffer.String()
	buffer.Reset()

	for _, ns := range resp.Ns {
		buffer.WriteString(ns.String())
	}
	data["ns"] = buffer.String()
	buffer.Reset()

	rawData := resp.String()
	data["raw"] = rawData
	data["template-id"] = r.options.TemplateID
	data["template-info"] = r.options.TemplateInfo
	return data
}
|
||||
|
||||
// MakeResultEvent creates a result event from internal wrapped event
|
||||
func (r *Request) MakeResultEvent(wrapped *output.InternalWrappedEvent) []*output.ResultEvent {
|
||||
results := make([]*output.ResultEvent, 0, len(wrapped.OperatorsResult.Matches)+1)
|
||||
|
||||
// If we have multiple matchers with names, write each of them separately.
|
||||
if len(wrapped.OperatorsResult.Matches) > 0 {
|
||||
for k := range wrapped.OperatorsResult.Matches {
|
||||
data := r.makeResultEventItem(wrapped)
|
||||
data.MatcherName = k
|
||||
results = append(results, data)
|
||||
}
|
||||
} else if len(wrapped.OperatorsResult.Extracts) > 0 {
|
||||
for k, v := range wrapped.OperatorsResult.Extracts {
|
||||
data := r.makeResultEventItem(wrapped)
|
||||
data.ExtractedResults = v
|
||||
data.ExtractorName = k
|
||||
results = append(results, data)
|
||||
}
|
||||
} else {
|
||||
data := r.makeResultEventItem(wrapped)
|
||||
results = append(results, data)
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
// makeResultEventItem builds a single ResultEvent from the wrapped event's
// internal data; callers set matcher/extractor specific fields afterwards.
func (r *Request) makeResultEventItem(wrapped *output.InternalWrappedEvent) *output.ResultEvent {
	data := &output.ResultEvent{
		TemplateID:       wrapped.InternalEvent["template-id"].(string),
		Info:             wrapped.InternalEvent["template-info"].(map[string]string),
		Type:             "dns",
		Host:             wrapped.InternalEvent["host"].(string),
		Matched:          wrapped.InternalEvent["matched"].(string),
		ExtractedResults: wrapped.OperatorsResult.OutputExtracts,
	}
	// Request/response text is only attached when JSON request output is on,
	// mirroring what responseToDSLMap stores in the internal event.
	if r.options.Options.JSONRequests {
		data.Request = wrapped.InternalEvent["request"].(string)
		data.Response = wrapped.InternalEvent["raw"].(string)
	}
	return data
}
|
|
@ -0,0 +1,89 @@
|
|||
package dns
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
)
|
||||
|
||||
var _ protocols.Request = &Request{}
|
||||
|
||||
// ExecuteWithResults executes the protocol requests and returns results instead of writing them.
|
||||
func (r *Request) ExecuteWithResults(input string, metadata output.InternalEvent, callback protocols.OutputEventCallback) error {
|
||||
// Parse the URL and return domain if URL.
|
||||
var domain string
|
||||
if isURL(input) {
|
||||
domain = extractDomain(input)
|
||||
} else {
|
||||
domain = input
|
||||
}
|
||||
|
||||
// Compile each request for the template based on the URL
|
||||
compiledRequest, err := r.Make(domain)
|
||||
if err != nil {
|
||||
r.options.Output.Request(r.options.TemplateID, domain, "dns", err)
|
||||
r.options.Progress.DecrementRequests(1)
|
||||
return errors.Wrap(err, "could not build request")
|
||||
}
|
||||
|
||||
if r.options.Options.Debug || r.options.Options.DebugRequests {
|
||||
gologger.Info().Str("domain", domain).Msgf("[%s] Dumped DNS request for %s", r.options.TemplateID, domain)
|
||||
fmt.Fprintf(os.Stderr, "%s\n", compiledRequest.String())
|
||||
}
|
||||
|
||||
// Send the request to the target servers
|
||||
resp, err := r.dnsClient.Do(compiledRequest)
|
||||
if err != nil {
|
||||
r.options.Output.Request(r.options.TemplateID, domain, "dns", err)
|
||||
r.options.Progress.DecrementRequests(1)
|
||||
return errors.Wrap(err, "could not send dns request")
|
||||
}
|
||||
r.options.Progress.IncrementRequests()
|
||||
|
||||
r.options.Output.Request(r.options.TemplateID, domain, "dns", err)
|
||||
gologger.Verbose().Msgf("[%s] Sent DNS request to %s", r.options.TemplateID, domain)
|
||||
|
||||
if r.options.Options.Debug || r.options.Options.DebugResponse {
|
||||
gologger.Debug().Msgf("[%s] Dumped DNS response for %s", r.options.TemplateID, domain)
|
||||
fmt.Fprintf(os.Stderr, "%s\n", resp.String())
|
||||
}
|
||||
ouputEvent := r.responseToDSLMap(compiledRequest, resp, input, input)
|
||||
|
||||
event := &output.InternalWrappedEvent{InternalEvent: ouputEvent}
|
||||
if r.CompiledOperators != nil {
|
||||
result, ok := r.CompiledOperators.Execute(ouputEvent, r.Match, r.Extract)
|
||||
if ok && result != nil {
|
||||
event.OperatorsResult = result
|
||||
event.Results = r.MakeResultEvent(event)
|
||||
}
|
||||
}
|
||||
callback(event)
|
||||
return nil
|
||||
}
|
||||
|
||||
// isURL tests a string to determine if it is a well-structured url or not.
func isURL(toTest string) bool {
	// It must first parse as an absolute request URI...
	if _, err := url.ParseRequestURI(toTest); err != nil {
		return false
	}
	// ...and additionally carry both a scheme and a host.
	parsed, err := url.Parse(toTest)
	return err == nil && parsed.Scheme != "" && parsed.Host != ""
}
|
||||
|
||||
// extractDomain extracts the domain name (hostname without port) of a URL,
// returning an empty string when the URL cannot be parsed.
func extractDomain(theURL string) string {
	parsed, err := url.Parse(theURL)
	if err != nil {
		return ""
	}
	return parsed.Hostname()
}
|
|
@ -0,0 +1,78 @@
|
|||
package file
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
)
|
||||
|
||||
// Request contains a File matching mechanism for local disk operations.
type Request struct {
	// MaxSize is the maximum size of the file to run request on.
	// By default, nuclei will process 5MB files and not go more than that.
	// It can be set to much lower or higher depending on use.
	MaxSize int `yaml:"max-size"`
	// NoRecursive specifies whether to not do recursive checks if folders are provided.
	NoRecursive bool `yaml:"no-recursive"`
	// Extensions is the list of extensions to perform matching on.
	// A single "*" entry enables matching on all extensions.
	Extensions []string `yaml:"extensions"`
	// ExtensionAllowlist is the list of file extensions to enforce allowing,
	// removing matching entries from the compiled denylist.
	ExtensionAllowlist []string `yaml:"allowlist"`
	// ExtensionDenylist is the list of file extensions to deny during matching,
	// merged with the built-in defaultDenylist.
	ExtensionDenylist []string `yaml:"denylist"`

	// Operators for the current request go here.
	operators.Operators `yaml:",inline"`
	// CompiledOperators is populated by Compile when matchers/extractors exist.
	CompiledOperators *operators.Operators

	// cache any variables that may be needed for operation.
	options           *protocols.ExecuterOptions // executer options stored by Compile
	extensions        map[string]struct{}        // compiled allowed-extension set
	allExtensions     bool                       // true when Extensions contains "*"
	extensionDenylist map[string]struct{}        // compiled denied-extension set
}

// defaultDenylist is the default list of extensions to be denied
var defaultDenylist = []string{".3g2", ".3gp", ".7z", ".apk", ".arj", ".avi", ".axd", ".bmp", ".css", ".csv", ".deb", ".dll", ".doc", ".drv", ".eot", ".exe", ".flv", ".gif", ".gifv", ".gz", ".h264", ".ico", ".iso", ".jar", ".jpeg", ".jpg", ".lock", ".m4a", ".m4v", ".map", ".mkv", ".mov", ".mp3", ".mp4", ".mpeg", ".mpg", ".msi", ".ogg", ".ogm", ".ogv", ".otf", ".pdf", ".pkg", ".png", ".ppt", ".psd", ".rar", ".rm", ".rpm", ".svg", ".swf", ".sys", ".tar.gz", ".tar", ".tif", ".tiff", ".ttf", ".txt", ".vob", ".wav", ".webm", ".wmv", ".woff", ".woff2", ".xcf", ".xls", ".xlsx", ".zip"}
|
||||
|
||||
// Compile compiles the protocol request for further execution.
|
||||
func (r *Request) Compile(options *protocols.ExecuterOptions) error {
|
||||
if len(r.Matchers) > 0 || len(r.Extractors) > 0 {
|
||||
compiled := &r.Operators
|
||||
if err := compiled.Compile(); err != nil {
|
||||
return errors.Wrap(err, "could not compile operators")
|
||||
}
|
||||
r.CompiledOperators = compiled
|
||||
}
|
||||
// By default use 5mb as max size to read.
|
||||
if r.MaxSize == 0 {
|
||||
r.MaxSize = 5 * 1024 * 1024
|
||||
}
|
||||
r.options = options
|
||||
|
||||
r.extensions = make(map[string]struct{})
|
||||
r.extensionDenylist = make(map[string]struct{})
|
||||
|
||||
for _, extension := range r.Extensions {
|
||||
if extension == "*" {
|
||||
r.allExtensions = true
|
||||
} else {
|
||||
r.extensions[extension] = struct{}{}
|
||||
}
|
||||
}
|
||||
for _, extension := range defaultDenylist {
|
||||
r.extensionDenylist[extension] = struct{}{}
|
||||
}
|
||||
for _, extension := range r.ExtensionDenylist {
|
||||
r.extensionDenylist[extension] = struct{}{}
|
||||
}
|
||||
for _, extension := range r.ExtensionAllowlist {
|
||||
delete(r.extensionDenylist, extension)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Requests returns the total number of requests the YAML rule will perform.
// A file template always counts as one request regardless of how many files
// end up being scanned.
func (r *Request) Requests() int {
	return 1
}
|
|
@ -0,0 +1,117 @@
|
|||
package file
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/karrick/godirwalk"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
)
|
||||
|
||||
// getInputPaths parses the specified input paths and returns a compiled
|
||||
// list of finished absolute paths to the files evaluating any allowlist, denylist,
|
||||
// glob, file or folders, etc.
|
||||
func (r *Request) getInputPaths(target string, callback func(string)) error {
|
||||
processed := make(map[string]struct{})
|
||||
|
||||
// Template input includes a wildcard
|
||||
if strings.Contains(target, "*") {
|
||||
err := r.findGlobPathMatches(target, processed, callback)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not find glob matches")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Template input is either a file or a directory
|
||||
file, err := r.findFileMatches(target, processed, callback)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not find file")
|
||||
}
|
||||
if file {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Recursively walk down the Templates directory and run all
|
||||
// the template file checks
|
||||
err = r.findDirectoryMatches(target, processed, callback)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not find directory matches")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// findGlobPathMatches returns the matched files from a glob path
|
||||
func (r *Request) findGlobPathMatches(absPath string, processed map[string]struct{}, callback func(string)) error {
|
||||
matches, err := filepath.Glob(absPath)
|
||||
if err != nil {
|
||||
return errors.Errorf("wildcard found, but unable to glob: %s\n", err)
|
||||
}
|
||||
for _, match := range matches {
|
||||
if !r.validatePath(match) {
|
||||
continue
|
||||
}
|
||||
if _, ok := processed[match]; !ok {
|
||||
processed[match] = struct{}{}
|
||||
callback(match)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// findFileMatches finds if a path is an absolute file. If the path
|
||||
// is a file, it returns true otherwise false with no errors.
|
||||
func (r *Request) findFileMatches(absPath string, processed map[string]struct{}, callback func(string)) (bool, error) {
|
||||
info, err := os.Stat(absPath)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !info.Mode().IsRegular() {
|
||||
return false, nil
|
||||
}
|
||||
if _, ok := processed[absPath]; !ok {
|
||||
processed[absPath] = struct{}{}
|
||||
callback(absPath)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// findDirectoryMatches finds matches for templates from a directory
|
||||
func (r *Request) findDirectoryMatches(absPath string, processed map[string]struct{}, callback func(string)) error {
|
||||
err := godirwalk.Walk(absPath, &godirwalk.Options{
|
||||
Unsorted: true,
|
||||
ErrorCallback: func(fsPath string, err error) godirwalk.ErrorAction {
|
||||
return godirwalk.SkipNode
|
||||
},
|
||||
Callback: func(path string, d *godirwalk.Dirent) error {
|
||||
if !r.validatePath(path) {
|
||||
return nil
|
||||
}
|
||||
if _, ok := processed[path]; !ok {
|
||||
callback(path)
|
||||
processed[path] = struct{}{}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// validatePath validates a file path for blacklist and whitelist options
|
||||
func (r *Request) validatePath(item string) bool {
|
||||
extension := path.Ext(item)
|
||||
if len(r.extensions) > 0 && !r.allExtensions {
|
||||
if _, ok := r.extensions[extension]; ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
if _, ok := r.extensionDenylist[extension]; ok {
|
||||
gologger.Verbose().Msgf("Ignoring path %s due to denylist item %s\n", item, extension)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
|
@ -0,0 +1,118 @@
|
|||
package file
|
||||
|
||||
import (
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators/extractors"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators/matchers"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
)
|
||||
|
||||
// Match matches a generic data response again a given matcher
|
||||
func (r *Request) Match(data map[string]interface{}, matcher *matchers.Matcher) bool {
|
||||
partString := matcher.Part
|
||||
switch partString {
|
||||
case "body", "all", "":
|
||||
partString = "raw"
|
||||
}
|
||||
|
||||
item, ok := data[partString]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
itemStr := types.ToString(item)
|
||||
|
||||
switch matcher.GetType() {
|
||||
case matchers.SizeMatcher:
|
||||
return matcher.Result(matcher.MatchSize(len(itemStr)))
|
||||
case matchers.WordsMatcher:
|
||||
return matcher.Result(matcher.MatchWords(itemStr))
|
||||
case matchers.RegexMatcher:
|
||||
return matcher.Result(matcher.MatchRegex(itemStr))
|
||||
case matchers.BinaryMatcher:
|
||||
return matcher.Result(matcher.MatchBinary(itemStr))
|
||||
case matchers.DSLMatcher:
|
||||
return matcher.Result(matcher.MatchDSL(data))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Extract performs extracting operation for a extractor on model and returns true or false.
|
||||
func (r *Request) Extract(data map[string]interface{}, extractor *extractors.Extractor) map[string]struct{} {
|
||||
part, ok := data[extractor.Part]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
partString := part.(string)
|
||||
|
||||
switch partString {
|
||||
case "body", "all", "":
|
||||
partString = "raw"
|
||||
}
|
||||
|
||||
item, ok := data[partString]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
itemStr := types.ToString(item)
|
||||
|
||||
switch extractor.GetType() {
|
||||
case extractors.RegexExtractor:
|
||||
return extractor.ExtractRegex(itemStr)
|
||||
case extractors.KValExtractor:
|
||||
return extractor.ExtractKval(data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// responseToDSLMap converts a file response (the raw file contents plus
// request metadata) to a map for use in DSL matching.
func (r *Request) responseToDSLMap(raw string, host, matched string) output.InternalEvent {
	data := make(output.InternalEvent, 3)

	// Some data regarding the request metadata
	data["host"] = host       // the original input target (file or directory)
	data["matched"] = matched // the concrete file path that was read
	data["raw"] = raw         // full file contents as a string
	data["template-id"] = r.options.TemplateID
	data["template-info"] = r.options.TemplateInfo
	return data
}
|
||||
|
||||
// MakeResultEvent creates a result event from internal wrapped event
|
||||
func (r *Request) MakeResultEvent(wrapped *output.InternalWrappedEvent) []*output.ResultEvent {
|
||||
results := make([]*output.ResultEvent, 0, len(wrapped.OperatorsResult.Matches)+1)
|
||||
|
||||
// If we have multiple matchers with names, write each of them separately.
|
||||
if len(wrapped.OperatorsResult.Matches) > 0 {
|
||||
for k := range wrapped.OperatorsResult.Matches {
|
||||
data := r.makeResultEventItem(wrapped)
|
||||
data.MatcherName = k
|
||||
results = append(results, data)
|
||||
}
|
||||
} else if len(wrapped.OperatorsResult.Extracts) > 0 {
|
||||
for k, v := range wrapped.OperatorsResult.Extracts {
|
||||
data := r.makeResultEventItem(wrapped)
|
||||
data.ExtractedResults = v
|
||||
data.ExtractorName = k
|
||||
results = append(results, data)
|
||||
}
|
||||
} else {
|
||||
data := r.makeResultEventItem(wrapped)
|
||||
results = append(results, data)
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
// makeResultEventItem builds a single ResultEvent from the wrapped event's
// internal data; callers set matcher/extractor specific fields afterwards.
func (r *Request) makeResultEventItem(wrapped *output.InternalWrappedEvent) *output.ResultEvent {
	data := &output.ResultEvent{
		TemplateID:       wrapped.InternalEvent["template-id"].(string),
		Info:             wrapped.InternalEvent["template-info"].(map[string]string),
		Type:             "file",
		Host:             wrapped.InternalEvent["host"].(string),
		Matched:          wrapped.InternalEvent["matched"].(string),
		ExtractedResults: wrapped.OperatorsResult.OutputExtracts,
	}
	// Raw file contents are only attached when JSON request output is enabled.
	if r.options.Options.JSONRequests {
		data.Response = wrapped.InternalEvent["raw"].(string)
	}
	return data
}
|
|
@ -0,0 +1,78 @@
|
|||
package file
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/tostring"
|
||||
"github.com/remeh/sizedwaitgroup"
|
||||
)
|
||||
|
||||
var _ protocols.Request = &Request{}
|
||||
|
||||
// ExecuteWithResults executes the protocol requests and returns results instead of writing them.
|
||||
func (r *Request) ExecuteWithResults(input string, metadata output.InternalEvent, callback protocols.OutputEventCallback) error {
|
||||
wg := sizedwaitgroup.New(r.options.Options.RateLimit)
|
||||
|
||||
err := r.getInputPaths(input, func(data string) {
|
||||
wg.Add()
|
||||
|
||||
go func(data string) {
|
||||
defer wg.Done()
|
||||
|
||||
file, err := os.Open(data)
|
||||
if err != nil {
|
||||
gologger.Error().Msgf("Could not open file path %s: %s\n", data, err)
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
gologger.Error().Msgf("Could not stat file path %s: %s\n", data, err)
|
||||
return
|
||||
}
|
||||
if stat.Size() >= int64(r.MaxSize) {
|
||||
gologger.Verbose().Msgf("Could not process path %s: exceeded max size\n", data)
|
||||
return
|
||||
}
|
||||
|
||||
buffer, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
gologger.Error().Msgf("Could not read file path %s: %s\n", data, err)
|
||||
return
|
||||
}
|
||||
dataStr := tostring.UnsafeToString(buffer)
|
||||
|
||||
if r.options.Options.Debug || r.options.Options.DebugRequests {
|
||||
gologger.Info().Msgf("[%s] Dumped file request for %s", r.options.TemplateID, data)
|
||||
fmt.Fprintf(os.Stderr, "%s\n", dataStr)
|
||||
}
|
||||
gologger.Verbose().Msgf("[%s] Sent FILE request to %s", r.options.TemplateID, data)
|
||||
ouputEvent := r.responseToDSLMap(dataStr, input, data)
|
||||
|
||||
event := &output.InternalWrappedEvent{InternalEvent: ouputEvent}
|
||||
if r.CompiledOperators != nil {
|
||||
result, ok := r.CompiledOperators.Execute(ouputEvent, r.Match, r.Extract)
|
||||
if ok && result != nil {
|
||||
event.OperatorsResult = result
|
||||
event.Results = r.MakeResultEvent(event)
|
||||
}
|
||||
}
|
||||
callback(event)
|
||||
}(data)
|
||||
})
|
||||
wg.Wait()
|
||||
if err != nil {
|
||||
r.options.Output.Request(r.options.TemplateID, input, "file", err)
|
||||
r.options.Progress.DecrementRequests(1)
|
||||
return errors.Wrap(err, "could not send file request")
|
||||
}
|
||||
r.options.Progress.IncrementRequests()
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,324 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Knetic/govaluate"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators/common/dsl"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/generators"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/replacer"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/http/race"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/http/raw"
|
||||
"github.com/projectdiscovery/rawhttp"
|
||||
"github.com/projectdiscovery/retryablehttp-go"
|
||||
)
|
||||
|
||||
var (
	// urlWithPortRegex matches a {{BaseURL}} token followed by an explicit port.
	urlWithPortRegex = regexp.MustCompile(`{{BaseURL}}:(\d+)`)
	// templateExpressionRegex matches any {{...}} template expression.
	templateExpressionRegex = regexp.MustCompile(`(?m)\{\{[^}]+\}\}`)
)
|
||||
|
||||
// requestGenerator generates requests sequentially based on various
// configurations for a http request template.
//
// If payload values are present, an iterator is created for the payload
// values. Paths and Raw requests are supported as base input, so
// it will automatically select between them based on the template.
type requestGenerator struct {
	currentIndex    int                  // index of the next path / raw request to emit
	request         *Request             // template request being generated from
	payloadIterator *generators.Iterator // nil unless the template declares payloads
}
||||
|
||||
// newGenerator creates a new request generator instance for the template request.
func (r *Request) newGenerator() *requestGenerator {
	generator := &requestGenerator{request: r}

	// Only templates with payloads need a payload iterator.
	if len(r.Payloads) > 0 {
		generator.payloadIterator = r.generator.NewIterator()
	}
	return generator
}
|
||||
|
||||
// nextValue returns the next path or the next raw request depending on user input.
// It returns false if all the inputs have been exhausted by the generator instance.
func (r *requestGenerator) nextValue() (string, map[string]interface{}, bool) {
	// If we have paths, return the next path.
	if len(r.request.Path) > 0 && r.currentIndex < len(r.request.Path) {
		if item := r.request.Path[r.currentIndex]; item != "" {
			r.currentIndex++
			return item, nil, true
		}
	}

	// If we have raw requests, start with the request at current index.
	// If we are not at the start, then check if the iterator for payloads
	// has finished if there are any.
	//
	// If the iterator has finished for the current raw request
	// then reset it and move on to the next value, otherwise use the last request.
	if len(r.request.Raw) > 0 && r.currentIndex < len(r.request.Raw) {
		if r.payloadIterator != nil {
			payload, ok := r.payloadIterator.Value()
			if !ok {
				// Payloads for the current raw request are exhausted:
				// advance to the next raw request and restart the iterator.
				r.currentIndex++
				r.payloadIterator.Reset()

				// No more payloads request for us now.
				if len(r.request.Raw) == r.currentIndex {
					return "", nil, false
				}
				if item := r.request.Raw[r.currentIndex]; item != "" {
					newPayload, ok := r.payloadIterator.Value()
					return item, newPayload, ok
				}
				return "", nil, false
			}
			// Same raw request, next payload combination.
			return r.request.Raw[r.currentIndex], payload, true
		}
		// No payloads: plain sequential iteration over the raw requests.
		if item := r.request.Raw[r.currentIndex]; item != "" {
			r.currentIndex++
			return item, nil, true
		}
	}
	return "", nil, false
}
|
||||
|
||||
// generatedRequest is a single wrapped generated request for a template request
type generatedRequest struct {
	original        *Request                // template request this was generated from
	rawRequest      *raw.Request            // set for unsafe (rawhttp) requests
	meta            map[string]interface{}  // payload values used to build this request
	pipelinedClient *rawhttp.PipelineClient // pipelining client — not populated in this file; presumably set by the executer
	request         *retryablehttp.Request  // set for standard (retryablehttp) requests
}
|
||||
|
||||
// Make creates a http request for the provided input.
// It returns io.EOF as error when all the requests have been exhausted.
func (r *requestGenerator) Make(baseURL string, dynamicValues map[string]interface{}) (*generatedRequest, error) {
	baseURL = strings.TrimSuffix(baseURL, "/")

	data, payloads, ok := r.nextValue()
	if !ok {
		// Generator is exhausted: signal the caller with io.EOF.
		return nil, io.EOF
	}
	ctx := context.Background()

	parsed, err := url.Parse(baseURL)
	if err != nil {
		return nil, err
	}

	hostname := parsed.Host
	// Base template variables available to every generated request.
	values := generators.MergeMaps(dynamicValues, map[string]interface{}{
		"BaseURL":  baseURLWithTemplatePrefs(data, parsed),
		"Hostname": hostname,
	})

	// If data contains \n it's a raw request, process it like that. Else
	// continue with the template based request flow.
	if strings.Contains(data, "\n") {
		return r.makeHTTPRequestFromRaw(ctx, baseURL, data, values, payloads)
	}
	return r.makeHTTPRequestFromModel(ctx, data, values)
}
|
||||
|
||||
// Total returns the total number of requests for the generator.
func (r *requestGenerator) Total() int {
	if r.payloadIterator != nil {
		// Every raw request is replayed once per remaining payload combination.
		return len(r.request.Raw) * r.payloadIterator.Remaining()
	}
	return len(r.request.Path)
}
|
||||
|
||||
// baseURLWithTemplatePrefs returns the url for BaseURL keeping
|
||||
// the template port and path preference
|
||||
func baseURLWithTemplatePrefs(data string, parsedURL *url.URL) string {
|
||||
// template port preference over input URL port
|
||||
// template has port
|
||||
hasPort := len(urlWithPortRegex.FindStringSubmatch(data)) > 0
|
||||
if hasPort {
|
||||
// check if also the input contains port, in this case extracts the url
|
||||
if hostname, _, err := net.SplitHostPort(parsedURL.Host); err == nil {
|
||||
parsedURL.Host = hostname
|
||||
}
|
||||
}
|
||||
return parsedURL.String()
|
||||
}
|
||||
|
||||
// MakeHTTPRequestFromModel creates a *http.Request from a request template
|
||||
func (r *requestGenerator) makeHTTPRequestFromModel(ctx context.Context, data string, values map[string]interface{}) (*generatedRequest, error) {
|
||||
URL := replacer.New(values).Replace(data)
|
||||
|
||||
// Build a request on the specified URL
|
||||
req, err := http.NewRequestWithContext(ctx, r.request.Method, URL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
request, err := r.fillRequest(req, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &generatedRequest{request: request, original: r.request}, nil
|
||||
}
|
||||
|
||||
// makeHTTPRequestFromRaw creates a *http.Request from a raw request
|
||||
func (r *requestGenerator) makeHTTPRequestFromRaw(ctx context.Context, baseURL, data string, values, payloads map[string]interface{}) (*generatedRequest, error) {
|
||||
// Add trailing line
|
||||
data += "\n"
|
||||
|
||||
// If we have payloads, handle them by evaluating them at runtime.
|
||||
if len(r.request.Payloads) > 0 {
|
||||
finalPayloads, err := r.getPayloadValues(baseURL, payloads)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r.handleRawWithPaylods(ctx, data, baseURL, values, finalPayloads)
|
||||
}
|
||||
return r.handleRawWithPaylods(ctx, data, baseURL, values, nil)
|
||||
}
|
||||
|
||||
// handleRawWithPaylods handles raw requests along with payloads.
func (r *requestGenerator) handleRawWithPaylods(ctx context.Context, rawRequest, baseURL string, values, generatorValues map[string]interface{}) (*generatedRequest, error) {
	baseValues := generators.CopyMap(values)
	finalValues := generators.MergeMaps(baseValues, generatorValues)

	// Replace the dynamic variables in the URL if any
	rawRequest = replacer.New(finalValues).Replace(rawRequest)

	// Evaluate any remaining {{...}} expressions (DSL helper functions etc.)
	// against the merged values and collect their results keyed by expression.
	dynamicValues := make(map[string]interface{})
	for _, match := range templateExpressionRegex.FindAllString(rawRequest, -1) {
		// check if the match contains a dynamic variable
		expr := generators.TrimDelimiters(match)
		compiled, err := govaluate.NewEvaluableExpressionWithFunctions(expr, dsl.HelperFunctions())
		if err != nil {
			return nil, err
		}
		result, err := compiled.Evaluate(finalValues)
		if err != nil {
			return nil, err
		}
		dynamicValues[expr] = result
	}

	// Replacer dynamic values if any in raw request and parse it
	rawRequest = replacer.New(dynamicValues).Replace(rawRequest)
	rawRequestData, err := raw.Parse(rawRequest, baseURL, r.request.Unsafe)
	if err != nil {
		return nil, err
	}

	// rawhttp: unsafe requests carry the parsed raw request for the rawhttp client.
	if r.request.Unsafe {
		unsafeReq := &generatedRequest{rawRequest: rawRequestData, meta: generatorValues, original: r.request}
		return unsafeReq, nil
	}

	// retryablehttp
	var body io.ReadCloser
	body = ioutil.NopCloser(strings.NewReader(rawRequestData.Data))
	if r.request.Race {
		// More or less this ensures that all requests hit the endpoint at the same approximated time
		// Todo: sync internally upon writing latest request byte
		body = race.NewOpenGateWithTimeout(body, time.Duration(2)*time.Second)
	}

	req, err := http.NewRequestWithContext(ctx, rawRequestData.Method, rawRequestData.FullURL, body)
	if err != nil {
		return nil, err
	}

	// copy headers
	for key, value := range rawRequestData.Headers {
		req.Header[key] = []string{value}
	}

	request, err := r.fillRequest(req, values)
	if err != nil {
		return nil, err
	}
	return &generatedRequest{request: request, meta: generatorValues, original: r.request}, nil
}
|
||||
|
||||
// fillRequest fills various headers in the request with values
|
||||
func (r *requestGenerator) fillRequest(req *http.Request, values map[string]interface{}) (*retryablehttp.Request, error) {
|
||||
// Set the header values requested
|
||||
replacer := replacer.New(values)
|
||||
for header, value := range r.request.Headers {
|
||||
req.Header[header] = []string{replacer.Replace(value)}
|
||||
}
|
||||
|
||||
// In case of multiple threads the underlying connection should remain open to allow reuse
|
||||
if r.request.Threads <= 0 && req.Header.Get("Connection") == "" {
|
||||
req.Close = true
|
||||
}
|
||||
|
||||
// Check if the user requested a request body
|
||||
if r.request.Body != "" {
|
||||
req.Body = ioutil.NopCloser(strings.NewReader(r.request.Body))
|
||||
}
|
||||
setHeader(req, "User-Agent", "Nuclei - Open-source project (github.com/projectdiscovery/nuclei)")
|
||||
|
||||
// raw requests are left untouched
|
||||
if len(r.request.Raw) > 0 {
|
||||
return retryablehttp.FromRequest(req)
|
||||
}
|
||||
setHeader(req, "Accept", "*/*")
|
||||
setHeader(req, "Accept-Language", "en")
|
||||
|
||||
return retryablehttp.FromRequest(req)
|
||||
}
|
||||
|
||||
// setHeader sets some headers only if the header wasn't supplied by the user
|
||||
func setHeader(req *http.Request, name, value string) {
|
||||
if _, ok := req.Header[name]; !ok {
|
||||
req.Header.Set(name, value)
|
||||
}
|
||||
}
|
||||
|
||||
// getPayloadValues returns current payload values for a request
|
||||
func (r *requestGenerator) getPayloadValues(reqURL string, templatePayloads map[string]interface{}) (map[string]interface{}, error) {
|
||||
payloadProcessedValues := make(map[string]interface{})
|
||||
|
||||
for k, v := range templatePayloads {
|
||||
kexp := v.(string)
|
||||
// if it doesn't containing markups, we just continue
|
||||
if !strings.Contains(kexp, replacer.MarkerParenthesisOpen) || strings.Contains(kexp, replacer.MarkerParenthesisClose) || strings.Contains(kexp, replacer.MarkerGeneral) {
|
||||
payloadProcessedValues[k] = v
|
||||
continue
|
||||
}
|
||||
// attempts to expand expressions
|
||||
compiled, err := govaluate.NewEvaluableExpressionWithFunctions(kexp, dsl.HelperFunctions())
|
||||
if err != nil {
|
||||
// it is a simple literal payload => proceed with literal value
|
||||
payloadProcessedValues[k] = v
|
||||
continue
|
||||
}
|
||||
// it is an expression - try to solve it
|
||||
expValue, err := compiled.Evaluate(templatePayloads)
|
||||
if err != nil {
|
||||
// an error occurred => proceed with literal value
|
||||
payloadProcessedValues[k] = v
|
||||
continue
|
||||
}
|
||||
payloadProcessedValues[k] = fmt.Sprint(expValue)
|
||||
}
|
||||
var err error
|
||||
if len(payloadProcessedValues) == 0 {
|
||||
err = ErrNoPayload
|
||||
}
|
||||
return payloadProcessedValues, err
|
||||
}
|
||||
|
||||
// ErrNoPayload error to avoid the additional base null request
|
||||
var ErrNoPayload = fmt.Errorf("no payload found")
|
|
@ -0,0 +1,70 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/generators"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestRequestGeneratorPaths(t *testing.T) {
|
||||
req := &Request{
|
||||
Path: []string{"{{BaseURL}}/test", "{{BaseURL}}/test.php"},
|
||||
}
|
||||
generator := req.newGenerator()
|
||||
var payloads []string
|
||||
for {
|
||||
raw, _, ok := generator.nextValue()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
payloads = append(payloads, raw)
|
||||
}
|
||||
require.Equal(t, req.Path, payloads, "Could not get correct paths")
|
||||
}
|
||||
|
||||
func TestRequestGeneratorClusterSingle(t *testing.T) {
|
||||
var err error
|
||||
|
||||
req := &Request{
|
||||
Payloads: map[string]interface{}{"username": []string{"admin", "tomcat", "manager"}, "password": []string{"password", "test", "secret"}},
|
||||
attackType: generators.ClusterBomb,
|
||||
Raw: []string{`GET /{{username}}:{{password}} HTTP/1.1`},
|
||||
}
|
||||
req.generator, err = generators.New(req.Payloads, req.attackType, "")
|
||||
require.Nil(t, err, "could not create generator")
|
||||
|
||||
generator := req.newGenerator()
|
||||
var payloads []map[string]interface{}
|
||||
for {
|
||||
_, data, ok := generator.nextValue()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
payloads = append(payloads, data)
|
||||
}
|
||||
require.Equal(t, 9, len(payloads), "Could not get correct number of payloads")
|
||||
}
|
||||
|
||||
func TestRequestGeneratorClusterMultipleRaw(t *testing.T) {
|
||||
var err error
|
||||
|
||||
req := &Request{
|
||||
Payloads: map[string]interface{}{"username": []string{"admin", "tomcat", "manager"}, "password": []string{"password", "test", "secret"}},
|
||||
attackType: generators.ClusterBomb,
|
||||
Raw: []string{`GET /{{username}}:{{password}} HTTP/1.1`, `GET /{{username}}@{{password}} HTTP/1.1`},
|
||||
}
|
||||
req.generator, err = generators.New(req.Payloads, req.attackType, "")
|
||||
require.Nil(t, err, "could not create generator")
|
||||
|
||||
generator := req.newGenerator()
|
||||
var payloads []map[string]interface{}
|
||||
for {
|
||||
_, data, ok := generator.nextValue()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
payloads = append(payloads, data)
|
||||
}
|
||||
require.Equal(t, 18, len(payloads), "Could not get correct number of payloads")
|
||||
}
|
|
@ -0,0 +1,29 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/compare"
|
||||
)
|
||||
|
||||
// CanCluster returns true if the request can be clustered.
|
||||
//
|
||||
// This used by the clustering engine to decide whether two requests
|
||||
// are similar enough to be considered one and can be checked by
|
||||
// just adding the matcher/extractors for the request and the correct IDs.
|
||||
func (r *Request) CanCluster(other *Request) bool {
|
||||
if len(r.Payloads) > 0 || len(r.Raw) > 0 || len(r.Body) > 0 || r.Unsafe {
|
||||
return false
|
||||
}
|
||||
if r.Method != other.Method ||
|
||||
r.MaxRedirects != other.MaxRedirects ||
|
||||
r.CookieReuse != other.CookieReuse ||
|
||||
r.Redirects != other.Redirects {
|
||||
return false
|
||||
}
|
||||
if !compare.StringSlice(r.Path, other.Path) {
|
||||
return false
|
||||
}
|
||||
if !compare.StringMap(r.Headers, other.Headers) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
|
@ -0,0 +1,149 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/generators"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/http/httpclientpool"
|
||||
"github.com/projectdiscovery/rawhttp"
|
||||
"github.com/projectdiscovery/retryablehttp-go"
|
||||
)
|
||||
|
||||
// Request contains a http request to be made from a template
|
||||
type Request struct {
|
||||
// Name is the name of the request
|
||||
Name string `yaml:"Name"`
|
||||
// AttackType is the attack type
|
||||
// Sniper, PitchFork and ClusterBomb. Default is Sniper
|
||||
AttackType string `yaml:"attack"`
|
||||
// Method is the request method, whether GET, POST, PUT, etc
|
||||
Method string `yaml:"method"`
|
||||
// Body is an optional parameter which contains the request body for POST methods, etc
|
||||
Body string `yaml:"body"`
|
||||
// Path contains the path/s for the request
|
||||
Path []string `yaml:"path"`
|
||||
// Raw contains raw requests
|
||||
Raw []string `yaml:"raw"`
|
||||
// Path contains the path/s for the request variables
|
||||
Payloads map[string]interface{} `yaml:"payloads"`
|
||||
// Headers contains headers to send with the request
|
||||
Headers map[string]string `yaml:"headers"`
|
||||
// RaceNumberRequests is the number of same request to send in race condition attack
|
||||
RaceNumberRequests int `yaml:"race_count"`
|
||||
// MaxRedirects is the maximum number of redirects that should be followed.
|
||||
MaxRedirects int `yaml:"max-redirects"`
|
||||
// PipelineConcurrentConnections is number of connections in pipelining
|
||||
PipelineConcurrentConnections int `yaml:"pipeline-concurrent-connections"`
|
||||
// PipelineRequestsPerConnection is number of requests in pipelining
|
||||
PipelineRequestsPerConnection int `yaml:"pipeline-requests-per-connection"`
|
||||
// Threads specifies number of threads for sending requests
|
||||
Threads int `yaml:"threads"`
|
||||
// CookieReuse is an optional setting that makes cookies shared within requests
|
||||
CookieReuse bool `yaml:"cookie-reuse"`
|
||||
// Redirects specifies whether redirects should be followed.
|
||||
Redirects bool `yaml:"redirects"`
|
||||
// Pipeline defines if the attack should be performed with HTTP 1.1 Pipelining (race conditions/billions requests)
|
||||
// All requests must be indempotent (GET/POST)
|
||||
Pipeline bool `yaml:"pipeline"`
|
||||
// Specify in order to skip request RFC normalization
|
||||
Unsafe bool `yaml:"unsafe"`
|
||||
// DisableAutoHostname Enable/Disable Host header for unsafe raw requests
|
||||
DisableAutoHostname bool `yaml:"disable-automatic-host-header"`
|
||||
// DisableAutoContentLength Enable/Disable Content-Length header for unsafe raw requests
|
||||
DisableAutoContentLength bool `yaml:"disable-automatic-content-length-header"`
|
||||
// Race determines if all the request have to be attempted at the same time
|
||||
// The minimum number fof requests is determined by threads
|
||||
Race bool `yaml:"race"`
|
||||
|
||||
// Operators for the current request go here.
|
||||
operators.Operators `yaml:",inline"`
|
||||
CompiledOperators *operators.Operators
|
||||
|
||||
options *protocols.ExecuterOptions
|
||||
attackType generators.Type
|
||||
totalRequests int
|
||||
customHeaders []string
|
||||
generator *generators.Generator // optional, only enabled when using payloads
|
||||
httpClient *retryablehttp.Client
|
||||
rawhttpClient *rawhttp.Client
|
||||
}
|
||||
|
||||
// Compile compiles the protocol request for further execution.
|
||||
func (r *Request) Compile(options *protocols.ExecuterOptions) error {
|
||||
client, err := httpclientpool.Get(options.Options, &httpclientpool.Configuration{
|
||||
Threads: r.Threads,
|
||||
MaxRedirects: r.MaxRedirects,
|
||||
FollowRedirects: r.Redirects,
|
||||
CookieReuse: r.CookieReuse,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get dns client")
|
||||
}
|
||||
r.httpClient = client
|
||||
r.options = options
|
||||
for _, option := range r.options.Options.CustomHeaders {
|
||||
r.customHeaders = append(r.customHeaders, option)
|
||||
}
|
||||
|
||||
if len(r.Raw) > 0 {
|
||||
r.rawhttpClient = httpclientpool.GetRawHTTP()
|
||||
}
|
||||
if len(r.Matchers) > 0 || len(r.Extractors) > 0 {
|
||||
compiled := &r.Operators
|
||||
if err := compiled.Compile(); err != nil {
|
||||
return errors.Wrap(err, "could not compile operators")
|
||||
}
|
||||
r.CompiledOperators = compiled
|
||||
}
|
||||
|
||||
if len(r.Payloads) > 0 {
|
||||
attackType := r.AttackType
|
||||
if attackType == "" {
|
||||
attackType = "sniper"
|
||||
}
|
||||
r.attackType = generators.StringToType[attackType]
|
||||
|
||||
// Resolve payload paths if they are files.
|
||||
for name, payload := range r.Payloads {
|
||||
switch pt := payload.(type) {
|
||||
case string:
|
||||
elements := strings.Split(pt, "\n")
|
||||
//golint:gomnd // this is not a magic number
|
||||
if len(elements) < 2 {
|
||||
final, err := options.Catalogue.ResolvePath(elements[0], options.TemplatePath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not read payload file")
|
||||
}
|
||||
r.Payloads[name] = final
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r.generator, err = generators.New(r.Payloads, r.attackType, r.options.TemplatePath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not parse payloads")
|
||||
}
|
||||
}
|
||||
r.options = options
|
||||
r.totalRequests = r.Requests()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Requests returns the total number of requests the YAML rule will perform
|
||||
func (r *Request) Requests() int {
|
||||
if r.generator != nil {
|
||||
payloadRequests := r.generator.NewIterator().Total() * len(r.Raw)
|
||||
return payloadRequests
|
||||
}
|
||||
if len(r.Raw) > 0 {
|
||||
requests := len(r.Raw)
|
||||
if requests == 1 && r.RaceNumberRequests != 0 {
|
||||
requests = requests * r.RaceNumberRequests
|
||||
}
|
||||
return requests
|
||||
}
|
||||
return len(r.Path)
|
||||
}
|
|
@ -0,0 +1,223 @@
|
|||
package httpclientpool
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/cookiejar"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/fastdialer/fastdialer"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
"github.com/projectdiscovery/rawhttp"
|
||||
"github.com/projectdiscovery/retryablehttp-go"
|
||||
"golang.org/x/net/proxy"
|
||||
"golang.org/x/net/publicsuffix"
|
||||
)
|
||||
|
||||
var (
|
||||
Dialer *fastdialer.Dialer
|
||||
rawhttpClient *rawhttp.Client
|
||||
poolMutex *sync.RWMutex
|
||||
normalClient *retryablehttp.Client
|
||||
clientPool map[string]*retryablehttp.Client
|
||||
)
|
||||
|
||||
// Init initializes the clientpool implementation
|
||||
func Init(options *types.Options) error {
|
||||
// Don't create clients if already created in past.
|
||||
if normalClient != nil {
|
||||
return nil
|
||||
}
|
||||
poolMutex = &sync.RWMutex{}
|
||||
clientPool = make(map[string]*retryablehttp.Client)
|
||||
|
||||
client, err := wrappedGet(options, &Configuration{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
normalClient = client
|
||||
return nil
|
||||
}
|
||||
|
||||
// Configuration contains the custom configuration options for a client
|
||||
type Configuration struct {
|
||||
// CookieReuse enables cookie reuse for the http client (cookiejar impl)
|
||||
CookieReuse bool
|
||||
// Threads contains the threads for the client
|
||||
Threads int
|
||||
// MaxRedirects is the maximum number of redirects to follow
|
||||
MaxRedirects int
|
||||
// FollowRedirects specifies whether to follow redirects
|
||||
FollowRedirects bool
|
||||
}
|
||||
|
||||
// Hash returns the hash of the configuration to allow client pooling
|
||||
func (c *Configuration) Hash() string {
|
||||
builder := &strings.Builder{}
|
||||
builder.Grow(16)
|
||||
builder.WriteString("t")
|
||||
builder.WriteString(strconv.Itoa(c.Threads))
|
||||
builder.WriteString("m")
|
||||
builder.WriteString(strconv.Itoa(c.MaxRedirects))
|
||||
builder.WriteString("f")
|
||||
builder.WriteString(strconv.FormatBool(c.FollowRedirects))
|
||||
builder.WriteString("r")
|
||||
builder.WriteString(strconv.FormatBool(c.CookieReuse))
|
||||
hash := builder.String()
|
||||
return hash
|
||||
}
|
||||
|
||||
// GetRawHTTP returns the rawhttp request client
|
||||
func GetRawHTTP() *rawhttp.Client {
|
||||
if rawhttpClient == nil {
|
||||
rawhttpClient = rawhttp.NewClient(rawhttp.DefaultOptions)
|
||||
}
|
||||
return rawhttpClient
|
||||
}
|
||||
|
||||
// Get creates or gets a client for the protocol based on custom configuration
|
||||
func Get(options *types.Options, configuration *Configuration) (*retryablehttp.Client, error) {
|
||||
if configuration.Threads == 0 && configuration.MaxRedirects == 0 && !configuration.FollowRedirects && !configuration.CookieReuse {
|
||||
return normalClient, nil
|
||||
}
|
||||
return wrappedGet(options, configuration)
|
||||
}
|
||||
|
||||
// wrappedGet wraps a get operation without normal cliet check
|
||||
func wrappedGet(options *types.Options, configuration *Configuration) (*retryablehttp.Client, error) {
|
||||
var proxyURL *url.URL
|
||||
var err error
|
||||
|
||||
if Dialer == nil {
|
||||
Dialer, err = fastdialer.NewDialer(fastdialer.DefaultOptions)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create dialer")
|
||||
}
|
||||
|
||||
hash := configuration.Hash()
|
||||
poolMutex.RLock()
|
||||
if client, ok := clientPool[hash]; ok {
|
||||
poolMutex.RUnlock()
|
||||
return client, nil
|
||||
}
|
||||
poolMutex.RUnlock()
|
||||
|
||||
if options.ProxyURL != "" {
|
||||
proxyURL, err = url.Parse(options.ProxyURL)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Multiple Host
|
||||
retryablehttpOptions := retryablehttp.DefaultOptionsSpraying
|
||||
disableKeepAlives := true
|
||||
maxIdleConns := 0
|
||||
maxConnsPerHost := 0
|
||||
maxIdleConnsPerHost := -1
|
||||
|
||||
if configuration.Threads > 0 {
|
||||
// Single host
|
||||
retryablehttpOptions = retryablehttp.DefaultOptionsSingle
|
||||
disableKeepAlives = false
|
||||
maxIdleConnsPerHost = 500
|
||||
maxConnsPerHost = 500
|
||||
}
|
||||
|
||||
retryablehttpOptions.RetryWaitMax = 10 * time.Second
|
||||
retryablehttpOptions.RetryMax = options.Retries
|
||||
followRedirects := configuration.FollowRedirects
|
||||
maxRedirects := configuration.MaxRedirects
|
||||
|
||||
transport := &http.Transport{
|
||||
DialContext: Dialer.Dial,
|
||||
MaxIdleConns: maxIdleConns,
|
||||
MaxIdleConnsPerHost: maxIdleConnsPerHost,
|
||||
MaxConnsPerHost: maxConnsPerHost,
|
||||
TLSClientConfig: &tls.Config{
|
||||
Renegotiation: tls.RenegotiateOnceAsClient,
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
DisableKeepAlives: disableKeepAlives,
|
||||
}
|
||||
|
||||
// Attempts to overwrite the dial function with the socks proxied version
|
||||
if options.ProxySocksURL != "" {
|
||||
var proxyAuth *proxy.Auth
|
||||
|
||||
socksURL, err := url.Parse(options.ProxySocksURL)
|
||||
if err == nil {
|
||||
proxyAuth = &proxy.Auth{}
|
||||
proxyAuth.User = socksURL.User.Username()
|
||||
proxyAuth.Password, _ = socksURL.User.Password()
|
||||
}
|
||||
dialer, err := proxy.SOCKS5("tcp", fmt.Sprintf("%s:%s", socksURL.Hostname(), socksURL.Port()), proxyAuth, proxy.Direct)
|
||||
dc := dialer.(interface {
|
||||
DialContext(ctx context.Context, network, addr string) (net.Conn, error)
|
||||
})
|
||||
if err == nil {
|
||||
transport.DialContext = dc.DialContext
|
||||
}
|
||||
}
|
||||
if proxyURL != nil {
|
||||
transport.Proxy = http.ProxyURL(proxyURL)
|
||||
}
|
||||
|
||||
var jar *cookiejar.Jar
|
||||
if configuration.CookieReuse {
|
||||
if jar, err = cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}); err != nil {
|
||||
return nil, errors.Wrap(err, "could not create cookiejar")
|
||||
}
|
||||
}
|
||||
|
||||
client := retryablehttp.NewWithHTTPClient(&http.Client{
|
||||
Transport: transport,
|
||||
Timeout: time.Duration(options.Timeout) * time.Second,
|
||||
CheckRedirect: makeCheckRedirectFunc(followRedirects, maxRedirects),
|
||||
}, retryablehttpOptions)
|
||||
if jar != nil {
|
||||
client.HTTPClient.Jar = jar
|
||||
}
|
||||
client.CheckRetry = retryablehttp.HostSprayRetryPolicy()
|
||||
|
||||
// Only add to client pool if we don't have a cookie jar in place.
|
||||
if jar == nil {
|
||||
poolMutex.Lock()
|
||||
clientPool[hash] = client
|
||||
poolMutex.Unlock()
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
const defaultMaxRedirects = 10
|
||||
|
||||
type checkRedirectFunc func(req *http.Request, via []*http.Request) error
|
||||
|
||||
func makeCheckRedirectFunc(followRedirects bool, maxRedirects int) checkRedirectFunc {
|
||||
return func(req *http.Request, via []*http.Request) error {
|
||||
if !followRedirects {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
|
||||
if maxRedirects == 0 {
|
||||
if len(via) > defaultMaxRedirects {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(via) > maxRedirects {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
|
@ -0,0 +1,158 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators/extractors"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators/matchers"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
)
|
||||
|
||||
// Match matches a generic data response again a given matcher
|
||||
func (r *Request) Match(data map[string]interface{}, matcher *matchers.Matcher) bool {
|
||||
item, ok := getMatchPart(matcher.Part, data)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
switch matcher.GetType() {
|
||||
case matchers.StatusMatcher:
|
||||
statusCode, ok := data["status_code"]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return matcher.Result(matcher.MatchStatusCode(statusCode.(int)))
|
||||
case matchers.SizeMatcher:
|
||||
return matcher.Result(matcher.MatchSize(len(item)))
|
||||
case matchers.WordsMatcher:
|
||||
return matcher.Result(matcher.MatchWords(item))
|
||||
case matchers.RegexMatcher:
|
||||
return matcher.Result(matcher.MatchRegex(item))
|
||||
case matchers.BinaryMatcher:
|
||||
return matcher.Result(matcher.MatchBinary(item))
|
||||
case matchers.DSLMatcher:
|
||||
return matcher.Result(matcher.MatchDSL(data))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Extract performs extracting operation for a extractor on model and returns true or false.
|
||||
func (r *Request) Extract(data map[string]interface{}, extractor *extractors.Extractor) map[string]struct{} {
|
||||
item, ok := getMatchPart(extractor.Part, data)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
switch extractor.GetType() {
|
||||
case extractors.RegexExtractor:
|
||||
return extractor.ExtractRegex(item)
|
||||
case extractors.KValExtractor:
|
||||
return extractor.ExtractKval(data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getMatchPart returns the match part honoring "all" matchers + others.
|
||||
func getMatchPart(part string, data output.InternalEvent) (string, bool) {
|
||||
if part == "header" {
|
||||
part = "all_headers"
|
||||
}
|
||||
var itemStr string
|
||||
|
||||
if part == "all" {
|
||||
builder := &strings.Builder{}
|
||||
builder.WriteString(data["body"].(string))
|
||||
builder.WriteString(data["all_headers"].(string))
|
||||
itemStr = builder.String()
|
||||
} else {
|
||||
item, ok := data[part]
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
itemStr = types.ToString(item)
|
||||
}
|
||||
return itemStr, true
|
||||
}
|
||||
|
||||
// responseToDSLMap converts a HTTP response to a map for use in DSL matching
|
||||
func (r *Request) responseToDSLMap(resp *http.Response, host, matched, rawReq, rawResp, body, headers string, duration time.Duration, extra map[string]interface{}) map[string]interface{} {
|
||||
data := make(map[string]interface{}, len(extra)+8+len(resp.Header)+len(resp.Cookies()))
|
||||
for k, v := range extra {
|
||||
data[k] = v
|
||||
}
|
||||
|
||||
data["host"] = host
|
||||
data["matched"] = matched
|
||||
if r.options.Options.JSONRequests {
|
||||
data["request"] = rawReq
|
||||
data["response"] = rawResp
|
||||
}
|
||||
|
||||
data["content_length"] = resp.ContentLength
|
||||
data["status_code"] = resp.StatusCode
|
||||
|
||||
data["body"] = body
|
||||
for _, cookie := range resp.Cookies() {
|
||||
data[strings.ToLower(cookie.Name)] = cookie.Value
|
||||
}
|
||||
for k, v := range resp.Header {
|
||||
k = strings.ToLower(strings.TrimSpace(strings.ReplaceAll(k, "-", "_")))
|
||||
data[k] = strings.Join(v, " ")
|
||||
}
|
||||
data["all_headers"] = headers
|
||||
|
||||
if r, err := httputil.DumpResponse(resp, true); err == nil {
|
||||
rawString := string(r)
|
||||
data["raw"] = rawString
|
||||
}
|
||||
data["duration"] = duration.Seconds()
|
||||
data["template-id"] = r.options.TemplateID
|
||||
data["template-info"] = r.options.TemplateInfo
|
||||
return data
|
||||
}
|
||||
|
||||
// MakeResultEvent creates a result event from internal wrapped event
|
||||
func (r *Request) MakeResultEvent(wrapped *output.InternalWrappedEvent) []*output.ResultEvent {
|
||||
results := make([]*output.ResultEvent, 0, len(wrapped.OperatorsResult.Matches)+1)
|
||||
|
||||
// If we have multiple matchers with names, write each of them separately.
|
||||
if len(wrapped.OperatorsResult.Matches) > 0 {
|
||||
for k := range wrapped.OperatorsResult.Matches {
|
||||
data := r.makeResultEventItem(wrapped)
|
||||
data.MatcherName = k
|
||||
results = append(results, data)
|
||||
}
|
||||
} else if len(wrapped.OperatorsResult.Extracts) > 0 {
|
||||
for k, v := range wrapped.OperatorsResult.Extracts {
|
||||
data := r.makeResultEventItem(wrapped)
|
||||
data.ExtractedResults = v
|
||||
data.ExtractorName = k
|
||||
results = append(results, data)
|
||||
}
|
||||
} else {
|
||||
data := r.makeResultEventItem(wrapped)
|
||||
results = append(results, data)
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
func (r *Request) makeResultEventItem(wrapped *output.InternalWrappedEvent) *output.ResultEvent {
|
||||
data := &output.ResultEvent{
|
||||
TemplateID: wrapped.InternalEvent["template-id"].(string),
|
||||
Info: wrapped.InternalEvent["template-info"].(map[string]string),
|
||||
Type: "http",
|
||||
Host: wrapped.InternalEvent["host"].(string),
|
||||
Matched: wrapped.InternalEvent["matched"].(string),
|
||||
Metadata: wrapped.OperatorsResult.PayloadValues,
|
||||
ExtractedResults: wrapped.OperatorsResult.OutputExtracts,
|
||||
IP: wrapped.InternalEvent["ip"].(string),
|
||||
}
|
||||
if r.options.Options.JSONRequests {
|
||||
data.Request = wrapped.InternalEvent["request"].(string)
|
||||
data.Response = wrapped.InternalEvent["raw"].(string)
|
||||
}
|
||||
return data
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
package syncedreadcloser
|
||||
package race
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
@ -7,7 +7,8 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
// compatible with ReadSeeker
|
||||
// SyncedReadCloser is compatible with io.ReadSeeker and performs
|
||||
// gate-based synced writes to enable race condition testing.
|
||||
type SyncedReadCloser struct {
|
||||
data []byte
|
||||
p int64
|
||||
|
@ -16,7 +17,8 @@ type SyncedReadCloser struct {
|
|||
enableBlocking bool
|
||||
}
|
||||
|
||||
func New(r io.ReadCloser) *SyncedReadCloser {
|
||||
// NewSyncedReadCloser creates a new SyncedReadCloser instance.
|
||||
func NewSyncedReadCloser(r io.ReadCloser) *SyncedReadCloser {
|
||||
var (
|
||||
s SyncedReadCloser
|
||||
err error
|
||||
|
@ -29,31 +31,34 @@ func New(r io.ReadCloser) *SyncedReadCloser {
|
|||
s.length = int64(len(s.data))
|
||||
s.opengate = make(chan struct{})
|
||||
s.enableBlocking = true
|
||||
|
||||
return &s
|
||||
}
|
||||
|
||||
// NewOpenGateWithTimeout creates a new open gate with a timeout
|
||||
func NewOpenGateWithTimeout(r io.ReadCloser, d time.Duration) *SyncedReadCloser {
|
||||
s := New(r)
|
||||
s := NewSyncedReadCloser(r)
|
||||
s.OpenGateAfter(d)
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// SetOpenGate sets the status of the blocking gate
|
||||
func (s *SyncedReadCloser) SetOpenGate(status bool) {
|
||||
s.enableBlocking = status
|
||||
}
|
||||
|
||||
// OpenGate opens the gate allowing all requests to be completed
|
||||
func (s *SyncedReadCloser) OpenGate() {
|
||||
s.opengate <- struct{}{}
|
||||
}
|
||||
|
||||
// OpenGateAfter schedules gate to be opened after a duration
|
||||
func (s *SyncedReadCloser) OpenGateAfter(d time.Duration) {
|
||||
time.AfterFunc(d, func() {
|
||||
s.opengate <- struct{}{}
|
||||
})
|
||||
}
|
||||
|
||||
// Seek implements seek method for io.ReadSeeker
|
||||
func (s *SyncedReadCloser) Seek(offset int64, whence int) (int64, error) {
|
||||
var err error
|
||||
switch whence {
|
||||
|
@ -75,6 +80,7 @@ func (s *SyncedReadCloser) Seek(offset int64, whence int) (int64, error) {
|
|||
return s.p, err
|
||||
}
|
||||
|
||||
// Read implements read method for io.ReadSeeker
|
||||
func (s *SyncedReadCloser) Read(p []byte) (n int, err error) {
|
||||
// If the data fits in the buffer blocks awaiting the sync instruction
|
||||
if s.p+int64(len(p)) >= s.length && s.enableBlocking {
|
||||
|
@ -88,10 +94,12 @@ func (s *SyncedReadCloser) Read(p []byte) (n int, err error) {
|
|||
return n, err
|
||||
}
|
||||
|
||||
// Close implements close method for io.ReadSeeker
|
||||
func (s *SyncedReadCloser) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Len returns the length of data in reader
|
||||
func (s *SyncedReadCloser) Len() int {
|
||||
return int(s.length)
|
||||
}
|
|
@ -0,0 +1,2 @@
|
|||
// Package raw provides raw http request parsing abilities for nuclei.
|
||||
package raw
|
|
@ -0,0 +1,108 @@
|
|||
package raw
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Request defines a basic HTTP raw request
type Request struct {
	FullURL string            // fully qualified URL the request resolves to
	Method  string            // HTTP verb taken from the request line
	Path    string            // request path (may carry a query string)
	Data    string            // raw body following the blank line
	Headers map[string]string // parsed header key => value pairs
}

// Parse parses the raw request as supplied by the user.
//
// baseURL supplies the scheme, host and path used when the raw request does
// not carry them itself. When unsafe is set, duplicate header names are kept
// by storing the complete header line as the map key.
func Parse(request, baseURL string, unsafe bool) (*Request, error) {
	reader := bufio.NewReader(strings.NewReader(request))
	rawRequest := &Request{
		Headers: make(map[string]string),
	}

	s, err := reader.ReadString('\n')
	if err != nil {
		return nil, fmt.Errorf("could not read request: %s", err)
	}

	parts := strings.Split(s, " ")
	//nolint:gomnd // this is not a magic number
	if len(parts) < 3 {
		return nil, fmt.Errorf("malformed request supplied")
	}
	// Set the request Method
	rawRequest.Method = parts[0]

	// Accepts all malformed headers
	for {
		line, readErr := reader.ReadString('\n')
		line = strings.TrimSpace(line)

		// A blank line terminates the header block; a read error ends parsing.
		if readErr != nil || line == "" {
			break
		}

		p := strings.SplitN(line, ":", 2)
		key := p[0]
		// Reset the value on every line so a colonless header does not
		// inherit the value of the previous header.
		value := ""
		if len(p) > 1 {
			value = p[1]
		}

		// in case of unsafe requests multiple headers should be accepted
		// therefore use the full line as key
		_, found := rawRequest.Headers[key]
		if unsafe && found {
			rawRequest.Headers[line] = ""
		} else {
			rawRequest.Headers[key] = value
		}
	}

	// Handle case with the full http url in path. In that case,
	// ignore any host header that we encounter and use the path as request URL
	if !unsafe && strings.HasPrefix(parts[1], "http") {
		parsed, parseErr := url.Parse(parts[1])
		if parseErr != nil {
			return nil, fmt.Errorf("could not parse request URL: %s", parseErr)
		}

		rawRequest.Path = parts[1]
		rawRequest.Headers["Host"] = parsed.Host
	} else {
		rawRequest.Path = parts[1]
	}

	// If raw request doesn't have a Host header and/ path,
	// this will be generated from the parsed baseURL
	parsedURL, err := url.Parse(baseURL)
	if err != nil {
		return nil, fmt.Errorf("could not parse request URL: %s", err)
	}

	var hostURL string
	if rawRequest.Headers["Host"] == "" {
		hostURL = parsedURL.Host
	} else {
		hostURL = rawRequest.Headers["Host"]
	}

	if rawRequest.Path == "" {
		rawRequest.Path = parsedURL.Path
	} else if strings.HasPrefix(rawRequest.Path, "?") {
		// A bare query string is appended to the base URL's path.
		rawRequest.Path = fmt.Sprintf("%s%s", parsedURL.Path, rawRequest.Path)
	}
	rawRequest.FullURL = fmt.Sprintf("%s://%s%s", parsedURL.Scheme, strings.TrimSpace(hostURL), rawRequest.Path)

	// Set the request body
	b, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, fmt.Errorf("could not read request body: %s", err)
	}
	rawRequest.Data = string(b)
	return rawRequest, nil
}
|
|
@ -0,0 +1,28 @@
|
|||
package raw
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestParseRawRequest verifies that raw HTTP requests written in templates are
// parsed into method, path, headers and body correctly (unsafe mode).
func TestParseRawRequest(t *testing.T) {
	// GET request with template placeholders in the header values.
	request, err := Parse(`GET /manager/html HTTP/1.1
Host: {{Hostname}}
Authorization: Basic {{base64('username:password')}}
User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0
Accept-Language: en-US,en;q=0.9
Connection: close`, "https://test.com", true)
	require.Nil(t, err, "could not parse GET request")
	require.Equal(t, "GET", request.Method, "Could not parse GET method request correctly")
	require.Equal(t, "/manager/html", request.Path, "Could not parse request path correctly")

	// POST request whose body follows the blank line after the headers.
	request, err = Parse(`POST /login HTTP/1.1
Host: {{Hostname}}
Connection: close

username=admin&password=login`, "https://test.com", true)
	require.Nil(t, err, "could not parse POST request")
	require.Equal(t, "POST", request.Method, "Could not parse POST method request correctly")
	require.Equal(t, "username=admin&password=login", request.Data, "Could not parse request data correctly")
}
|
|
@ -0,0 +1,377 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/corpix/uarand"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/generators"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/tostring"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/http/httpclientpool"
|
||||
"github.com/projectdiscovery/rawhttp"
|
||||
"github.com/remeh/sizedwaitgroup"
|
||||
"go.uber.org/multierr"
|
||||
)
|
||||
|
||||
// defaultMaxWorkers is the default number of concurrent workers used by the
// pipelined (turbo) execution path to keep the request queues full.
const defaultMaxWorkers = 150
|
||||
|
||||
// executeRaceRequest executes race condition request for a URL
|
||||
func (e *Request) executeRaceRequest(reqURL string, dynamicValues map[string]interface{}, callback protocols.OutputEventCallback) error {
|
||||
generator := e.newGenerator()
|
||||
|
||||
maxWorkers := e.RaceNumberRequests
|
||||
swg := sizedwaitgroup.New(maxWorkers)
|
||||
|
||||
var requestErr error
|
||||
mutex := &sync.Mutex{}
|
||||
|
||||
request, err := generator.Make(reqURL, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for i := 0; i < e.RaceNumberRequests; i++ {
|
||||
swg.Add()
|
||||
go func(httpRequest *generatedRequest) {
|
||||
err := e.executeRequest(reqURL, httpRequest, dynamicValues, callback)
|
||||
mutex.Lock()
|
||||
if err != nil {
|
||||
requestErr = multierr.Append(requestErr, err)
|
||||
}
|
||||
mutex.Unlock()
|
||||
swg.Done()
|
||||
}(request)
|
||||
}
|
||||
swg.Wait()
|
||||
return requestErr
|
||||
}
|
||||
|
||||
// executeRaceRequest executes race condition request for a URL
|
||||
func (e *Request) executeParallelHTTP(reqURL string, dynamicValues map[string]interface{}, callback protocols.OutputEventCallback) error {
|
||||
generator := e.newGenerator()
|
||||
|
||||
// Workers that keeps enqueuing new requests
|
||||
maxWorkers := e.Threads
|
||||
swg := sizedwaitgroup.New(maxWorkers)
|
||||
|
||||
var requestErr error
|
||||
mutex := &sync.Mutex{}
|
||||
for {
|
||||
request, err := generator.Make(reqURL, dynamicValues)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
e.options.Progress.DecrementRequests(int64(generator.Total()))
|
||||
return err
|
||||
}
|
||||
swg.Add()
|
||||
go func(httpRequest *generatedRequest) {
|
||||
defer swg.Done()
|
||||
|
||||
e.options.RateLimiter.Take()
|
||||
err := e.executeRequest(reqURL, httpRequest, dynamicValues, callback)
|
||||
mutex.Lock()
|
||||
if err != nil {
|
||||
requestErr = multierr.Append(requestErr, err)
|
||||
}
|
||||
mutex.Unlock()
|
||||
}(request)
|
||||
e.options.Progress.IncrementRequests()
|
||||
}
|
||||
swg.Wait()
|
||||
return requestErr
|
||||
}
|
||||
|
||||
// executeRaceRequest executes race condition request for a URL
|
||||
func (e *Request) executeTurboHTTP(reqURL string, dynamicValues map[string]interface{}, callback protocols.OutputEventCallback) error {
|
||||
generator := e.newGenerator()
|
||||
|
||||
// need to extract the target from the url
|
||||
URL, err := url.Parse(reqURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pipeOptions := rawhttp.DefaultPipelineOptions
|
||||
pipeOptions.Host = URL.Host
|
||||
pipeOptions.MaxConnections = 1
|
||||
if e.PipelineConcurrentConnections > 0 {
|
||||
pipeOptions.MaxConnections = e.PipelineConcurrentConnections
|
||||
}
|
||||
if e.PipelineRequestsPerConnection > 0 {
|
||||
pipeOptions.MaxPendingRequests = e.PipelineRequestsPerConnection
|
||||
}
|
||||
pipeclient := rawhttp.NewPipelineClient(pipeOptions)
|
||||
|
||||
// defaultMaxWorkers should be a sufficient value to keep queues always full
|
||||
maxWorkers := defaultMaxWorkers
|
||||
// in case the queue is bigger increase the workers
|
||||
if pipeOptions.MaxPendingRequests > maxWorkers {
|
||||
maxWorkers = pipeOptions.MaxPendingRequests
|
||||
}
|
||||
swg := sizedwaitgroup.New(maxWorkers)
|
||||
|
||||
var requestErr error
|
||||
mutex := &sync.Mutex{}
|
||||
for {
|
||||
request, err := generator.Make(reqURL, dynamicValues)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
e.options.Progress.DecrementRequests(int64(generator.Total()))
|
||||
return err
|
||||
}
|
||||
request.pipelinedClient = pipeclient
|
||||
|
||||
swg.Add()
|
||||
go func(httpRequest *generatedRequest) {
|
||||
defer swg.Done()
|
||||
|
||||
err := e.executeRequest(reqURL, httpRequest, dynamicValues, callback)
|
||||
mutex.Lock()
|
||||
if err != nil {
|
||||
requestErr = multierr.Append(requestErr, err)
|
||||
}
|
||||
mutex.Unlock()
|
||||
}(request)
|
||||
e.options.Progress.IncrementRequests()
|
||||
}
|
||||
swg.Wait()
|
||||
return requestErr
|
||||
}
|
||||
|
||||
// ExecuteWithResults executes the final request on a URL
//
// The execution strategy is selected from the template flags: HTTP pipelining
// (turbo), race-condition mode, bounded parallel threads, or the sequential
// default implemented inline below.
func (r *Request) ExecuteWithResults(reqURL string, dynamicValues output.InternalEvent, callback protocols.OutputEventCallback) error {
	// verify if pipeline was requested
	if r.Pipeline {
		return r.executeTurboHTTP(reqURL, dynamicValues, callback)
	}

	// verify if a basic race condition was requested
	if r.Race && r.RaceNumberRequests > 0 {
		return r.executeRaceRequest(reqURL, dynamicValues, callback)
	}

	// verify if parallel elaboration was requested
	if r.Threads > 0 {
		return r.executeParallelHTTP(reqURL, dynamicValues, callback)
	}

	generator := r.newGenerator()

	var requestErr error
	for {
		request, err := generator.Make(reqURL, dynamicValues)
		if err == io.EOF {
			break
		}
		if err != nil {
			// Deduct the requests that will never be sent from the progress total.
			r.options.Progress.DecrementRequests(int64(generator.Total()))
			return err
		}

		var gotOutput bool
		r.options.RateLimiter.Take()
		err = r.executeRequest(reqURL, request, dynamicValues, func(event *output.InternalWrappedEvent) {
			// Add the extracts to the dynamic values if any.
			if event.OperatorsResult != nil {
				gotOutput = true
				dynamicValues = generators.MergeMaps(dynamicValues, event.OperatorsResult.DynamicValues)
			}
			callback(event)
		})
		if err != nil {
			requestErr = multierr.Append(requestErr, err)
		}
		r.options.Progress.IncrementRequests()

		// Stop generating further requests once a match was produced and the
		// template asked to stop at the first match; the unsent remainder is
		// removed from the progress total.
		if request.original.options.Options.StopAtFirstMatch && gotOutput {
			r.options.Progress.DecrementRequests(int64(generator.Total()))
			break
		}
	}
	return requestErr
}
|
||||
|
||||
// executeRequest executes the actual generated request and returns error if occured
|
||||
func (r *Request) executeRequest(reqURL string, request *generatedRequest, dynamicvalues map[string]interface{}, callback protocols.OutputEventCallback) error {
|
||||
// Add User-Agent value randomly to the customHeaders slice if `random-agent` flag is given
|
||||
if r.options.Options.RandomAgent {
|
||||
builder := &strings.Builder{}
|
||||
builder.WriteString("User-Agent: ")
|
||||
builder.WriteString(uarand.GetRandom())
|
||||
r.customHeaders = append(r.customHeaders, builder.String())
|
||||
}
|
||||
r.setCustomHeaders(request)
|
||||
|
||||
var (
|
||||
resp *http.Response
|
||||
err error
|
||||
dumpedRequest []byte
|
||||
fromcache bool
|
||||
)
|
||||
if r.options.Options.Debug || r.options.ProjectFile != nil || r.options.Options.DebugRequests {
|
||||
dumpedRequest, err = dump(request, reqURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if r.options.Options.Debug || r.options.Options.DebugRequests {
|
||||
gologger.Info().Msgf("[%s] Dumped HTTP request for %s\n\n", r.options.TemplateID, reqURL)
|
||||
fmt.Fprintf(os.Stderr, "%s", string(dumpedRequest))
|
||||
}
|
||||
|
||||
var formedURL string
|
||||
var hostname string
|
||||
timeStart := time.Now()
|
||||
if request.original.Pipeline {
|
||||
formedURL = request.rawRequest.FullURL
|
||||
if parsed, err := url.Parse(formedURL); err == nil {
|
||||
hostname = parsed.Hostname()
|
||||
}
|
||||
resp, err = request.pipelinedClient.DoRaw(request.rawRequest.Method, reqURL, request.rawRequest.Path, generators.ExpandMapValues(request.rawRequest.Headers), ioutil.NopCloser(strings.NewReader(request.rawRequest.Data)))
|
||||
} else if request.original.Unsafe {
|
||||
formedURL = request.rawRequest.FullURL
|
||||
if parsed, err := url.Parse(formedURL); err == nil {
|
||||
hostname = parsed.Hostname()
|
||||
}
|
||||
request.rawRequest.Data = strings.ReplaceAll(request.rawRequest.Data, "\n", "\r\n")
|
||||
options := request.original.rawhttpClient.Options
|
||||
options.AutomaticContentLength = !r.DisableAutoContentLength
|
||||
options.AutomaticHostHeader = !r.DisableAutoHostname
|
||||
options.FollowRedirects = r.Redirects
|
||||
resp, err = request.original.rawhttpClient.DoRawWithOptions(request.rawRequest.Method, reqURL, request.rawRequest.Path, generators.ExpandMapValues(request.rawRequest.Headers), ioutil.NopCloser(strings.NewReader(request.rawRequest.Data)), options)
|
||||
} else {
|
||||
hostname = request.request.URL.Hostname()
|
||||
formedURL = request.request.URL.String()
|
||||
// if nuclei-project is available check if the request was already sent previously
|
||||
if r.options.ProjectFile != nil {
|
||||
// if unavailable fail silently
|
||||
fromcache = true
|
||||
// nolint:bodyclose // false positive the response is generated at runtime
|
||||
resp, err = r.options.ProjectFile.Get(dumpedRequest)
|
||||
if err != nil {
|
||||
fromcache = false
|
||||
}
|
||||
}
|
||||
if resp == nil {
|
||||
resp, err = r.httpClient.Do(request.request)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
// rawhttp doesn't supports draining response bodies.
|
||||
if resp != nil && resp.Body != nil && request.rawRequest == nil {
|
||||
_, _ = io.Copy(ioutil.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
}
|
||||
r.options.Output.Request(r.options.TemplateID, reqURL, "http", err)
|
||||
r.options.Progress.DecrementRequests(1)
|
||||
return err
|
||||
}
|
||||
gologger.Verbose().Msgf("[%s] Sent HTTP request to %s", r.options.TemplateID, formedURL)
|
||||
r.options.Output.Request(r.options.TemplateID, reqURL, "http", err)
|
||||
|
||||
duration := time.Since(timeStart)
|
||||
// Dump response - Step 1 - Decompression not yet handled
|
||||
var dumpedResponse []byte
|
||||
if r.options.Options.Debug || r.options.Options.DebugResponse {
|
||||
var dumpErr error
|
||||
dumpedResponse, dumpErr = httputil.DumpResponse(resp, true)
|
||||
if dumpErr != nil {
|
||||
return errors.Wrap(dumpErr, "could not dump http response")
|
||||
}
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
_, _ = io.Copy(ioutil.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
return errors.Wrap(err, "could not read http body")
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
// net/http doesn't automatically decompress the response body if an
|
||||
// encoding has been specified by the user in the request so in case we have to
|
||||
// manually do it.
|
||||
dataOrig := data
|
||||
data, err = handleDecompression(request, data)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not decompress http body")
|
||||
}
|
||||
|
||||
// Dump response - step 2 - replace gzip body with deflated one or with itself (NOP operation)
|
||||
if r.options.Options.Debug || r.options.Options.DebugResponse {
|
||||
dumpedResponse = bytes.ReplaceAll(dumpedResponse, dataOrig, data)
|
||||
gologger.Info().Msgf("[%s] Dumped HTTP response for %s\n\n", r.options.TemplateID, formedURL)
|
||||
fmt.Fprintf(os.Stderr, "%s\n", string(dumpedResponse))
|
||||
}
|
||||
|
||||
// if nuclei-project is enabled store the response if not previously done
|
||||
if r.options.ProjectFile != nil && !fromcache {
|
||||
err := r.options.ProjectFile.Set(dumpedRequest, resp, data)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not store in project file")
|
||||
}
|
||||
}
|
||||
|
||||
var matchedURL string
|
||||
if request.rawRequest != nil {
|
||||
matchedURL = request.rawRequest.FullURL
|
||||
}
|
||||
if request.request != nil {
|
||||
matchedURL = request.request.URL.String()
|
||||
}
|
||||
outputEvent := r.responseToDSLMap(resp, reqURL, matchedURL, tostring.UnsafeToString(dumpedRequest), tostring.UnsafeToString(dumpedResponse), tostring.UnsafeToString(data), headersToString(resp.Header), duration, request.meta)
|
||||
outputEvent["ip"] = httpclientpool.Dialer.GetDialedIP(hostname)
|
||||
|
||||
event := &output.InternalWrappedEvent{InternalEvent: outputEvent}
|
||||
if r.CompiledOperators != nil {
|
||||
result, ok := r.CompiledOperators.Execute(outputEvent, r.Match, r.Extract)
|
||||
if ok && result != nil {
|
||||
event.OperatorsResult = result
|
||||
result.PayloadValues = request.meta
|
||||
event.Results = r.MakeResultEvent(event)
|
||||
}
|
||||
}
|
||||
callback(event)
|
||||
return nil
|
||||
}
|
||||
|
||||
// two is the number of parts expected when splitting a "Name: value" custom header.
const two = 2
|
||||
|
||||
// setCustomHeaders sets the custom headers for generated request
|
||||
func (e *Request) setCustomHeaders(r *generatedRequest) {
|
||||
for _, customHeader := range e.customHeaders {
|
||||
if customHeader == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// This should be pre-computed somewhere and done only once
|
||||
tokens := strings.SplitN(customHeader, ":", two)
|
||||
// if it's an invalid header skip it
|
||||
if len(tokens) < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
headerName, headerValue := tokens[0], strings.Join(tokens[1:], "")
|
||||
if r.rawRequest != nil {
|
||||
r.rawRequest.Headers[headerName] = headerValue
|
||||
} else {
|
||||
r.request.Header.Set(strings.TrimSpace(headerName), strings.TrimSpace(headerValue))
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,69 @@
|
|||
package http
|
||||
|
||||
import (
	"bytes"
	"compress/gzip"
	"io/ioutil"
	"net/http"
	"net/http/httputil"
	"sort"
	"strings"

	"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/generators"
	"github.com/projectdiscovery/rawhttp"
)
|
||||
|
||||
// headersToString converts http headers to string
|
||||
func headersToString(headers http.Header) string {
|
||||
builder := &strings.Builder{}
|
||||
|
||||
for header, values := range headers {
|
||||
builder.WriteString(header)
|
||||
builder.WriteString(": ")
|
||||
|
||||
for i, value := range values {
|
||||
builder.WriteString(value)
|
||||
|
||||
if i != len(values)-1 {
|
||||
builder.WriteRune('\n')
|
||||
builder.WriteString(header)
|
||||
builder.WriteString(": ")
|
||||
}
|
||||
}
|
||||
builder.WriteRune('\n')
|
||||
}
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// dump creates a dump of the http request in form of a byte slice
|
||||
func dump(req *generatedRequest, reqURL string) ([]byte, error) {
|
||||
if req.request != nil {
|
||||
// Create a copy on the fly of the request body - ignore errors
|
||||
bodyBytes, _ := req.request.BodyBytes()
|
||||
req.request.Request.Body = ioutil.NopCloser(bytes.NewReader(bodyBytes))
|
||||
return httputil.DumpRequest(req.request.Request, true)
|
||||
}
|
||||
return rawhttp.DumpRequestRaw(req.rawRequest.Method, reqURL, req.rawRequest.Path, generators.ExpandMapValues(req.rawRequest.Headers), ioutil.NopCloser(strings.NewReader(req.rawRequest.Data)))
|
||||
}
|
||||
|
||||
// handleDecompression if the user specified a custom encoding (as golang transport doesn't do this automatically)
|
||||
func handleDecompression(r *generatedRequest, bodyOrig []byte) (bodyDec []byte, err error) {
|
||||
if r.request == nil {
|
||||
return bodyOrig, nil
|
||||
}
|
||||
|
||||
encodingHeader := strings.TrimSpace(strings.ToLower(r.request.Header.Get("Accept-Encoding")))
|
||||
if encodingHeader == "gzip" || encodingHeader == "gzip, deflate" {
|
||||
gzipreader, err := gzip.NewReader(bytes.NewReader(bodyOrig))
|
||||
if err != nil {
|
||||
return bodyDec, err
|
||||
}
|
||||
defer gzipreader.Close()
|
||||
|
||||
bodyDec, err = ioutil.ReadAll(gzipreader)
|
||||
if err != nil {
|
||||
return bodyDec, err
|
||||
}
|
||||
return bodyDec, nil
|
||||
}
|
||||
return bodyOrig, nil
|
||||
}
|
|
@ -0,0 +1,84 @@
|
|||
package network
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/fastdialer/fastdialer"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/network/networkclientpool"
|
||||
)
|
||||
|
||||
// Request contains a Network protocol request to be made from a template
type Request struct {
	// Address is the address to send requests to (host:port combos generally)
	Address []string `yaml:"host"`
	// addresses holds the host/port pairs parsed from Address during Compile.
	addresses []keyValue

	// Inputs contains the input payloads to send for the network request
	Inputs []*Input `yaml:"inputs"`
	// ReadSize is the size of response to read (1024 if not provided by default)
	ReadSize int `yaml:"read-size"`

	// Operators for the current request go here.
	operators.Operators `yaml:",inline"`
	// CompiledOperators holds the compiled matchers/extractors, set by Compile
	// only when the template declares any.
	CompiledOperators *operators.Operators

	// cache any variables that may be needed for operation.
	dialer  *fastdialer.Dialer
	options *protocols.ExecuterOptions
}
|
||||
|
||||
// keyValue is a key value pair
//
// key holds the host portion (template placeholders intact) and value holds
// the optional port split off during Compile; value is empty when the
// original address carried no port.
type keyValue struct {
	key   string
	value string
}
|
||||
|
||||
// Input is the input to send on the network
type Input struct {
	// Data is the data to send as the input
	Data string `yaml:"data"`
	// Type is the type of input - hex, text.
	// "hex" inputs are hex-decoded before being written to the connection;
	// any other value sends Data verbatim.
	Type string `yaml:"type"`
}
|
||||
|
||||
// Compile compiles the protocol request for further execution.
|
||||
func (r *Request) Compile(options *protocols.ExecuterOptions) error {
|
||||
var err error
|
||||
for _, address := range r.Address {
|
||||
if strings.Contains(address, ":") {
|
||||
addressHost, addressPort, err := net.SplitHostPort(address)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not parse address")
|
||||
}
|
||||
r.addresses = append(r.addresses, keyValue{key: addressHost, value: addressPort})
|
||||
} else {
|
||||
r.addresses = append(r.addresses, keyValue{key: address})
|
||||
}
|
||||
}
|
||||
|
||||
// Create a client for the class
|
||||
client, err := networkclientpool.Get(options.Options, &networkclientpool.Configuration{})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get network client")
|
||||
}
|
||||
r.dialer = client
|
||||
|
||||
if len(r.Matchers) > 0 || len(r.Extractors) > 0 {
|
||||
compiled := &r.Operators
|
||||
if err := compiled.Compile(); err != nil {
|
||||
return errors.Wrap(err, "could not compile operators")
|
||||
}
|
||||
r.CompiledOperators = compiled
|
||||
}
|
||||
r.options = options
|
||||
return nil
|
||||
}
|
||||
|
||||
// Requests returns the total number of requests the YAML rule will perform
//
// Each configured address accounts for one request.
func (r *Request) Requests() int {
	return len(r.Address)
}
|
|
@ -0,0 +1,38 @@
|
|||
package networkclientpool
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/fastdialer/fastdialer"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
)
|
||||
|
||||
var (
	// normalClient is the shared fastdialer instance handed out to all
	// network protocol requests; it is created once by Init.
	normalClient *fastdialer.Dialer
)
|
||||
|
||||
// Init initializes the clientpool implementation
|
||||
func Init(options *types.Options) error {
|
||||
// Don't create clients if already created in past.
|
||||
if normalClient != nil {
|
||||
return nil
|
||||
}
|
||||
dialer, err := fastdialer.NewDialer(fastdialer.DefaultOptions)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not create dialer")
|
||||
}
|
||||
normalClient = dialer
|
||||
return nil
|
||||
}
|
||||
|
||||
// Configuration contains the custom configuration options for a client
type Configuration struct{}

// Hash returns the hash of the configuration to allow client pooling
//
// The network pool keeps a single shared dialer, so the hash is always empty.
func (c *Configuration) Hash() string {
	return ""
}
|
||||
|
||||
// Get creates or gets a client for the protocol based on custom configuration
//
// Both parameters are currently unused: every caller receives the single
// shared dialer created by Init.
func Get(options *types.Options, configuration *Configuration) (*fastdialer.Dialer, error) {
	return normalClient, nil
}
|
|
@ -0,0 +1,123 @@
|
|||
package network
|
||||
|
||||
import (
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators/extractors"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators/matchers"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
)
|
||||
|
||||
// Match matches a generic data response again a given matcher
|
||||
func (r *Request) Match(data map[string]interface{}, matcher *matchers.Matcher) bool {
|
||||
partString := matcher.Part
|
||||
switch partString {
|
||||
case "body", "all", "":
|
||||
partString = "data"
|
||||
}
|
||||
|
||||
item, ok := data[partString]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
itemStr := types.ToString(item)
|
||||
|
||||
switch matcher.GetType() {
|
||||
case matchers.SizeMatcher:
|
||||
return matcher.Result(matcher.MatchSize(len(itemStr)))
|
||||
case matchers.WordsMatcher:
|
||||
return matcher.Result(matcher.MatchWords(itemStr))
|
||||
case matchers.RegexMatcher:
|
||||
return matcher.Result(matcher.MatchRegex(itemStr))
|
||||
case matchers.BinaryMatcher:
|
||||
return matcher.Result(matcher.MatchBinary(itemStr))
|
||||
case matchers.DSLMatcher:
|
||||
return matcher.Result(matcher.MatchDSL(data))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Extract performs extracting operation for a extractor on model and returns true or false.
|
||||
func (r *Request) Extract(data map[string]interface{}, extractor *extractors.Extractor) map[string]struct{} {
|
||||
part, ok := data[extractor.Part]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
partString := part.(string)
|
||||
|
||||
switch partString {
|
||||
case "body", "all":
|
||||
partString = "data"
|
||||
}
|
||||
|
||||
item, ok := data[partString]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
itemStr := types.ToString(item)
|
||||
|
||||
switch extractor.GetType() {
|
||||
case extractors.RegexExtractor:
|
||||
return extractor.ExtractRegex(itemStr)
|
||||
case extractors.KValExtractor:
|
||||
return extractor.ExtractKval(data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// responseToDSLMap converts a DNS response to a map for use in DSL matching
|
||||
func (r *Request) responseToDSLMap(req, resp string, host, matched string) output.InternalEvent {
|
||||
data := make(output.InternalEvent, 4)
|
||||
|
||||
// Some data regarding the request metadata
|
||||
data["host"] = host
|
||||
data["matched"] = matched
|
||||
if r.options.Options.JSONRequests {
|
||||
data["request"] = req
|
||||
}
|
||||
data["data"] = resp
|
||||
data["template-id"] = r.options.TemplateID
|
||||
data["template-info"] = r.options.TemplateInfo
|
||||
return data
|
||||
}
|
||||
|
||||
// MakeResultEvent creates a result event from internal wrapped event
|
||||
func (r *Request) MakeResultEvent(wrapped *output.InternalWrappedEvent) []*output.ResultEvent {
|
||||
results := make([]*output.ResultEvent, 0, len(wrapped.OperatorsResult.Matches)+1)
|
||||
|
||||
// If we have multiple matchers with names, write each of them separately.
|
||||
if len(wrapped.OperatorsResult.Matches) > 0 {
|
||||
for k := range wrapped.OperatorsResult.Matches {
|
||||
data := r.makeResultEventItem(wrapped)
|
||||
data.MatcherName = k
|
||||
results = append(results, data)
|
||||
}
|
||||
} else if len(wrapped.OperatorsResult.Extracts) > 0 {
|
||||
for k, v := range wrapped.OperatorsResult.Extracts {
|
||||
data := r.makeResultEventItem(wrapped)
|
||||
data.ExtractedResults = v
|
||||
data.ExtractorName = k
|
||||
results = append(results, data)
|
||||
}
|
||||
} else {
|
||||
data := r.makeResultEventItem(wrapped)
|
||||
results = append(results, data)
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
func (r *Request) makeResultEventItem(wrapped *output.InternalWrappedEvent) *output.ResultEvent {
|
||||
data := &output.ResultEvent{
|
||||
TemplateID: wrapped.InternalEvent["template-id"].(string),
|
||||
Info: wrapped.InternalEvent["template-info"].(map[string]string),
|
||||
Type: "network",
|
||||
Host: wrapped.InternalEvent["host"].(string),
|
||||
Matched: wrapped.InternalEvent["matched"].(string),
|
||||
ExtractedResults: wrapped.OperatorsResult.OutputExtracts,
|
||||
IP: wrapped.InternalEvent["ip"].(string),
|
||||
}
|
||||
if r.options.Options.JSONRequests {
|
||||
data.Request = wrapped.InternalEvent["request"].(string)
|
||||
data.Response = wrapped.InternalEvent["data"].(string)
|
||||
}
|
||||
return data
|
||||
}
|
|
@ -0,0 +1,151 @@
|
|||
package network
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/replacer"
|
||||
)
|
||||
|
||||
// Compile-time assertion that Request implements the protocols.Request interface.
var _ protocols.Request = &Request{}
|
||||
|
||||
// ExecuteWithResults executes the protocol requests and returns results instead of writing them.
|
||||
func (r *Request) ExecuteWithResults(input string, metadata output.InternalEvent, callback protocols.OutputEventCallback) error {
|
||||
address, err := getAddress(input)
|
||||
if err != nil {
|
||||
r.options.Output.Request(r.options.TemplateID, input, "network", err)
|
||||
r.options.Progress.DecrementRequests(1)
|
||||
return errors.Wrap(err, "could not get address from url")
|
||||
}
|
||||
|
||||
for _, kv := range r.addresses {
|
||||
replacer := replacer.New(map[string]interface{}{"Hostname": address})
|
||||
actualAddress := replacer.Replace(kv.key)
|
||||
if kv.value != "" {
|
||||
if strings.Contains(address, ":") {
|
||||
actualAddress, _, _ = net.SplitHostPort(actualAddress)
|
||||
}
|
||||
actualAddress = net.JoinHostPort(actualAddress, kv.value)
|
||||
}
|
||||
|
||||
err = r.executeAddress(actualAddress, address, input, callback)
|
||||
if err != nil {
|
||||
gologger.Verbose().Lable("ERR").Msgf("Could not make network request for %s: %s\n", actualAddress, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// executeAddress executes the request for an address
|
||||
func (r *Request) executeAddress(actualAddress, address, input string, callback protocols.OutputEventCallback) error {
|
||||
if !strings.Contains(actualAddress, ":") {
|
||||
err := errors.New("no port provided in network protocol request")
|
||||
r.options.Output.Request(r.options.TemplateID, address, "network", err)
|
||||
r.options.Progress.DecrementRequests(1)
|
||||
return err
|
||||
}
|
||||
|
||||
var hostname string
|
||||
if host, _, err := net.SplitHostPort(actualAddress); err == nil {
|
||||
hostname = host
|
||||
}
|
||||
|
||||
conn, err := r.dialer.Dial(context.Background(), "tcp", actualAddress)
|
||||
if err != nil {
|
||||
r.options.Output.Request(r.options.TemplateID, address, "network", err)
|
||||
r.options.Progress.DecrementRequests(1)
|
||||
return errors.Wrap(err, "could not connect to server request")
|
||||
}
|
||||
defer conn.Close()
|
||||
conn.SetReadDeadline(time.Now().Add(5 * time.Second))
|
||||
|
||||
reqBuilder := &strings.Builder{}
|
||||
for _, input := range r.Inputs {
|
||||
var data []byte
|
||||
|
||||
switch input.Type {
|
||||
case "hex":
|
||||
data, err = hex.DecodeString(input.Data)
|
||||
default:
|
||||
data = []byte(input.Data)
|
||||
}
|
||||
if err != nil {
|
||||
r.options.Output.Request(r.options.TemplateID, address, "network", err)
|
||||
r.options.Progress.DecrementRequests(1)
|
||||
return errors.Wrap(err, "could not write request to server")
|
||||
}
|
||||
reqBuilder.Grow(len(input.Data))
|
||||
reqBuilder.WriteString(input.Data)
|
||||
|
||||
_, err = conn.Write(data)
|
||||
if err != nil {
|
||||
r.options.Output.Request(r.options.TemplateID, address, "network", err)
|
||||
r.options.Progress.DecrementRequests(1)
|
||||
return errors.Wrap(err, "could not write request to server")
|
||||
}
|
||||
r.options.Progress.IncrementRequests()
|
||||
}
|
||||
if err != nil {
|
||||
r.options.Output.Request(r.options.TemplateID, address, "network", err)
|
||||
r.options.Progress.DecrementRequests(1)
|
||||
return errors.Wrap(err, "could not write request to server")
|
||||
}
|
||||
|
||||
if r.options.Options.Debug || r.options.Options.DebugRequests {
|
||||
gologger.Info().Str("address", actualAddress).Msgf("[%s] Dumped Network request for %s", r.options.TemplateID, actualAddress)
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%s\n", reqBuilder.String())
|
||||
}
|
||||
|
||||
r.options.Output.Request(r.options.TemplateID, actualAddress, "network", err)
|
||||
gologger.Verbose().Msgf("Sent TCP request to %s", actualAddress)
|
||||
|
||||
bufferSize := 1024
|
||||
if r.ReadSize != 0 {
|
||||
bufferSize = r.ReadSize
|
||||
}
|
||||
buffer := make([]byte, bufferSize)
|
||||
n, _ := conn.Read(buffer)
|
||||
resp := string(buffer[:n])
|
||||
|
||||
if r.options.Options.Debug || r.options.Options.DebugResponse {
|
||||
gologger.Debug().Msgf("[%s] Dumped Network response for %s", r.options.TemplateID, actualAddress)
|
||||
fmt.Fprintf(os.Stderr, "%s\n", resp)
|
||||
}
|
||||
outputEvent := r.responseToDSLMap(reqBuilder.String(), resp, input, actualAddress)
|
||||
outputEvent["ip"] = r.dialer.GetDialedIP(hostname)
|
||||
|
||||
event := &output.InternalWrappedEvent{InternalEvent: outputEvent}
|
||||
if r.CompiledOperators != nil {
|
||||
result, ok := r.CompiledOperators.Execute(outputEvent, r.Match, r.Extract)
|
||||
if ok && result != nil {
|
||||
event.OperatorsResult = result
|
||||
event.Results = r.MakeResultEvent(event)
|
||||
}
|
||||
}
|
||||
callback(event)
|
||||
return nil
|
||||
}
|
||||
|
||||
// getAddress returns the host (and port, when present) to connect to,
// stripping any scheme prefix from the supplied target.
func getAddress(toTest string) (string, error) {
	// Bare host[:port] inputs pass through untouched.
	if !strings.Contains(toTest, "://") {
		return toTest, nil
	}
	parsed, err := url.Parse(toTest)
	if err != nil {
		return "", err
	}
	return parsed.Host, nil
}
|
|
@ -0,0 +1,63 @@
|
|||
package protocols
|
||||
|
||||
import (
|
||||
"github.com/projectdiscovery/nuclei/v2/internal/progress"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/catalogue"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators/extractors"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/operators/matchers"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/projectfile"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/types"
|
||||
"go.uber.org/ratelimit"
|
||||
)
|
||||
|
||||
// Executer is an interface implemented by any protocol based request executer.
type Executer interface {
	// Compile compiles the execution generators preparing any requests possible.
	Compile() error
	// Requests returns the total number of requests the rule will perform.
	Requests() int
	// Execute executes the protocol group and returns true or false if results were found.
	Execute(input string) (bool, error)
	// ExecuteWithResults executes the protocol requests and returns results instead of writing them.
	ExecuteWithResults(input string, callback OutputEventCallback) error
}

// ExecuterOptions contains the configuration options for executer clients.
type ExecuterOptions struct {
	// TemplateID is the ID of the template for the request.
	TemplateID string
	// TemplatePath is the path of the template for the request.
	TemplatePath string
	// TemplateInfo contains the information block of the template request.
	TemplateInfo map[string]string
	// Output is a writer interface for writing output events from the executer.
	Output output.Writer
	// Options contains configuration options for the executer.
	Options *types.Options
	// Progress is a progress client for scan reporting.
	Progress *progress.Progress
	// RateLimiter is a rate-limiter for limiting the number of sent requests.
	RateLimiter ratelimit.Limiter
	// Catalogue is a template catalogue implementation for nuclei.
	Catalogue *catalogue.Catalogue
	// ProjectFile is the project file for nuclei.
	ProjectFile *projectfile.ProjectFile
}

// Request is an interface implemented by any protocol based request generator.
type Request interface {
	// Compile compiles the request generators preparing any requests possible.
	Compile(options *ExecuterOptions) error
	// Requests returns the total number of requests the rule will perform.
	Requests() int
	// Match performs matching operation for a matcher on model and returns true or false.
	Match(data map[string]interface{}, matcher *matchers.Matcher) bool
	// Extract performs extracting operation for an extractor on model and returns the extracted values.
	Extract(data map[string]interface{}, matcher *extractors.Extractor) map[string]struct{}
	// ExecuteWithResults executes the protocol requests and returns results instead of writing them.
	ExecuteWithResults(input string, metadata output.InternalEvent, callback OutputEventCallback) error
}

// OutputEventCallback is a callback invoked for any results found during scanning.
type OutputEventCallback func(result *output.InternalWrappedEvent)
|
|
@ -1,531 +0,0 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Knetic/govaluate"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/extractors"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/generators"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/matchers"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/syncedreadcloser"
|
||||
"github.com/projectdiscovery/rawhttp"
|
||||
retryablehttp "github.com/projectdiscovery/retryablehttp-go"
|
||||
)
|
||||
|
||||
const (
	// two and three are named constants standing in for magic numbers
	// (SplitN limits and minimum request-line part counts).
	two   = 2
	three = 3
)

// urlWithPortRgx matches a {{BaseURL}} marker immediately followed by an
// explicit port, e.g. "{{BaseURL}}:8080".
var urlWithPortRgx = regexp.MustCompile(`{{BaseURL}}:(\d+)`)
|
||||
|
||||
// BulkHTTPRequest contains a request to be made from a template.
type BulkHTTPRequest struct {
	// Path contains the path/s for the request
	Path []string `yaml:"path"`
	// Matchers contains the detection mechanism for the request to identify
	// whether the request was successful
	Matchers []*matchers.Matcher `yaml:"matchers,omitempty"`
	// Extractors contains the extraction mechanism for the request to identify
	// and extract parts of the response.
	Extractors []*extractors.Extractor `yaml:"extractors,omitempty"`
	// Raw contains raw requests
	Raw []string `yaml:"raw,omitempty"`
	// Name is an optional identifier for the request
	Name string `yaml:"Name,omitempty"`
	// AttackType is the attack type
	// Sniper, PitchFork and ClusterBomb. Default is Sniper
	AttackType string `yaml:"attack,omitempty"`
	// Method is the request method, whether GET, POST, PUT, etc
	Method string `yaml:"method"`
	// Body is an optional parameter which contains the request body for POST methods, etc
	Body string `yaml:"body,omitempty"`
	// MatchersCondition is the condition of the matchers
	// whether to use AND or OR. Default is OR.
	MatchersCondition string `yaml:"matchers-condition,omitempty"`
	// attackType is the internal, parsed form of AttackType
	attackType generators.Type
	// Payloads contains the payload variable definitions for the request
	Payloads map[string]interface{} `yaml:"payloads,omitempty"`
	// Headers contains headers to send with the request
	Headers map[string]string `yaml:"headers,omitempty"`
	// matchersCondition is the internal, parsed form of MatchersCondition
	matchersCondition matchers.ConditionType
	// MaxRedirects is the maximum number of redirects that should be followed.
	MaxRedirects                  int `yaml:"max-redirects,omitempty"`
	PipelineConcurrentConnections int `yaml:"pipeline-concurrent-connections,omitempty"`
	PipelineRequestsPerConnection int `yaml:"pipeline-requests-per-connection,omitempty"`
	Threads                       int `yaml:"threads,omitempty"`
	// gsfm is the internal finite state machine keeping track of scan process
	gsfm *GeneratorFSM
	// CookieReuse is an optional setting that makes cookies shared within requests
	CookieReuse bool `yaml:"cookie-reuse,omitempty"`
	// Redirects specifies whether redirects should be followed.
	Redirects bool `yaml:"redirects,omitempty"`
	// Pipeline defines if the attack should be performed with HTTP 1.1 Pipelining (race conditions/billions requests)
	// All requests must be idempotent (GET/POST)
	Pipeline bool `yaml:"pipeline,omitempty"`
	// Unsafe specifies whether to skip request RFC normalization
	Unsafe bool `yaml:"unsafe,omitempty"`
	// DisableAutoHostname Enable/Disable Host header for unsafe raw requests
	DisableAutoHostname bool `yaml:"disable-automatic-host-header,omitempty"`
	// DisableAutoContentLength Enable/Disable Content-Length header for unsafe raw requests
	DisableAutoContentLength bool `yaml:"disable-automatic-content-length-header,omitempty"`
	// Race determines if all the requests have to be attempted at the same time
	// The minimum number of requests is determined by threads
	Race bool `yaml:"race,omitempty"`
	// RaceNumberRequests is the number of identical requests to send in a race condition attack
	RaceNumberRequests int `yaml:"race_count,omitempty"`
}
|
||||
|
||||
// GetMatchersCondition returns the condition (AND/OR) for the matchers.
func (r *BulkHTTPRequest) GetMatchersCondition() matchers.ConditionType {
	return r.matchersCondition
}

// SetMatchersCondition sets the condition (AND/OR) for the matchers.
func (r *BulkHTTPRequest) SetMatchersCondition(condition matchers.ConditionType) {
	r.matchersCondition = condition
}

// GetAttackType returns the parsed attack type.
func (r *BulkHTTPRequest) GetAttackType() generators.Type {
	return r.attackType
}

// SetAttackType sets the parsed attack type.
func (r *BulkHTTPRequest) SetAttackType(attack generators.Type) {
	r.attackType = attack
}

// GetRequestCount returns the total number of requests the YAML rule will perform.
func (r *BulkHTTPRequest) GetRequestCount() int64 {
	return int64(r.gsfm.Total())
}
|
||||
|
||||
// MakeHTTPRequest makes the HTTP request
|
||||
func (r *BulkHTTPRequest) MakeHTTPRequest(baseURL string, dynamicValues map[string]interface{}, data string) (*HTTPRequest, error) {
|
||||
ctx := context.Background()
|
||||
|
||||
parsed, err := url.Parse(baseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hostname := parsed.Host
|
||||
|
||||
values := generators.MergeMaps(dynamicValues, map[string]interface{}{
|
||||
"BaseURL": baseURLWithTemplatePrefs(data, parsed),
|
||||
"Hostname": hostname,
|
||||
})
|
||||
|
||||
// if data contains \n it's a raw request
|
||||
if strings.Contains(data, "\n") {
|
||||
return r.makeHTTPRequestFromRaw(ctx, baseURL, data, values)
|
||||
}
|
||||
return r.makeHTTPRequestFromModel(ctx, data, values)
|
||||
}
|
||||
|
||||
// MakeHTTPRequestFromModel creates a *http.Request from a request template
|
||||
func (r *BulkHTTPRequest) makeHTTPRequestFromModel(ctx context.Context, data string, values map[string]interface{}) (*HTTPRequest, error) {
|
||||
replacer := newReplacer(values)
|
||||
URL := replacer.Replace(data)
|
||||
|
||||
// Build a request on the specified URL
|
||||
req, err := http.NewRequestWithContext(ctx, r.Method, URL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
request, err := r.fillRequest(req, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &HTTPRequest{Request: request}, nil
|
||||
}
|
||||
|
||||
// InitGenerator initializes the payload generator state machine from the
// request's attack type, payloads, paths and raw requests.
func (r *BulkHTTPRequest) InitGenerator() {
	r.gsfm = NewGeneratorFSM(r.attackType, r.Payloads, r.Path, r.Raw)
}

// CreateGenerator registers a generator for the given request URL.
func (r *BulkHTTPRequest) CreateGenerator(reqURL string) {
	r.gsfm.Add(reqURL)
}

// HasGenerator checks whether a generator exists for the given URL.
func (r *BulkHTTPRequest) HasGenerator(reqURL string) bool {
	return r.gsfm.Has(reqURL)
}

// ReadOne advances the generator for the given URL by one payload value.
func (r *BulkHTTPRequest) ReadOne(reqURL string) {
	r.gsfm.ReadOne(reqURL)
}
|
||||
|
||||
// makeHTTPRequestFromRaw creates a *http.Request from a raw request
|
||||
func (r *BulkHTTPRequest) makeHTTPRequestFromRaw(ctx context.Context, baseURL, data string, values map[string]interface{}) (*HTTPRequest, error) {
|
||||
// Add trailing line
|
||||
data += "\n"
|
||||
|
||||
if len(r.Payloads) > 0 {
|
||||
r.gsfm.InitOrSkip(baseURL)
|
||||
r.ReadOne(baseURL)
|
||||
|
||||
payloads, err := r.GetPayloadsValues(baseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return r.handleRawWithPaylods(ctx, data, baseURL, values, payloads)
|
||||
}
|
||||
|
||||
// otherwise continue with normal flow
|
||||
return r.handleRawWithPaylods(ctx, data, baseURL, values, nil)
|
||||
}
|
||||
|
||||
func (r *BulkHTTPRequest) handleRawWithPaylods(ctx context.Context, raw, baseURL string, values, genValues map[string]interface{}) (*HTTPRequest, error) {
|
||||
baseValues := generators.CopyMap(values)
|
||||
finValues := generators.MergeMaps(baseValues, genValues)
|
||||
|
||||
replacer := newReplacer(finValues)
|
||||
|
||||
// Replace the dynamic variables in the URL if any
|
||||
raw = replacer.Replace(raw)
|
||||
|
||||
dynamicValues := make(map[string]interface{})
|
||||
// find all potentials tokens between {{}}
|
||||
var re = regexp.MustCompile(`(?m)\{\{[^}]+\}\}`)
|
||||
for _, match := range re.FindAllString(raw, -1) {
|
||||
// check if the match contains a dynamic variable
|
||||
expr := generators.TrimDelimiters(match)
|
||||
compiled, err := govaluate.NewEvaluableExpressionWithFunctions(expr, generators.HelperFunctions())
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result, err := compiled.Evaluate(finValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dynamicValues[expr] = result
|
||||
}
|
||||
|
||||
// replace dynamic values
|
||||
dynamicReplacer := newReplacer(dynamicValues)
|
||||
raw = dynamicReplacer.Replace(raw)
|
||||
|
||||
rawRequest, err := r.parseRawRequest(raw, baseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// rawhttp
|
||||
if r.Unsafe {
|
||||
unsafeReq := &HTTPRequest{
|
||||
RawRequest: rawRequest,
|
||||
Meta: genValues,
|
||||
AutomaticHostHeader: !r.DisableAutoHostname,
|
||||
AutomaticContentLengthHeader: !r.DisableAutoContentLength,
|
||||
Unsafe: true,
|
||||
FollowRedirects: r.Redirects,
|
||||
}
|
||||
return unsafeReq, nil
|
||||
}
|
||||
|
||||
// retryablehttp
|
||||
var body io.ReadCloser
|
||||
body = ioutil.NopCloser(strings.NewReader(rawRequest.Data))
|
||||
if r.Race {
|
||||
// More or less this ensures that all requests hit the endpoint at the same approximated time
|
||||
// Todo: sync internally upon writing latest request byte
|
||||
body = syncedreadcloser.NewOpenGateWithTimeout(body, time.Duration(two)*time.Second)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, rawRequest.Method, rawRequest.FullURL, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// copy headers
|
||||
for key, value := range rawRequest.Headers {
|
||||
req.Header[key] = []string{value}
|
||||
}
|
||||
|
||||
request, err := r.fillRequest(req, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &HTTPRequest{Request: request, Meta: genValues}, nil
|
||||
}
|
||||
|
||||
// fillRequest converts an *http.Request into a retryable request, applying
// template headers, connection-reuse policy, an optional body and default
// headers. Raw requests skip the Accept/Accept-Language defaults.
func (r *BulkHTTPRequest) fillRequest(req *http.Request, values map[string]interface{}) (*retryablehttp.Request, error) {
	replacer := newReplacer(values)
	// Set the header values requested by the template (placeholders replaced).
	for header, value := range r.Headers {
		req.Header[header] = []string{replacer.Replace(value)}
	}

	// In case of multiple threads the underlying connection should remain open to allow reuse
	if r.Threads <= 0 && req.Header.Get("Connection") == "" {
		req.Close = true
	}

	// Check if the user requested a request body
	if r.Body != "" {
		req.Body = ioutil.NopCloser(strings.NewReader(r.Body))
	}

	setHeader(req, "User-Agent", "Nuclei - Open-source project (github.com/projectdiscovery/nuclei)")

	// raw requests are left untouched (no default Accept headers added)
	if len(r.Raw) > 0 {
		return retryablehttp.FromRequest(req)
	}

	setHeader(req, "Accept", "*/*")
	setHeader(req, "Accept-Language", "en")

	return retryablehttp.FromRequest(req)
}
|
||||
|
||||
// HTTPRequest is the basic HTTP request wrapper, carrying either a retryable
// request or a raw request together with per-request flags and the clients
// used to send it.
type HTTPRequest struct {
	Request    *retryablehttp.Request
	RawRequest *RawRequest
	// Meta holds the payload values used to generate this request.
	Meta map[string]interface{}

	// flags
	Unsafe                       bool
	Pipeline                     bool
	AutomaticHostHeader          bool
	AutomaticContentLengthHeader bool
	AutomaticConnectionHeader    bool
	FollowRedirects              bool
	Rawclient                    *rawhttp.Client
	Httpclient                   *retryablehttp.Client
	PipelineClient               *rawhttp.PipelineClient
}
|
||||
|
||||
func setHeader(req *http.Request, name, value string) {
|
||||
// Set some headers only if the header wasn't supplied by the user
|
||||
if _, ok := req.Header[name]; !ok {
|
||||
req.Header.Set(name, value)
|
||||
}
|
||||
}
|
||||
|
||||
// baseURLWithTemplatePrefs returns the url for BaseURL keeping
|
||||
// the template port and path preference
|
||||
func baseURLWithTemplatePrefs(data string, parsedURL *url.URL) string {
|
||||
// template port preference over input URL port
|
||||
// template has port
|
||||
hasPort := len(urlWithPortRgx.FindStringSubmatch(data)) > 0
|
||||
if hasPort {
|
||||
// check if also the input contains port, in this case extracts the url
|
||||
if hostname, _, err := net.SplitHostPort(parsedURL.Host); err == nil {
|
||||
parsedURL.Host = hostname
|
||||
}
|
||||
}
|
||||
|
||||
return parsedURL.String()
|
||||
}
|
||||
|
||||
// CustomHeaders is a list of global headers applied to every request.
type CustomHeaders []string

// String implements the flag.Value interface, returning a fixed label.
func (ch *CustomHeaders) String() string {
	return "Custom Global Headers"
}

// Set implements the flag.Value interface, appending a new global header.
func (ch *CustomHeaders) Set(value string) error {
	*ch = append(*ch, value)
	return nil
}
|
||||
|
||||
// RawRequest defines a basic HTTP raw request parsed from template text.
type RawRequest struct {
	// FullURL is the complete reconstructed request URL.
	FullURL string
	// Method is the HTTP verb from the request line.
	Method string
	// Path is the request path (may include a query string).
	Path string
	// Data is the request body.
	Data string
	// Headers maps header names to values (in unsafe mode duplicate headers
	// are stored with the full line as the key).
	Headers map[string]string
}
|
||||
|
||||
// parseRawRequest parses the raw request as supplied by the user into a
// RawRequest with method, path, headers and body populated. Missing Host
// header and path are filled in from baseURL.
func (r *BulkHTTPRequest) parseRawRequest(request, baseURL string) (*RawRequest, error) {
	reader := bufio.NewReader(strings.NewReader(request))

	rawRequest := RawRequest{
		Headers: make(map[string]string),
	}

	// Request line: "METHOD PATH PROTO".
	s, err := reader.ReadString('\n')
	if err != nil {
		return nil, fmt.Errorf("could not read request: %s", err)
	}

	parts := strings.Split(s, " ")

	if len(parts) < three {
		return nil, fmt.Errorf("malformed request supplied")
	}
	// Set the request Method
	rawRequest.Method = parts[0]

	// Accepts all malformed headers
	var key, value string
	for {
		line, readErr := reader.ReadString('\n')
		line = strings.TrimSpace(line)

		// A blank line (or read error) terminates the header section.
		if readErr != nil || line == "" {
			break
		}

		p := strings.SplitN(line, ":", two)
		key = p[0]
		if len(p) > 1 {
			value = p[1]
		}

		// in case of unsafe requests multiple headers should be accepted
		// therefore use the full line as key
		_, found := rawRequest.Headers[key]
		if r.Unsafe && found {
			rawRequest.Headers[line] = ""
		} else {
			rawRequest.Headers[key] = value
		}
	}

	// Handle case with the full http url in path. In that case,
	// ignore any host header that we encounter and use the path as request URL
	if !r.Unsafe && strings.HasPrefix(parts[1], "http") {
		parsed, parseErr := url.Parse(parts[1])
		if parseErr != nil {
			return nil, fmt.Errorf("could not parse request URL: %s", parseErr)
		}

		rawRequest.Path = parts[1]
		rawRequest.Headers["Host"] = parsed.Host
	} else {
		rawRequest.Path = parts[1]
	}

	// If the raw request doesn't have a Host header and/or path,
	// this will be generated from the parsed baseURL
	parsedURL, err := url.Parse(baseURL)
	if err != nil {
		return nil, fmt.Errorf("could not parse request URL: %s", err)
	}

	var hostURL string
	if rawRequest.Headers["Host"] == "" {
		hostURL = parsedURL.Host
	} else {
		hostURL = rawRequest.Headers["Host"]
	}

	if rawRequest.Path == "" {
		rawRequest.Path = parsedURL.Path
	} else if strings.HasPrefix(rawRequest.Path, "?") {
		// requests generated from http.ReadRequest have incorrect RequestURI, so they
		// cannot be used to perform another request directly, we need to generate a new one
		// with the new target url
		rawRequest.Path = fmt.Sprintf("%s%s", parsedURL.Path, rawRequest.Path)
	}

	rawRequest.FullURL = fmt.Sprintf("%s://%s%s", parsedURL.Scheme, strings.TrimSpace(hostURL), rawRequest.Path)

	// Set the request body
	b, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, fmt.Errorf("could not read request body: %s", err)
	}

	rawRequest.Data = string(b)

	return &rawRequest, nil
}
|
||||
|
||||
// Next reports whether the generator for the URL has more values.
func (r *BulkHTTPRequest) Next(reqURL string) bool {
	return r.gsfm.Next(reqURL)
}

// Position returns the current generator's position for the URL.
func (r *BulkHTTPRequest) Position(reqURL string) int {
	return r.gsfm.Position(reqURL)
}

// Reset resets the generator for the URL.
func (r *BulkHTTPRequest) Reset(reqURL string) {
	r.gsfm.Reset(reqURL)
}

// Current returns the current generator value for the URL.
func (r *BulkHTTPRequest) Current(reqURL string) string {
	return r.gsfm.Current(reqURL)
}

// Total is the total number of requests this rule will perform.
func (r *BulkHTTPRequest) Total() int {
	return r.gsfm.Total()
}

// Increment increments the processed-request counter for the URL.
func (r *BulkHTTPRequest) Increment(reqURL string) {
	r.gsfm.Increment(reqURL)
}
|
||||
|
||||
// GetPayloadsValues for the specified URL
|
||||
func (r *BulkHTTPRequest) GetPayloadsValues(reqURL string) (map[string]interface{}, error) {
|
||||
payloadProcessedValues := make(map[string]interface{})
|
||||
payloadsFromTemplate := r.gsfm.Value(reqURL)
|
||||
for k, v := range payloadsFromTemplate {
|
||||
kexp := v.(string)
|
||||
// if it doesn't containing markups, we just continue
|
||||
if !hasMarker(kexp) {
|
||||
payloadProcessedValues[k] = v
|
||||
continue
|
||||
}
|
||||
// attempts to expand expressions
|
||||
compiled, err := govaluate.NewEvaluableExpressionWithFunctions(kexp, generators.HelperFunctions())
|
||||
if err != nil {
|
||||
// it is a simple literal payload => proceed with literal value
|
||||
payloadProcessedValues[k] = v
|
||||
continue
|
||||
}
|
||||
// it is an expression - try to solve it
|
||||
expValue, err := compiled.Evaluate(payloadsFromTemplate)
|
||||
if err != nil {
|
||||
// an error occurred => proceed with literal value
|
||||
payloadProcessedValues[k] = v
|
||||
continue
|
||||
}
|
||||
payloadProcessedValues[k] = fmt.Sprint(expValue)
|
||||
}
|
||||
var err error
|
||||
if len(payloadProcessedValues) == 0 {
|
||||
err = ErrNoPayload
|
||||
}
|
||||
return payloadProcessedValues, err
|
||||
}
|
||||
|
||||
// ErrNoPayload is returned when no payload values are available, avoiding
// the additional base null request.
var ErrNoPayload = fmt.Errorf("no payload found")
|
|
@ -1,121 +0,0 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/extractors"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/matchers"
|
||||
)
|
||||
|
||||
// DNSRequest contains a DNS request to be made from a template.
type DNSRequest struct {
	// Recursion specifies whether to enable recursion in the DNS query
	Recursion bool `yaml:"recursion"`
	// Name is the queried hostname (may contain a {{FQDN}} placeholder)
	Name  string `yaml:"name"`
	Type  string `yaml:"type"`
	Class string `yaml:"class"`
	// Retries is the number of retries for the DNS request
	Retries int `yaml:"retries"`
	// Raw contains a raw request
	Raw string `yaml:"raw,omitempty"`

	// Matchers contains the detection mechanism for the request to identify
	// whether the request was successful
	Matchers []*matchers.Matcher `yaml:"matchers,omitempty"`
	// matchersCondition is the internal, parsed form of MatchersCondition
	matchersCondition matchers.ConditionType
	// MatchersCondition is the condition of the matchers
	// whether to use AND or OR. Default is OR.
	MatchersCondition string `yaml:"matchers-condition,omitempty"`
	// Extractors contains the extraction mechanism for the request to identify
	// and extract parts of the response.
	Extractors []*extractors.Extractor `yaml:"extractors,omitempty"`
}
|
||||
|
||||
// GetMatchersCondition returns the condition (AND/OR) for the matchers.
func (r *DNSRequest) GetMatchersCondition() matchers.ConditionType {
	return r.matchersCondition
}

// SetMatchersCondition sets the condition (AND/OR) for the matchers.
func (r *DNSRequest) SetMatchersCondition(condition matchers.ConditionType) {
	r.matchersCondition = condition
}

// GetRequestCount returns the total number of requests the YAML rule will
// perform; a DNS template always issues exactly one query.
func (r *DNSRequest) GetRequestCount() int64 {
	return 1
}
|
||||
|
||||
// MakeDNSRequest creates a *dns.Request from a request template
|
||||
func (r *DNSRequest) MakeDNSRequest(domain string) (*dns.Msg, error) {
|
||||
domain = dns.Fqdn(domain)
|
||||
|
||||
// Build a request on the specified URL
|
||||
req := new(dns.Msg)
|
||||
req.Id = dns.Id()
|
||||
req.RecursionDesired = r.Recursion
|
||||
|
||||
var q dns.Question
|
||||
|
||||
replacer := newReplacer(map[string]interface{}{"FQDN": domain})
|
||||
|
||||
q.Name = dns.Fqdn(replacer.Replace(r.Name))
|
||||
q.Qclass = toQClass(r.Class)
|
||||
q.Qtype = toQType(r.Type)
|
||||
|
||||
req.Question = append(req.Question, q)
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func toQType(ttype string) (rtype uint16) {
|
||||
ttype = strings.TrimSpace(strings.ToUpper(ttype))
|
||||
|
||||
switch ttype {
|
||||
case "A":
|
||||
rtype = dns.TypeA
|
||||
case "NS":
|
||||
rtype = dns.TypeNS
|
||||
case "CNAME":
|
||||
rtype = dns.TypeCNAME
|
||||
case "SOA":
|
||||
rtype = dns.TypeSOA
|
||||
case "PTR":
|
||||
rtype = dns.TypePTR
|
||||
case "MX":
|
||||
rtype = dns.TypeMX
|
||||
case "TXT":
|
||||
rtype = dns.TypeTXT
|
||||
case "AAAA":
|
||||
rtype = dns.TypeAAAA
|
||||
default:
|
||||
rtype = dns.TypeA
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func toQClass(tclass string) (rclass uint16) {
|
||||
tclass = strings.TrimSpace(strings.ToUpper(tclass))
|
||||
|
||||
switch tclass {
|
||||
case "INET":
|
||||
rclass = dns.ClassINET
|
||||
case "CSNET":
|
||||
rclass = dns.ClassCSNET
|
||||
case "CHAOS":
|
||||
rclass = dns.ClassCHAOS
|
||||
case "HESIOD":
|
||||
rclass = dns.ClassHESIOD
|
||||
case "NONE":
|
||||
rclass = dns.ClassNONE
|
||||
case "ANY":
|
||||
rclass = dns.ClassANY
|
||||
default:
|
||||
// Use INET by default.
|
||||
rclass = dns.ClassINET
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
// Package requests implements requests for templates that
|
||||
// will be sent to hosts.
|
||||
package requests
|
|
@ -1,21 +0,0 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"net/http/httputil"
|
||||
"strings"
|
||||
|
||||
"github.com/projectdiscovery/rawhttp"
|
||||
)
|
||||
|
||||
func Dump(req *HTTPRequest, reqURL string) ([]byte, error) {
|
||||
if req.Request != nil {
|
||||
// Create a copy on the fly of the request body - ignore errors
|
||||
bodyBytes, _ := req.Request.BodyBytes()
|
||||
req.Request.Request.Body = ioutil.NopCloser(bytes.NewReader(bodyBytes))
|
||||
return httputil.DumpRequest(req.Request.Request, true)
|
||||
}
|
||||
|
||||
return rawhttp.DumpRequestRaw(req.RawRequest.Method, reqURL, req.RawRequest.Path, ExpandMapValues(req.RawRequest.Headers), ioutil.NopCloser(strings.NewReader(req.RawRequest.Data)))
|
||||
}
|
|
@ -1,274 +0,0 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/generators"
|
||||
)
|
||||
|
||||
// GeneratorState tracks the lifecycle of a payload Generator.
type GeneratorState int

const (
	// fifteen is the generator read timeout in seconds.
	fifteen = 15
	// NOTE(review): because `fifteen` shares this const block, iota is 1 at
	// `initial`, so initial==1 (not 0) and the zero value of GeneratorState
	// matches no named state — confirm nothing relies on the zero value
	// before splitting these into separate blocks.
	initial GeneratorState = iota
	running
	done
)
|
||||
|
||||
// Generator holds the per-URL payload generation state: positions into the
// path/raw lists, the channel delivering generated value sets, and the most
// recently read value set.
type Generator struct {
	sync.RWMutex
	// positionPath is the current index into the template paths
	positionPath int
	// positionRaw is the current index into the raw requests
	positionRaw int
	// gchan delivers generated payload value sets; nil once done
	gchan chan map[string]interface{}
	// currentGeneratorValue is the last value set read from gchan
	currentGeneratorValue map[string]interface{}
	// state is the generator lifecycle state (initial/running/done)
	state GeneratorState
}
|
||||
|
||||
// GeneratorFSM is a finite state machine coordinating per-URL payload
// Generators for a template, keyed by request URL.
type GeneratorFSM struct {
	sync.RWMutex
	// payloads are the raw payload definitions from the template
	payloads map[string]interface{}
	// basePayloads are the loaded/expanded payload lists
	basePayloads map[string][]string
	// generator produces payload value sets over a channel
	generator func(payloads map[string][]string) (out chan map[string]interface{})
	// Generators maps a request URL to its Generator
	Generators map[string]*Generator
	// Type is the attack type (Sniper/PitchFork/ClusterBomb)
	Type generators.Type
	// Paths are the template request paths
	Paths []string
	// Raws are the template raw requests
	Raws []string
}
|
||||
|
||||
// NewGeneratorFSM creates a generator state machine for the given attack
// type, payload definitions, template paths and raw requests.
func NewGeneratorFSM(typ generators.Type, payloads map[string]interface{}, paths, raws []string) *GeneratorFSM {
	var gsfm GeneratorFSM
	gsfm.payloads = payloads
	gsfm.Paths = paths
	gsfm.Raws = raws
	gsfm.Type = typ

	if len(gsfm.payloads) > 0 {
		// load payloads if not already done
		if gsfm.basePayloads == nil {
			gsfm.basePayloads = generators.LoadPayloads(gsfm.payloads)
		}

		// Sniper is the default generator unless overridden by the switch.
		generatorFunc := generators.SniperGenerator

		switch typ {
		case generators.PitchFork:
			generatorFunc = generators.PitchforkGenerator
		case generators.ClusterBomb:
			generatorFunc = generators.ClusterbombGenerator
		case generators.Sniper:
			generatorFunc = generators.SniperGenerator
		}

		gsfm.generator = generatorFunc
	}

	gsfm.Generators = make(map[string]*Generator)

	return &gsfm
}
|
||||
|
||||
func (gfsm *GeneratorFSM) Add(key string) {
|
||||
gfsm.Lock()
|
||||
defer gfsm.Unlock()
|
||||
|
||||
if _, ok := gfsm.Generators[key]; !ok {
|
||||
gfsm.Generators[key] = &Generator{state: initial}
|
||||
}
|
||||
}
|
||||
|
||||
func (gfsm *GeneratorFSM) Has(key string) bool {
|
||||
gfsm.RLock()
|
||||
defer gfsm.RUnlock()
|
||||
|
||||
_, ok := gfsm.Generators[key]
|
||||
|
||||
return ok
|
||||
}
|
||||
|
||||
func (gfsm *GeneratorFSM) Delete(key string) {
|
||||
gfsm.Lock()
|
||||
defer gfsm.Unlock()
|
||||
|
||||
delete(gfsm.Generators, key)
|
||||
}
|
||||
|
||||
func (gfsm *GeneratorFSM) ReadOne(key string) {
|
||||
gfsm.RLock()
|
||||
defer gfsm.RUnlock()
|
||||
g, ok := gfsm.Generators[key]
|
||||
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
for afterCh := time.After(fifteen * time.Second); ; {
|
||||
select {
|
||||
// got a value
|
||||
case curGenValue, ok := <-g.gchan:
|
||||
if !ok {
|
||||
g.Lock()
|
||||
g.gchan = nil
|
||||
g.state = done
|
||||
g.currentGeneratorValue = nil
|
||||
g.Unlock()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
g.currentGeneratorValue = curGenValue
|
||||
|
||||
return
|
||||
// timeout
|
||||
case <-afterCh:
|
||||
g.Lock()
|
||||
g.gchan = nil
|
||||
g.state = done
|
||||
g.Unlock()
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (gfsm *GeneratorFSM) InitOrSkip(key string) {
|
||||
gfsm.RLock()
|
||||
defer gfsm.RUnlock()
|
||||
|
||||
g, ok := gfsm.Generators[key]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if len(gfsm.payloads) > 0 {
|
||||
g.Lock()
|
||||
defer g.Unlock()
|
||||
|
||||
if g.gchan == nil {
|
||||
g.gchan = gfsm.generator(gfsm.basePayloads)
|
||||
g.state = running
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (gfsm *GeneratorFSM) Value(key string) map[string]interface{} {
|
||||
gfsm.RLock()
|
||||
defer gfsm.RUnlock()
|
||||
|
||||
g, ok := gfsm.Generators[key]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
return g.currentGeneratorValue
|
||||
}
|
||||
|
||||
func (gfsm *GeneratorFSM) Next(key string) bool {
|
||||
gfsm.RLock()
|
||||
defer gfsm.RUnlock()
|
||||
|
||||
g, ok := gfsm.Generators[key]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if g.positionPath+g.positionRaw >= len(gfsm.Paths)+len(gfsm.Raws) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (gfsm *GeneratorFSM) Position(key string) int {
|
||||
gfsm.RLock()
|
||||
defer gfsm.RUnlock()
|
||||
|
||||
g, ok := gfsm.Generators[key]
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
|
||||
return g.positionPath + g.positionRaw
|
||||
}
|
||||
|
||||
func (gfsm *GeneratorFSM) Reset(key string) {
|
||||
gfsm.Lock()
|
||||
defer gfsm.Unlock()
|
||||
|
||||
if !gfsm.Has(key) {
|
||||
gfsm.Add(key)
|
||||
}
|
||||
|
||||
g, ok := gfsm.Generators[key]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
g.positionPath = 0
|
||||
g.positionRaw = 0
|
||||
}
|
||||
|
||||
func (gfsm *GeneratorFSM) Current(key string) string {
|
||||
gfsm.RLock()
|
||||
defer gfsm.RUnlock()
|
||||
|
||||
g, ok := gfsm.Generators[key]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
|
||||
if g.positionPath < len(gfsm.Paths) && len(gfsm.Paths) != 0 {
|
||||
return gfsm.Paths[g.positionPath]
|
||||
}
|
||||
|
||||
return gfsm.Raws[g.positionRaw]
|
||||
}
|
||||
func (gfsm *GeneratorFSM) Total() int {
|
||||
estimatedRequestsWithPayload := 0
|
||||
if len(gfsm.basePayloads) > 0 {
|
||||
switch gfsm.Type {
|
||||
case generators.Sniper:
|
||||
for _, kv := range gfsm.basePayloads {
|
||||
estimatedRequestsWithPayload += len(kv)
|
||||
}
|
||||
case generators.PitchFork:
|
||||
// Positional so it's equal to the length of one list
|
||||
for _, kv := range gfsm.basePayloads {
|
||||
estimatedRequestsWithPayload += len(kv)
|
||||
break
|
||||
}
|
||||
case generators.ClusterBomb:
|
||||
// Total of combinations => rule of product
|
||||
prod := 1
|
||||
for _, kv := range gfsm.basePayloads {
|
||||
prod *= len(kv)
|
||||
}
|
||||
estimatedRequestsWithPayload += prod
|
||||
}
|
||||
}
|
||||
|
||||
return len(gfsm.Paths) + len(gfsm.Raws) + estimatedRequestsWithPayload
|
||||
}
|
||||
|
||||
func (gfsm *GeneratorFSM) Increment(key string) {
|
||||
gfsm.Lock()
|
||||
defer gfsm.Unlock()
|
||||
|
||||
g, ok := gfsm.Generators[key]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if len(gfsm.Paths) > 0 && g.positionPath < len(gfsm.Paths) {
|
||||
g.positionPath++
|
||||
return
|
||||
}
|
||||
|
||||
if len(gfsm.Raws) > 0 && g.positionRaw < len(gfsm.Raws) {
|
||||
// if we have payloads increment only when the generators are done
|
||||
if g.gchan == nil {
|
||||
g.state = done
|
||||
g.positionRaw++
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,77 +0,0 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
markerParenthesisOpen = "{{"
|
||||
markerParenthesisClose = "}}"
|
||||
markerGeneral = "§"
|
||||
)
|
||||
|
||||
func newReplacer(values map[string]interface{}) *strings.Replacer {
|
||||
var replacerItems []string
|
||||
for key, val := range values {
|
||||
replacerItems = append(
|
||||
replacerItems,
|
||||
fmt.Sprintf("%s%s%s", markerParenthesisOpen, key, markerParenthesisClose),
|
||||
fmt.Sprintf("%s", val),
|
||||
fmt.Sprintf("%s%s%s", markerGeneral, key, markerGeneral),
|
||||
fmt.Sprintf("%s", val),
|
||||
)
|
||||
}
|
||||
|
||||
return strings.NewReplacer(replacerItems...)
|
||||
}
|
||||
|
||||
// HandleDecompression if the user specified a custom encoding (as golang transport doesn't do this automatically)
|
||||
func HandleDecompression(r *HTTPRequest, bodyOrig []byte) (bodyDec []byte, err error) {
|
||||
if r.Request == nil {
|
||||
return bodyOrig, nil
|
||||
}
|
||||
|
||||
encodingHeader := strings.TrimSpace(strings.ToLower(r.Request.Header.Get("Accept-Encoding")))
|
||||
if encodingHeader == "gzip" || encodingHeader == "gzip, deflate" {
|
||||
gzipreader, err := gzip.NewReader(bytes.NewReader(bodyOrig))
|
||||
if err != nil {
|
||||
return bodyDec, err
|
||||
}
|
||||
defer gzipreader.Close()
|
||||
|
||||
bodyDec, err = ioutil.ReadAll(gzipreader)
|
||||
if err != nil {
|
||||
return bodyDec, err
|
||||
}
|
||||
|
||||
return bodyDec, nil
|
||||
}
|
||||
|
||||
return bodyOrig, nil
|
||||
}
|
||||
|
||||
// ZipMapValues converts values from strings slices to flat string
|
||||
func ZipMapValues(m map[string][]string) (m1 map[string]string) {
|
||||
m1 = make(map[string]string)
|
||||
for k, v := range m {
|
||||
m1[k] = strings.Join(v, "")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ExpandMapValues converts values from flat string to strings slice
|
||||
func ExpandMapValues(m map[string]string) (m1 map[string][]string) {
|
||||
m1 = make(map[string][]string)
|
||||
for k, v := range m {
|
||||
m1[k] = []string{v}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func hasMarker(s string) bool {
|
||||
return strings.Contains(s, markerParenthesisOpen) || strings.Contains(s, markerParenthesisClose) || strings.Contains(s, markerGeneral)
|
||||
}
|
|
@ -3,19 +3,19 @@ package templates
|
|||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/generators"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/matchers"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/common/executer"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/workflows"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// Parse parses a yaml request template file
|
||||
func Parse(file string) (*Template, error) {
|
||||
func Parse(filePath string, options *protocols.ExecuterOptions) (*Template, error) {
|
||||
template := &Template{}
|
||||
|
||||
f, err := os.Open(file)
|
||||
f, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -26,108 +26,114 @@ func Parse(file string) (*Template, error) {
|
|||
}
|
||||
defer f.Close()
|
||||
|
||||
template.path = file
|
||||
// Setting up variables regarding template metadata
|
||||
options.TemplateID = template.ID
|
||||
options.TemplateInfo = template.Info
|
||||
options.TemplatePath = filePath
|
||||
|
||||
// If no requests, and it is also not a workflow, return error.
|
||||
if len(template.BulkRequestsHTTP)+len(template.RequestsDNS) <= 0 {
|
||||
if len(template.RequestsDNS)+len(template.RequestsHTTP)+len(template.RequestsFile)+len(template.RequestsNetwork)+len(template.Workflows) == 0 {
|
||||
return nil, fmt.Errorf("no requests defined for %s", template.ID)
|
||||
}
|
||||
|
||||
// Compile the matchers and the extractors for http requests
|
||||
for _, request := range template.BulkRequestsHTTP {
|
||||
// Get the condition between the matchers
|
||||
condition, ok := matchers.ConditionTypes[request.MatchersCondition]
|
||||
if !ok {
|
||||
request.SetMatchersCondition(matchers.ORCondition)
|
||||
} else {
|
||||
request.SetMatchersCondition(condition)
|
||||
// Compile the workflow request
|
||||
if len(template.Workflows) > 0 {
|
||||
compiled := &template.Workflow
|
||||
if err := template.compileWorkflow(options, compiled); err != nil {
|
||||
return nil, errors.Wrap(err, "could not compile workflow")
|
||||
}
|
||||
template.Workflow.Compile(options)
|
||||
template.CompiledWorkflow = compiled
|
||||
}
|
||||
|
||||
// Set the attack type - used only in raw requests
|
||||
attack, ok := generators.AttackTypes[request.AttackType]
|
||||
if !ok {
|
||||
request.SetAttackType(generators.Sniper)
|
||||
} else {
|
||||
request.SetAttackType(attack)
|
||||
// Compile the requests found
|
||||
requests := []protocols.Request{}
|
||||
if len(template.RequestsDNS) > 0 {
|
||||
for _, req := range template.RequestsDNS {
|
||||
requests = append(requests, req)
|
||||
}
|
||||
|
||||
// Validate the payloads if any
|
||||
for name, payload := range request.Payloads {
|
||||
switch pt := payload.(type) {
|
||||
case string:
|
||||
// check if it's a multiline string list
|
||||
if len(strings.Split(pt, "\n")) <= 1 {
|
||||
// check if it's a worldlist file
|
||||
if !generators.FileExists(pt) {
|
||||
// attempt to load the file by taking the full path, tokezining it and searching the template in such paths
|
||||
changed := false
|
||||
pathTokens := strings.Split(template.path, "/")
|
||||
|
||||
for i := range pathTokens {
|
||||
tpath := path.Join(strings.Join(pathTokens[:i], "/"), pt)
|
||||
if generators.FileExists(tpath) {
|
||||
request.Payloads[name] = tpath
|
||||
changed = true
|
||||
|
||||
break
|
||||
template.Executer = executer.NewExecuter(requests, options)
|
||||
}
|
||||
if len(template.RequestsHTTP) > 0 {
|
||||
for _, req := range template.RequestsHTTP {
|
||||
requests = append(requests, req)
|
||||
}
|
||||
|
||||
if !changed {
|
||||
return nil, fmt.Errorf("the %s file for payload %s does not exist or does not contain enough elements", pt, name)
|
||||
template.Executer = executer.NewExecuter(requests, options)
|
||||
}
|
||||
if len(template.RequestsFile) > 0 {
|
||||
for _, req := range template.RequestsFile {
|
||||
requests = append(requests, req)
|
||||
}
|
||||
template.Executer = executer.NewExecuter(requests, options)
|
||||
}
|
||||
case []string, []interface{}:
|
||||
if len(payload.([]interface{})) == 0 {
|
||||
return nil, fmt.Errorf("the payload %s does not contain enough elements", name)
|
||||
if len(template.RequestsNetwork) > 0 {
|
||||
for _, req := range template.RequestsNetwork {
|
||||
requests = append(requests, req)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("the payload %s has invalid type", name)
|
||||
template.Executer = executer.NewExecuter(requests, options)
|
||||
}
|
||||
}
|
||||
|
||||
for _, matcher := range request.Matchers {
|
||||
matchErr := matcher.CompileMatchers()
|
||||
if matchErr != nil {
|
||||
return nil, matchErr
|
||||
}
|
||||
}
|
||||
|
||||
for _, extractor := range request.Extractors {
|
||||
extractErr := extractor.CompileExtractors()
|
||||
if extractErr != nil {
|
||||
return nil, extractErr
|
||||
}
|
||||
}
|
||||
|
||||
request.InitGenerator()
|
||||
}
|
||||
|
||||
// Compile the matchers and the extractors for dns requests
|
||||
for _, request := range template.RequestsDNS {
|
||||
// Get the condition between the matchers
|
||||
condition, ok := matchers.ConditionTypes[request.MatchersCondition]
|
||||
if !ok {
|
||||
request.SetMatchersCondition(matchers.ORCondition)
|
||||
} else {
|
||||
request.SetMatchersCondition(condition)
|
||||
}
|
||||
|
||||
for _, matcher := range request.Matchers {
|
||||
err = matcher.CompileMatchers()
|
||||
if template.Executer != nil {
|
||||
err := template.Executer.Compile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "could not compile request")
|
||||
}
|
||||
template.TotalRequests += template.Executer.Requests()
|
||||
}
|
||||
|
||||
for _, extractor := range request.Extractors {
|
||||
err := extractor.CompileExtractors()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return template, nil
|
||||
}
|
||||
|
||||
// compileWorkflow compiles the workflow for execution
|
||||
func (t *Template) compileWorkflow(options *protocols.ExecuterOptions, workflows *workflows.Workflow) error {
|
||||
for _, workflow := range workflows.Workflows {
|
||||
if err := t.parseWorkflow(workflow, options); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseWorkflow parses and compiles all templates in a workflow recursively
|
||||
func (t *Template) parseWorkflow(workflow *workflows.WorkflowTemplate, options *protocols.ExecuterOptions) error {
|
||||
if err := t.parseWorkflowTemplate(workflow, options); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, subtemplates := range workflow.Subtemplates {
|
||||
if err := t.parseWorkflow(subtemplates, options); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, matcher := range workflow.Matchers {
|
||||
for _, subtemplates := range matcher.Subtemplates {
|
||||
if err := t.parseWorkflow(subtemplates, options); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseWorkflowTemplate parses a workflow template creating an executer
|
||||
func (t *Template) parseWorkflowTemplate(workflow *workflows.WorkflowTemplate, options *protocols.ExecuterOptions) error {
|
||||
opts := protocols.ExecuterOptions{
|
||||
Output: options.Output,
|
||||
Options: options.Options,
|
||||
Progress: options.Progress,
|
||||
Catalogue: options.Catalogue,
|
||||
RateLimiter: options.RateLimiter,
|
||||
ProjectFile: options.ProjectFile,
|
||||
}
|
||||
paths, err := options.Catalogue.GetTemplatePath(workflow.Template)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get workflow template")
|
||||
}
|
||||
if len(paths) != 1 {
|
||||
return errors.Wrap(err, "invalid number of templates matched")
|
||||
}
|
||||
|
||||
template, err := Parse(paths[0], &opts)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not parse workflow template")
|
||||
}
|
||||
workflow.Executer = template.Executer
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -1,7 +1,12 @@
|
|||
package templates
|
||||
|
||||
import (
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/requests"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/dns"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/file"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/http"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/protocols/network"
|
||||
"github.com/projectdiscovery/nuclei/v2/pkg/workflows"
|
||||
)
|
||||
|
||||
// Template is a request template parsed from a yaml file
|
||||
|
@ -10,32 +15,21 @@ type Template struct {
|
|||
ID string `yaml:"id"`
|
||||
// Info contains information about the template
|
||||
Info map[string]string `yaml:"info"`
|
||||
// BulkRequestsHTTP contains the http request to make in the template
|
||||
BulkRequestsHTTP []*requests.BulkHTTPRequest `yaml:"requests,omitempty"`
|
||||
// RequestsHTTP contains the http request to make in the template
|
||||
RequestsHTTP []*http.Request `yaml:"requests,omitempty"`
|
||||
// RequestsDNS contains the dns request to make in the template
|
||||
RequestsDNS []*requests.DNSRequest `yaml:"dns,omitempty"`
|
||||
path string
|
||||
}
|
||||
|
||||
// GetPath of the workflow
|
||||
func (t *Template) GetPath() string {
|
||||
return t.path
|
||||
}
|
||||
|
||||
func (t *Template) GetHTTPRequestCount() int64 {
|
||||
var count int64 = 0
|
||||
for _, request := range t.BulkRequestsHTTP {
|
||||
count += request.GetRequestCount()
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
func (t *Template) GetDNSRequestCount() int64 {
|
||||
var count int64 = 0
|
||||
for _, request := range t.RequestsDNS {
|
||||
count += request.GetRequestCount()
|
||||
}
|
||||
|
||||
return count
|
||||
RequestsDNS []*dns.Request `yaml:"dns,omitempty"`
|
||||
// RequestsFile contains the file request to make in the template
|
||||
RequestsFile []*file.Request `yaml:"file,omitempty"`
|
||||
// RequestsNetwork contains the network request to make in the template
|
||||
RequestsNetwork []*network.Request `yaml:"network,omitempty"`
|
||||
|
||||
// Workflows is a yaml based workflow declaration code.
|
||||
workflows.Workflow `yaml:",inline"`
|
||||
CompiledWorkflow *workflows.Workflow
|
||||
|
||||
// TotalRequests is the total number of requests for the template.
|
||||
TotalRequests int
|
||||
// Executer is the actual template executor for running template requests
|
||||
Executer protocols.Executer
|
||||
}
|
||||
|
|
|
@ -0,0 +1,89 @@
|
|||
// Taken from https://github.com/spf13/cast.
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ToString converts an interface to string in a quick way
|
||||
func ToString(data interface{}) string {
|
||||
switch s := data.(type) {
|
||||
case string:
|
||||
return s
|
||||
case bool:
|
||||
return strconv.FormatBool(s)
|
||||
case float64:
|
||||
return strconv.FormatFloat(s, 'f', -1, 64)
|
||||
case float32:
|
||||
return strconv.FormatFloat(float64(s), 'f', -1, 32)
|
||||
case int:
|
||||
return strconv.Itoa(s)
|
||||
case int64:
|
||||
return strconv.FormatInt(s, 10)
|
||||
case int32:
|
||||
return strconv.Itoa(int(s))
|
||||
case int16:
|
||||
return strconv.FormatInt(int64(s), 10)
|
||||
case int8:
|
||||
return strconv.FormatInt(int64(s), 10)
|
||||
case uint:
|
||||
return strconv.FormatUint(uint64(s), 10)
|
||||
case uint64:
|
||||
return strconv.FormatUint(uint64(s), 10)
|
||||
case uint32:
|
||||
return strconv.FormatUint(uint64(s), 10)
|
||||
case uint16:
|
||||
return strconv.FormatUint(uint64(s), 10)
|
||||
case uint8:
|
||||
return strconv.FormatUint(uint64(s), 10)
|
||||
case []byte:
|
||||
return string(s)
|
||||
case fmt.Stringer:
|
||||
return s.String()
|
||||
case error:
|
||||
return s.Error()
|
||||
default:
|
||||
return fmt.Sprintf("%v", data)
|
||||
}
|
||||
}
|
||||
|
||||
// ToStringSlice casts an interface to a []string type.
|
||||
func ToStringSlice(i interface{}) []string {
|
||||
var a []string
|
||||
|
||||
switch v := i.(type) {
|
||||
case []interface{}:
|
||||
for _, u := range v {
|
||||
a = append(a, ToString(u))
|
||||
}
|
||||
return a
|
||||
case []string:
|
||||
return v
|
||||
case string:
|
||||
return strings.Fields(v)
|
||||
case interface{}:
|
||||
return []string{ToString(v)}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ToStringMap casts an interface to a map[string]interface{} type.
|
||||
func ToStringMap(i interface{}) map[string]interface{} {
|
||||
var m = map[string]interface{}{}
|
||||
|
||||
switch v := i.(type) {
|
||||
case map[interface{}]interface{}:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = val
|
||||
}
|
||||
return m
|
||||
case map[string]interface{}:
|
||||
return v
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue