Merge branch 'projectdiscovery:main' into main

patch-10
ctflearner 2024-08-15 15:52:24 +05:30 committed by GitHub
commit c608afec59
9886 changed files with 319291 additions and 62572 deletions


@ -1,14 +1,14 @@
# Set to true to add reviewers to pull requests
addReviewers: true
addReviewers: false
# Set to true to add assignees to pull requests
addAssignees: true
addAssignees: false
# A list of reviewers to be added to pull requests (GitHub user name)
reviewers:
- ritikchaddha
- pussycat0x
- DhiyaneshGeek
- ritikchaddha
# A number of reviewers added to the pull request
# Set 0 to add all the reviewers (default: 0)
@ -16,9 +16,9 @@ numberOfReviewers: 1
# A list of assignees, overrides reviewers if set
assignees:
- pussycat0x
- ritikchaddha
- DhiyaneshGeek
- pussycat0x
# A number of assignees to add to the pull request
# Set to 0 to add all of the assignees.

.github/scripts/assign_tasks.py

@ -0,0 +1,139 @@
import requests
import sys
import json
# GitHub credentials
password = sys.argv[3]
repo_owner = "projectdiscovery"
repo_name = "nuclei-templates"
pr_user_list = ["DhiyaneshGeek", "pussycat0x", "ritikchaddha"]
issue_user_list = ["DhiyaneshGeek", "pussycat0x", "ritikchaddha", "princechaddha"]
headers = {'Authorization': f'Bearer {password}',
'Accept': 'application/vnd.github+json',
'X-GitHub-Api-Version': '2022-11-28'}
def get_issue_assignee(issue_number):
issue_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/issues?per_page=2"
response = requests.get(issue_url, headers=headers)
if response.status_code == 200:
issue_data = response.json()[1]
assignee = issue_data["assignee"]["login"] if issue_data["assignee"] else "None"
return assignee
else:
print(f"Failed to fetch assignee for issue #{issue_number}")
return None
def assign_issue_or_pr(user, issue_number):
url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/issues/{issue_number}/assignees"
data = { "assignees": [user] }
response = requests.post(url, headers=headers, data=json.dumps(data))
if response.status_code == 201:
print(f"Assigned issue #{issue_number} to {user}")
else:
print(f"Failed to assign issue #{issue_number} to {user}. Status code: {response.status_code}")
def get_pr_assignee_and_reviewer(pull_request_number):
pull_url = f'https://api.github.com/repos/{repo_owner}/{repo_name}/pulls?per_page=2'  # two most recent PRs; [1] below is the previous one
response = requests.get(pull_url, headers=headers)
if response.status_code == 200:
pull_request_data = response.json()[1]
assignee = pull_request_data['assignee']['login'] if pull_request_data['assignee'] else None
reviewers = [reviewer['login'] for reviewer in pull_request_data['requested_reviewers']]
return assignee, reviewers
else:
print(f"Failed to retrieve pull request #{pull_request_number}. Response: {response.text}")
return None, None
def get_pr_author(pull_request_number):
pull_url = f'https://api.github.com/repos/{repo_owner}/{repo_name}/pulls/{pull_request_number}'
response = requests.get(pull_url, headers=headers)
if response.status_code == 200:
pull_request_data = response.json()
author = pull_request_data['user']['login']
return author
else:
print(f"Failed to retrieve pull request #{pull_request_number}. Response: {response.text}")
return None
def review_pr(user, pull_request_number):
url = f'https://api.github.com/repos/{repo_owner}/{repo_name}/pulls/{pull_request_number}/requested_reviewers'
data = { 'reviewers': [user] }
response = requests.post(url, headers=headers, data=json.dumps(data))
if response.status_code == 201:
print(f"Review request for pull request #{pull_request_number} sent to {user} successfully.")
else:
print(f"Failed to send review request for pull request #{pull_request_number}. Response: {response.text}")
def main():
if len(sys.argv) != 4:
print("Usage: python assign_tasks.py <issue_number> <pr_or_issue> <token>")
sys.exit(1)
issue_number = int(sys.argv[1])
type_ = sys.argv[2]
if type_ == 'pr':
assignee, reviewers = get_pr_assignee_and_reviewer(issue_number - 1)
author = get_pr_author(issue_number)
if reviewers:
try:
index = pr_user_list.index(reviewers[0])
try:
reviewer = pr_user_list[index + 1]
except IndexError:
reviewer = pr_user_list[0]
if reviewer == author:
reviewer = pr_user_list[(pr_user_list.index(reviewer) + 1) % len(pr_user_list)]
review_pr(reviewer, issue_number)
else:
review_pr(reviewer, issue_number)
except Exception as e:
reviewer = pr_user_list[0]
review_pr(reviewer, issue_number)
else:
for user in pr_user_list:
if (user != author):
reviewer = user
review_pr(reviewer, issue_number)
break
if assignee:
try:
index = pr_user_list.index(assignee)
if (pr_user_list[index + 1] == reviewer):
assign_issue_or_pr(pr_user_list[index + 2], issue_number)
else:
assign_issue_or_pr(pr_user_list[index + 1], issue_number)
except Exception as e:
if (pr_user_list[0] == reviewer):
assign_issue_or_pr(pr_user_list[1], issue_number)
else:
assign_issue_or_pr(pr_user_list[0], issue_number)
else:
if (pr_user_list[0] == reviewer):
assign_issue_or_pr(pr_user_list[1], issue_number)
else:
assign_issue_or_pr(pr_user_list[0], issue_number)
elif type_ == 'issue':
assignee = get_issue_assignee(issue_number-1)
if assignee:
try:
index = issue_user_list.index(assignee)
assign_issue_or_pr(issue_user_list[index + 1], issue_number)
except Exception as e:
assign_issue_or_pr(issue_user_list[0], issue_number)
else:
assign_issue_or_pr(issue_user_list[0], issue_number)
main()
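The selection above walks `pr_user_list` in order: it takes the entry after the previous reviewer, wraps to the start when the previous reviewer was last, and skips the PR author. A minimal sketch of that rotation, using hypothetical names rather than the script's own structure:

```python
# Minimal round-robin sketch (hypothetical helper, not part of the script):
# pick the entry after `previous`, wrap around the list, and never return
# the PR author.
def next_reviewer(users, previous, author=None):
    try:
        start = users.index(previous) + 1
    except ValueError:
        start = 0  # previous reviewer is not in the rotation; start from the top
    for offset in range(len(users)):
        candidate = users[(start + offset) % len(users)]
        if candidate != author:
            return candidate
    return None  # only possible when every candidate is the author

print(next_reviewer(["DhiyaneshGeek", "pussycat0x", "ritikchaddha"],
                    "ritikchaddha", author="DhiyaneshGeek"))  # -> pussycat0x
```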

.github/scripts/weak-matcher-checks.sh

@ -0,0 +1,33 @@
#!/bin/bash
set -uo pipefail
OUTPUT="/tmp/nuclei-result-${GITHUB_SHA}.out"
CHANGED_FILES="$(sed 's/ / -t /g' <<< "${CHANGED_FILES}")"
WEAK=false
COMMENT=""
eval "nuclei -duc -silent -ud ${GITHUB_WORKSPACE} -u ${HONEYPOT_URL} -o ${OUTPUT} -t ${CHANGED_FILES}"
if [[ "$(wc -l < $OUTPUT)" -gt 0 ]]; then
COMMENT+="**:warning: Weak matcher detected**\n\n"
COMMENT+="It looks like Nuclei has found some results on the honeypot target.\n\n"
COMMENT+="To improve the accuracy of these results and avoid any false positives, "
COMMENT+="please adjust the matchers as needed. "
COMMENT+="This will help in providing more reliable and precise results.\n\n"
COMMENT+="| **Template ID** |\n"
COMMENT+="|--|\n"
COMMENT+=$(grep -Po "^\[[\w_-]+\]" $OUTPUT | sed 's/\[/| /g; s/\]/ |/g' | sed ':a;N;$!ba;s/\n/\\n/g')
COMMENT+="\n\n"
COMMENT+="> Ref ${GITHUB_SHA}"
WEAK=true
fi
echo "weak=${WEAK}" >> $GITHUB_OUTPUT
{
echo "comment<<EOF"
echo -e "${COMMENT}"
echo "EOF"
} >> $GITHUB_OUTPUT


@ -1,6 +1,6 @@
beautifulsoup4==4.11.1
bs4==0.0.1
certifi==2022.9.24
certifi==2023.7.22
charset-normalizer==2.1.1
idna==3.4
Markdown==3.4.1


@ -43,8 +43,8 @@ for page_number in range(1, 11):
# Parse HTML
soup = BeautifulSoup(html, 'html.parser')
results = soup.find(id="main")
articles = results.find_all("article", class_="plugin-card")
results = soup.find(class_="plugin-cards")
articles = results.find_all("div", class_="plugin-card")
# Setting the top tag
top_tag = "top-100,top-200" if page_number <= 5 else "top-200"
@ -124,7 +124,6 @@ info:
http:
- method: GET
path:
- "{{{{BaseURL}}}}/wp-content/plugins/{name}/readme.txt"
@ -171,7 +170,7 @@ http:
if not os.path.exists(template_dir):
os.makedirs(template_dir)
helper_path = f"helpers/wordpress/plugins/{name}.txt"
helper_path = f"{work_dir}/helpers/wordpress/plugins/{name}.txt"
version_file = open(helper_path, "w")
version_file.write(version)
version_file.close()


@ -1,93 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"gopkg.in/yaml.v3"
)
type Classification struct {
CVSSScore string `yaml:"cvss-score,omitempty"`
}
type Info struct {
Name string `yaml:"name"`
Severity string `yaml:"severity"`
Description string `yaml:"description"`
Classification Classification `yaml:"classification,omitempty"`
}
type Data struct {
ID string `yaml:"id"`
Info Info `yaml:"info"`
FilePath string `json:"file_path"`
}
func main() {
if len(os.Args) != 3 {
fmt.Println("Usage: go run main.go <directory> <output_file>")
os.Exit(1)
}
directory := os.Args[1]
outputFile := os.Args[2]
var data []Data
err := filepath.Walk(directory, func(path string, info os.FileInfo, err error) error {
if strings.HasSuffix(path, ".yaml") || strings.HasSuffix(path, ".yml") {
yamlFile, err := ioutil.ReadFile(path)
if err != nil {
fmt.Printf("Error reading YAML file %s: %v\n", path, err)
return err
}
var d Data
err = yaml.Unmarshal(yamlFile, &d)
if err != nil {
fmt.Printf("Error unmarshalling YAML file %s: %v\n", path, err)
return err
}
if d.Info.Classification.CVSSScore == "" {
d.Info.Classification.CVSSScore = "N/A"
}
if d.Info.Classification == (Classification{}) {
d.Info.Classification.CVSSScore = "N/A"
}
fpath := strings.Replace(path, "/home/runner/work/nuclei-templates/nuclei-templates/", "", 1)
d.FilePath = fpath
data = append(data, d)
}
return nil
})
if err != nil {
fmt.Printf("Error reading directory: %v\n", err)
os.Exit(1)
}
var jsonData []byte
for _, d := range data {
temp, err := json.Marshal(d)
if err != nil {
fmt.Printf("Error marshalling JSON: %v\n", err)
os.Exit(1)
}
jsonData = append(jsonData, temp...)
jsonData = append(jsonData, byte('\n'))
}
err = ioutil.WriteFile(outputFile, jsonData, 0644)
if err != nil {
fmt.Printf("Error writing JSON data to file: %v\n", err)
os.Exit(1)
}
fmt.Println("JSON data written to", outputFile)
}

.github/scripts/yaml2json/go.mod

@ -0,0 +1,5 @@
module yaml2json
go 1.22.0
require gopkg.in/yaml.v3 v3.0.1 // indirect

.github/scripts/yaml2json/go.sum

@ -0,0 +1,3 @@
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

.github/scripts/yaml2json/main.go

@ -0,0 +1,108 @@
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"gopkg.in/yaml.v3"
)
type Classification struct {
CVSSScore string `yaml:"cvss-score,omitempty"`
}
type Info struct {
Name string `yaml:"name"`
Severity string `yaml:"severity"`
Description string `yaml:"description"`
Classification Classification `yaml:"classification,omitempty"`
}
type Data struct {
ID string `yaml:"id"`
Info Info `yaml:"info"`
FilePath string `json:"file_path"`
}
func main() {
if len(os.Args) != 3 {
fmt.Println("Usage: go run main.go <directory1[,directory2,...]> <output_file>")
os.Exit(1)
}
input := os.Args[1]
outputFile := os.Args[2]
var directories []string
// Check if the input contains a comma
if strings.Contains(input, ",") {
directories = strings.Split(input, ",")
} else {
directories = []string{input}
}
var data []Data
for _, directory := range directories {
fmt.Println("Generating data for", directory)
err := filepath.Walk(directory, func(path string, info os.FileInfo, err error) error {
if err != nil {
fmt.Printf("Error accessing path %s: %v\n", path, err)
return err
}
if strings.HasSuffix(path, ".yaml") || strings.HasSuffix(path, ".yml") {
yamlFile, err := ioutil.ReadFile(path)
if err != nil {
fmt.Printf("Error reading YAML file %s: %v\n", path, err)
return err
}
var d Data
err = yaml.Unmarshal(yamlFile, &d)
if err != nil {
fmt.Printf("Error unmarshalling YAML file %s: %v\n", path, err)
return err
}
if d.Info.Classification.CVSSScore == "" {
d.Info.Classification.CVSSScore = "N/A"
}
if d.Info.Classification == (Classification{}) {
d.Info.Classification.CVSSScore = "N/A"
}
fpath := strings.Replace(path, "/home/runner/work/nuclei-templates/nuclei-templates/", "", 1)
d.FilePath = fpath
data = append(data, d)
}
return nil
})
if err != nil {
fmt.Printf("Error reading directory: %v\n", err)
os.Exit(1)
}
}
var jsonData []byte
for _, d := range data {
temp, err := json.Marshal(d)
if err != nil {
fmt.Printf("Error marshalling JSON: %v\n", err)
os.Exit(1)
}
jsonData = append(jsonData, temp...)
jsonData = append(jsonData, byte('\n'))
}
err := ioutil.WriteFile(outputFile, jsonData, 0644)
if err != nil {
fmt.Printf("Error writing JSON data to file: %v\n", err)
os.Exit(1)
}
fmt.Println("JSON data written to", outputFile)
}

.github/workflows/autoassign.yml

@ -0,0 +1,26 @@
name: 🤖 issue/pr assignment
on:
pull_request:
types: [opened]
branches:
- main
issues:
types: [opened]
env:
ASSIGN_TASK_TOKEN: ${{ secrets.PDTEAMX_PAT }} # github personal token
jobs:
build:
permissions: write-all
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4 # checkout the repository content
- uses: actions/setup-python@v5
with:
python-version: '3.10' # install the python version needed
- run: pip install requests
- if: github.event_name == 'pull_request'
run: python .github/scripts/assign_tasks.py ${{ github.event.pull_request.number }} pr ${{ secrets.GITHUB_TOKEN }}
- if: github.event_name == 'issues'
run: python .github/scripts/assign_tasks.py ${{ github.event.issue.number }} issue ${{ secrets.GITHUB_TOKEN }}


@ -1,22 +0,0 @@
name: 🗑️ Cache Purge
on:
push:
tags:
- '*'
workflow_dispatch:
jobs:
deploy:
runs-on: ubuntu-latest
steps:
# Wait for 5 minutes
- name: Wait for 2 minutes
run: sleep 120
- name: Purge cache
uses: jakejarvis/cloudflare-purge-action@master
env:
CLOUDFLARE_ZONE: ${{ secrets.CLOUDFLARE_ZONE }}
CLOUDFLARE_TOKEN: ${{ secrets.CLOUDFLARE_TOKEN }}
# PURGE_URLS: '["https://version-check.nuclei.sh/versions"]'


@ -1,46 +0,0 @@
name: ✍🏻 CVE Annotate
on:
push:
branches:
- main
paths:
- '**/cves/**.yaml'
workflow_dispatch:
jobs:
annotate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: 1.20.x
- name: cve-annotate install
run: go install -v github.com/projectdiscovery/nuclei/v2/cmd/cve-annotate@latest
- name: Generate CVE Annotations
id: cve-annotate
run: |
cve-annotate -i .
git status -s | wc -l | xargs -I {} echo CHANGES={} >> $GITHUB_OUTPUT
- name: Commit files
if: steps.cve-annotate.outputs.CHANGES > 0
run: |
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git add .
git commit -am "Auto Generated CVE annotations [$(date)] :robot:"
- name: Push changes
if: steps.cve-annotate.outputs.CHANGES > 0
uses: ad-m/github-push-action@master
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
branch: ${{ github.ref }}


@ -1,40 +1,31 @@
name: Generate JSON Metadata of CVE Templates
name: 📝 CVE JSON Metadata
on:
push:
branches:
- main
paths:
- '**/cves/'
workflow_dispatch: # allows manual triggering of the workflow
- '**/cves/**'
workflow_dispatch:
jobs:
cve2json:
runs-on: ubuntu-latest
if: github.repository == 'projectdiscovery/nuclei-templates'
steps:
- uses: actions/checkout@master
- name: Set up Go
uses: actions/setup-go@v4
- uses: actions/checkout@v4
- uses: projectdiscovery/actions/setup/go@v1
with:
go-version: 1.19
check-latest: true
- name: Run yaml2json.go to generate cves.json
run: |
go env -w GO111MODULE=off
go get gopkg.in/yaml.v3
go run .github/scripts/yaml2json.go $GITHUB_WORKSPACE/http/cves/ cves.json
md5sum cves.json | cut -d' ' -f1 > cves.json-checksum.txt
- name: Commit files
run: |
git pull
git add cves.json cves.json-checksum.txt
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git commit -m "Auto Generated cves.json [$(date)] :robot:" -a
go-version: 'stable'
- run: go run main.go $GITHUB_WORKSPACE/http/cves/,$GITHUB_WORKSPACE/network/cves/ $GITHUB_WORKSPACE/cves.json
working-directory: .github/scripts/yaml2json
- run: md5sum cves.json | cut -d' ' -f1 > cves.json-checksum.txt
- uses: projectdiscovery/actions/setup/git@v1
- uses: projectdiscovery/actions/commit@v1
with:
files: 'cves.json*'
message: 'chore: generate CVEs metadata 🤖'
- name: Push changes
uses: ad-m/github-push-action@master
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
run: |
git pull origin $GITHUB_REF --rebase
git push origin $GITHUB_REF


@ -7,33 +7,34 @@ on:
paths:
- '**.yaml'
workflow_dispatch:
release:
types: [published]
env:
NEW_ADDITION_FILE: '.new-additions'
jobs:
templates:
new-addition:
runs-on: ubuntu-latest
if: github.repository == 'projectdiscovery/nuclei-templates'
steps:
- uses: actions/checkout@master
- uses: actions/checkout@v4
with:
token: ${{ secrets.GITHUB_TOKEN }}
persist-credentials: false
fetch-depth: 0
- name: Generate new template list
id: new-additions
- name: Generate new addition list
run: |
git pull
git diff --name-only --diff-filter=A $(git tag | tail -n 1) @ . | grep .yaml | tee .new-additions
- name: Commit files
run: |
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git add .new-additions -f
git commit --allow-empty -m "Auto Generated New Template Addition List [$(date)] :robot:" -a
- name: Push changes
uses: ad-m/github-push-action@master
git diff --name-only --diff-filter=A $(git tag | tail -n 1) @ . | grep -v "^\.github/" | grep "\.yaml$" | tee $NEW_ADDITION_FILE
- uses: projectdiscovery/actions/setup/git@v1
- uses: projectdiscovery/actions/commit@v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
branch: ${{ github.ref }}
files: '${{ env.NEW_ADDITION_FILE }}'
message: 'chore: generate new addition list 🤖'
- run: |
git pull origin $GITHUB_REF --rebase
git push origin $GITHUB_REF
sync:
needs: new-addition
uses: ./.github/workflows/templates-sync.yml
secrets: inherit


@ -12,36 +12,15 @@ jobs:
runs-on: ubuntu-latest
if: github.repository == 'projectdiscovery/nuclei-templates'
steps:
- name: Install tree
run: sudo apt-get install tree -y
- name: Check out code
uses: actions/checkout@master
with:
persist-credentials: false
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v4
- run: sudo apt-get install tree -y
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: "3.8"
- name: Update README
id: readme-update
run: |
python .github/scripts/update-readme.py
git status -s | wc -l | xargs -I {} echo CHANGES={} >> $GITHUB_OUTPUT
- name: Commit files
if: steps.readme-update.outputs.CHANGES > 0
run: |
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git commit -m "Auto README Update [$(date)] :robot:" -a
- name: Push changes
if: steps.readme-update.outputs.CHANGES > 0
uses: ad-m/github-push-action@master
- run: python .github/scripts/update-readme.py
- uses: projectdiscovery/actions/setup/git@v5
- uses: projectdiscovery/actions/commit@v5
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
branch: ${{ github.ref }}
files: 'README.md'
message: 'docs: update README 🤖'
- run: git push origin $GITHUB_REF


@ -1,19 +0,0 @@
name: ❄️ YAML Lint
on:
pull_request:
paths:
- '**.yaml'
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Yamllint
uses: karancode/yamllint-github-action@v2.1.1
with:
yamllint_config_filepath: .yamllint
yamllint_strict: false
yamllint_comment: true


@ -6,41 +6,28 @@ on:
- main
paths:
- '**.yaml'
workflow_dispatch: # allows manual triggering of the workflow
workflow_dispatch:
env:
CHECKSUM_FILE: 'templates-checksum.txt'
jobs:
checksum:
runs-on: ubuntu-latest
if: github.repository == 'projectdiscovery/nuclei-templates'
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v4
- uses: actions/checkout@v4
- uses: projectdiscovery/actions/generate/checksum@v1
with:
go-version: 1.20.x
- name: install checksum generator
run: |
go install -v github.com/projectdiscovery/nuclei/v2/cmd/generate-checksum@dev
- name: generate checksum
id: checksum
run: |
generate-checksum /home/runner/work/nuclei-templates/nuclei-templates/ templates-checksum.txt
git status -s | wc -l | xargs -I {} echo CHANGES={} >> $GITHUB_OUTPUT
- name: Commit files
if: steps.checksum.outputs.CHANGES > 0
run: |
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git add templates-checksum.txt
git commit -am "Auto Generated Templates Checksum [$(date)] :robot:"
templates-directory: '${{ github.workspace }}'
checksum-file: '${{ env.CHECKSUM_FILE }}'
- run: git status
- uses: projectdiscovery/actions/setup/git@v1
- uses: projectdiscovery/actions/commit@v1
with:
files: '${{ env.CHECKSUM_FILE }}'
message: 'chore: generate templates checksum 🤖'
- name: Push changes
uses: ad-m/github-push-action@master
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
branch: ${{ github.ref }}
run: |
git pull origin $GITHUB_REF --rebase
git push origin $GITHUB_REF


@ -2,31 +2,20 @@ name: 📑 Template-DB Indexer
on:
push:
branches:
- main
paths:
- '**.yaml'
tags:
- '*'
workflow_dispatch:
jobs:
index:
runs-on: ubuntu-latest
if: github.repository == 'projectdiscovery/nuclei-templates'
steps:
- name: Set up Go
uses: actions/setup-go@v4
- uses: projectdiscovery/actions/generate/db-indexer@v1
with:
go-version: 1.19
- name: Installing Indexer
run: |
git config --global url."https://${{ secrets.ACCESS_TOKEN }}@github".insteadOf https://github
git clone https://github.com/projectdiscovery/nucleish-api.git
cd nucleish-api/cmd/generate-index/
go install
- name: Generate Index
token: '${{ secrets.ACCESS_TOKEN }}'
mode: 'templates'
args: '-eft'
env:
AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }}
AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }}
run: |
generate-index -mode templates

.github/workflows/template-sign.yml

@ -0,0 +1,30 @@
name: ☑️ Template Sign
on:
push:
branches:
- main
paths:
- '**.yaml'
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
if: github.repository == 'projectdiscovery/nuclei-templates'
steps:
- uses: actions/checkout@v4
- uses: projectdiscovery/actions/setup/nuclei@v1
- run: nuclei -lfa -duc -sign -ud $GITHUB_WORKSPACE -t .
env:
NUCLEI_USER_CERTIFICATE: ${{ secrets.NUCLEI_USER_CERTIFICATE }}
NUCLEI_USER_PRIVATE_KEY: ${{ secrets.NUCLEI_USER_PRIVATE_KEY }}
- uses: projectdiscovery/actions/setup/git@v1
- uses: projectdiscovery/actions/commit@v1
with:
files: '.'
message: 'chore: sign templates 🤖'
- name: Push changes
run: |
git pull origin $GITHUB_REF --rebase
git push origin $GITHUB_REF


@ -1,29 +0,0 @@
name: 🛠 Template Validate
on:
pull_request:
paths:
- '**.yaml'
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: 1.20.x
- name: nuclei install
run: go install -v github.com/projectdiscovery/nuclei/v2/cmd/nuclei@latest
- name: Template Validation
run: |
cp -r ${{ github.workspace }} $HOME
nuclei -duc -validate
nuclei -duc -validate -w ./workflows

.github/workflows/templateman.yml

@ -0,0 +1,23 @@
name: 🤖 TemplateMan
on:
workflow_dispatch:
jobs:
templateman:
runs-on: ubuntu-latest
if: github.repository == 'projectdiscovery/nuclei-templates'
steps:
- uses: actions/checkout@v4
- uses: projectdiscovery/actions/setup/templateman@v1
with:
token: '${{ secrets.ACCESS_TOKEN }}'
- run: tmc -mr -e -at <<< "$(pwd)"
- uses: projectdiscovery/actions/setup/git@v1
- uses: projectdiscovery/actions/commit@v1
with:
message: 'chore: update TemplateMan 🤖'
- name: Push changes
run: |
git pull origin $GITHUB_REF --rebase
git push origin $GITHUB_REF


@ -9,50 +9,32 @@ on:
jobs:
build:
runs-on: ubuntu-latest
if: github.repository == 'projectdiscovery/nuclei-templates'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v4
- uses: projectdiscovery/actions/templates/stats@v1
with:
go-version: 1.19
- name: Installing Template Stats
run: |
go install github.com/projectdiscovery/templates-stats@main
shell: bash
- name: Markdown Stats
run: |
templates-stats -output TEMPLATES-STATS.md -path /home/runner/work/nuclei-templates/nuclei-templates/
shell: bash
- name: JSON Stats
run: |
templates-stats -output TEMPLATES-STATS.json -json -path /home/runner/work/nuclei-templates/nuclei-templates/
shell: bash
- name: Top 10 Stats
run: |
templates-stats -output TOP-10.md -top 10 -path /home/runner/work/nuclei-templates/nuclei-templates/
shell: bash
- name: Get statistical changes
id: stats
run: git status -s | wc -l | xargs -I {} echo CHANGES={} >> $GITHUB_OUTPUT
- name: Commit files
if: steps.stats.outputs.CHANGES > 0
run: |
git add TEMPLATES-STATS.*
git add TOP-10.md
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git commit -m "Auto Generated Templates Stats [$(date)] :robot:" -a
- name: Push changes
uses: ad-m/github-push-action@master
path: '${{ github.workspace }}'
output: 'TEMPLATES-STATS.md'
- uses: projectdiscovery/actions/templates/stats@v1
with:
path: '${{ github.workspace }}'
output: 'TEMPLATES-STATS.json'
args: '-json'
- uses: projectdiscovery/actions/templates/stats@v1
with:
path: '${{ github.workspace }}'
output: 'TOP-10.md'
args: '-top 10'
- uses: projectdiscovery/actions/setup/git@v1
- uses: projectdiscovery/actions/commit@v1
with:
files: |
TEMPLATES-STATS.*
TOP-10.md
message: 'chore: generate templates stats 🤖'
- uses: ad-m/github-push-action@master
with:
github_token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/templates-sync.yml

@ -0,0 +1,62 @@
name: Sync Repositories Workflow
on:
push:
paths:
- '.new-additions'
- '.github/scripts/yaml2json/go.mod'
- '.github/scripts/yaml2json/go.sum'
- '.github/workflows/tests.yaml'
- 'CVE-2024-7339.yaml'
- 'http/cves/2022/CVE-2022-27043.yaml'
- 'http/cves/2023/CVE-2023-43323.yaml'
- 'http/cves/2023/CVE-2023-44393.yaml'
- 'http/cves/2023/CVE-2023-5222.yaml'
- 'http/cves/2024/CVE-2024-24763.yaml'
- 'http/cves/2024/CVE-2024-34061.yaml'
- 'http/cves/2024/CVE-2024-36104.yaml'
- 'http/cves/2024/CVE-2024-38856.yaml'
- 'http/cves/2024/CVE-2024-39903.yaml'
- 'http/cves/2024/CVE-2024-39907.yaml'
- 'http/cves/2024/CVE-2024-40422.yaml'
- 'http/cves/2024/CVE-2024-41107.yaml'
- 'http/cves/2024/CVE-2024-41628.yaml'
- 'http/cves/2024/CVE-2024-5975.yaml'
- 'http/cves/2024/CVE-2024-6205.yaml'
- 'http/cves/2024/CVE-2024-6366.yaml'
- 'http/cves/2024/CVE-2024-6396.yaml'
- 'http/cves/2024/CVE-2024-6781.yaml'
- 'http/cves/2024/CVE-2024-6782.yaml'
- 'http/cves/2024/CVE-2024-6922.yaml'
- 'http/cves/2024/CVE-2024-7008.yaml'
- 'http/cves/2024/CVE-2024-7120.yaml'
- 'http/cves/2024/CVE-2024-7188.yaml'
- 'http/cves/2024/CVE-2024-7332.yaml'
- 'http/cves/2024/CVE-2024-7340.yaml'
- 'http/default-logins/apache/cloudstack-default-login.yaml'
- 'http/exposed-panels/airos-panel.yaml'
- 'http/exposed-panels/metube-panel.yaml'
- 'http/exposed-panels/openedge-panel.yaml'
- 'http/exposed-panels/whatsup-gold-panel.yaml'
- 'http/exposures/files/gitlab-ci-yml.yaml'
- 'http/misconfiguration/changedetection-unauth.yaml'
- 'http/misconfiguration/deployment-interface-exposed.yaml'
- 'http/misconfiguration/installer/quickcms-installer.yaml'
- 'http/misconfiguration/manage-cabinet-register.yaml'
- 'http/osint/user-enumeration/substack.yaml'
- 'http/technologies/apache/apache-ofbiz-detect.yaml'
- 'http/technologies/apache/apache-shenyu-detect.yaml'
- 'http/technologies/searxng-detect.yaml'
- 'http/vulnerabilities/esafenet/esafenet-netsecconfigajax-sqli.yaml'
- 'http/vulnerabilities/esafenet/esafenet-noticeajax-sqli.yaml'
- 'http/vulnerabilities/landray/landray-oa-replaceextend-rce.yaml'
- 'javascript/misconfiguration/x11/x11-unauth-access.yaml'
- 'javascript/udp/detection/db2-discover.yaml'
workflow_dispatch:
jobs:
sync:
if: github.repository == 'projectdiscovery/nuclei-templates'
runs-on: ubuntu-latest
steps:
- uses: projectdiscovery/actions/templates/sync@v1
with:
token: ${{ secrets.GTOKEN }}

.github/workflows/tests.yaml

@ -0,0 +1,75 @@
name: 🛠 Tests
on:
pull_request:
paths:
- '**.yaml'
workflow_dispatch:
jobs:
lint:
runs-on: ubuntu-latest
if: github.repository == 'projectdiscovery/nuclei-templates'
steps:
- uses: actions/checkout@v4
- uses: karancode/yamllint-github-action@v2.1.1
with:
yamllint_config_filepath: .yamllint
yamllint_strict: false
yamllint_comment: true
validate:
needs: lint
runs-on: ubuntu-latest
if: github.repository == 'projectdiscovery/nuclei-templates'
steps:
- uses: actions/checkout@v4
- uses: projectdiscovery/actions/setup/nuclei@v1
- name: Validate templates
run: nuclei -duc -validate -lfa -ud $GITHUB_WORKSPACE -w workflows/ -et .github/
weak-matcher-checks:
needs: lint
runs-on: ubuntu-latest
if: github.repository == 'projectdiscovery/nuclei-templates'
env:
HONEYPOT_URL: 'http://honey.scanme.sh'
steps:
- uses: actions/checkout@v4
- uses: dorny/paths-filter@v3
id: filter
with:
list-files: shell
filters: |
templates: &templates
- '**.yml'
- '**.yaml'
- '!.github/**'
changed:
- added|modified: *templates
- uses: projectdiscovery/actions/setup/nuclei@v1
- run: bash weak-matcher-checks.sh
id: check
if: steps.filter.outputs.changed == 'true'
env:
CHANGED_FILES: '${{ steps.filter.outputs.changed_files }}'
working-directory: '.github/scripts/'
- uses: actions/github-script@v7
if: steps.check.outputs.weak == 'true'
with:
script: |
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: process.env.COMMENT
})
github.rest.issues.addLabels({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
labels: ['false-positive']
})
env:
COMMENT: '${{ steps.check.outputs.comment }}'


@ -2,21 +2,21 @@ name: ✨ WordPress Plugins - Update
on:
schedule:
- cron: "0 4 * * *" # every day at 4am UTC
workflow_dispatch:
- cron: "0 0 * * *"
jobs:
Update:
runs-on: ubuntu-latest
if: github.repository == 'projectdiscovery/nuclei-templates'
steps:
- name: Check out repository code
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal token
fetch-depth: 0 # otherwise, you will failed to push refs to dest repo
- name: Install Python3
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: "3.10"
- run: |

.gitignore

@ -2,4 +2,5 @@
.DS_Store
local/
.checksum
.new-additions
.new-additions
*.txt


@ -1,8 +1,48 @@
http/cves/2019/CVE-2019-1943.yaml
http/cves/2023/CVE-2023-25157.yaml
http/miscellaneous/crypto-mining-malware.yaml
http/misconfiguration/symfony-fragment.yaml
http/technologies/openproject-detect.yaml
http/technologies/phplist-detect.yaml
network/cves/2022/CVE-2022-24706.yaml
network/cves/2023/CVE-2023-33246.yaml
.github/scripts/yaml2json/go.mod
.github/scripts/yaml2json/go.sum
.github/workflows/tests.yaml
CVE-2024-7339.yaml
http/cves/2022/CVE-2022-27043.yaml
http/cves/2023/CVE-2023-43323.yaml
http/cves/2023/CVE-2023-44393.yaml
http/cves/2023/CVE-2023-5222.yaml
http/cves/2024/CVE-2024-24763.yaml
http/cves/2024/CVE-2024-34061.yaml
http/cves/2024/CVE-2024-36104.yaml
http/cves/2024/CVE-2024-38856.yaml
http/cves/2024/CVE-2024-39903.yaml
http/cves/2024/CVE-2024-39907.yaml
http/cves/2024/CVE-2024-40422.yaml
http/cves/2024/CVE-2024-41107.yaml
http/cves/2024/CVE-2024-41628.yaml
http/cves/2024/CVE-2024-5975.yaml
http/cves/2024/CVE-2024-6205.yaml
http/cves/2024/CVE-2024-6366.yaml
http/cves/2024/CVE-2024-6396.yaml
http/cves/2024/CVE-2024-6781.yaml
http/cves/2024/CVE-2024-6782.yaml
http/cves/2024/CVE-2024-6922.yaml
http/cves/2024/CVE-2024-7008.yaml
http/cves/2024/CVE-2024-7120.yaml
http/cves/2024/CVE-2024-7188.yaml
http/cves/2024/CVE-2024-7332.yaml
http/cves/2024/CVE-2024-7340.yaml
http/default-logins/apache/cloudstack-default-login.yaml
http/exposed-panels/airos-panel.yaml
http/exposed-panels/metube-panel.yaml
http/exposed-panels/openedge-panel.yaml
http/exposed-panels/whatsup-gold-panel.yaml
http/exposures/files/gitlab-ci-yml.yaml
http/misconfiguration/changedetection-unauth.yaml
http/misconfiguration/deployment-interface-exposed.yaml
http/misconfiguration/installer/quickcms-installer.yaml
http/misconfiguration/manage-cabinet-register.yaml
http/osint/user-enumeration/substack.yaml
http/technologies/apache/apache-ofbiz-detect.yaml
http/technologies/apache/apache-shenyu-detect.yaml
http/technologies/searxng-detect.yaml
http/vulnerabilities/esafenet/esafenet-netsecconfigajax-sqli.yaml
http/vulnerabilities/esafenet/esafenet-noticeajax-sqli.yaml
http/vulnerabilities/landray/landray-oa-replaceextend-rce.yaml
javascript/misconfiguration/x11/x11-unauth-access.yaml
javascript/udp/detection/db2-discover.yaml


@ -2,7 +2,7 @@
# ====================================
#
# This is default list of tags and files to excluded from default nuclei scan.
# More details - https://nuclei.projectdiscovery.io/nuclei/get-started/#template-exclusion
# More details - https://docs.projectdiscovery.io/tools/nuclei/running#template-exclusion
#
# ============ DO NOT EDIT ============
# Automatically updated by nuclei on execution from nuclei-templates
@ -13,8 +13,10 @@
# unless asked for by the user.
tags:
- "fuzz"
- "dos"
- "local"
- "fuzz"
- "bruteforce"
# The following templates have been excluded because they have weak matchers and may generate FP results.
# Please feel free to create PR if you can update the templates with strict matchers.
@ -24,14 +26,18 @@ tags:
files:
- http/cves/2006/CVE-2006-1681.yaml
- http/cves/2007/CVE-2007-5728.yaml
- http/cves/2014/CVE-2014-9608.yaml
- http/cves/2018/CVE-2018-5233.yaml
- http/cves/2019/CVE-2019-14696.yaml
- http/cves/2020/CVE-2020-11930.yaml
- http/cves/2020/CVE-2020-19295.yaml
- http/cves/2020/CVE-2020-2036.yaml
- http/cves/2020/CVE-2020-28351.yaml
- http/cves/2021/CVE-2021-35265.yaml
- http/vulnerabilities/oracle/oracle-ebs-xss.yaml
- http/vulnerabilities/other/nginx-module-vts-xss.yaml
- http/cves/2021/CVE-2021-28164.yaml
- http/fuzzing/wordpress-themes-detect.yaml
- http/fuzzing/mdb-database-file.yaml
- http/fuzzing/iis-shortname.yaml
- dns/soa-detect.yaml
- dns/txt-service-detect.yaml
- javascript/enumeration/pop3/pop3-capabilities-enum.yaml
- javascript/enumeration/redis/redis-require-auth.yaml
- dast/vulnerabilities/sqli/time-based-sqli.yaml
- javascript/enumeration/minecraft-enum.yaml
- javascript/enumeration/minecraft-enum.yaml


@ -9,6 +9,7 @@ ignore: |
rules:
document-start: disable
comments-indentation: disable
line-length: disable
new-lines: disable
new-line-at-end-of-file: disable
@ -18,4 +19,8 @@ rules:
ignore-shebangs: true
min-spaces-from-content: 1
empty-lines:
max: 5
max: 5
braces:
forbid: true
brackets:
forbid: true


@ -30,8 +30,8 @@ git remote add upstream https://github.com/projectdiscovery/nuclei-templates
```sh
git remote update
git checkout master
git rebase upstream/master
git checkout main
git rebase upstream/main
```
## Step 3 : Create your Template Branch

CVE-2024-7339.yaml

@ -0,0 +1,50 @@
id: CVE-2024-7339
info:
name: TVT DVR Sensitive Device - Information Disclosure
author: Stuxctf
severity: medium
description: |
A vulnerability has been found in TVT DVR TD-2104TS-CL, DVR TD-2108TS-HP, Provision-ISR DVR SH-4050A5-5L(MM) and AVISION DVR AV108T and classified as problematic. This vulnerability affects unknown code of the file /queryDevInfo. The manipulation leads to information disclosure.
impact: |
An attacker get detailed device information including hardware and software versions, serial numbers, and network configuration.
remediation: |
Implement strict access controls and authentication mechanisms to manage access to the device interfaces.
reference:
- https://netsecfish.notion.site/Sensitive-Device-Information-Disclosure-in-TVT-DVR-fad1cce703d946969be5130bf3aaac0d
- https://netsecfish.notion.site/Sensitive-Device-Information-Disclosure-in-TVT-DVR-fad1cce703d946969be5130bf3aaac0d?pvs=4
- https://vuldb.com/?ctiid.273262
- https://vuldb.com/?id.273262
- https://vuldb.com/?submit.379373
classification:
cvss-metrics: CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N
cvss-score: 5.3
cve-id: CVE-2024-7339
cwe-id: CWE-200
epss-score: 0.00045
epss-percentile: 0.16163
metadata:
verified: true
max-request: 1
tags: cve,cve2024,dvr,tvt,info-leak
http:
- raw:
- |
POST /queryDevInfo HTTP/1.1
Host: {{Hostname}}

<?xml version="1.0" encoding="utf-8" ?><request version="1.0" systemType="NVMS-9000" clientType="WEB"/>
matchers-condition: and
matchers:
- type: word
words:
- "softwareVersion"
- "eth0"
condition: and
- type: status
status:
- 200
# digest: 490a0046304402200854a7eafd3bb701237ce315534258e891e25fe34c1652b55a1812764ddfc3ca02201376cfbf5e9dec38011bac3616f102aa8bd7b303601e82c7142114ea95191ee2:922c64590222798bb761d5b6d8e72950
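The matchers above require both `softwareVersion` and `eth0` in an HTTP 200 response to the unauthenticated POST. A rough manual reproduction of the same request and check, with a hypothetical target URL, could look like:

```python
# Manual sketch of the template's request and matcher logic; the target URL
# is a placeholder, not part of the template.
import requests

target = "http://device.example"  # hypothetical DVR address
body = ('<?xml version="1.0" encoding="utf-8" ?>'
        '<request version="1.0" systemType="NVMS-9000" clientType="WEB"/>')
resp = requests.post(f"{target}/queryDevInfo", data=body, timeout=10)
vulnerable = (resp.status_code == 200
              and "softwareVersion" in resp.text
              and "eth0" in resp.text)
print("vulnerable:", vulnerable)
```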


@ -40,20 +40,20 @@ An overview of the nuclei template project, including statistics on unique tags,
## Nuclei Templates Top 10 statistics
| TAG | COUNT | AUTHOR | COUNT | DIRECTORY | COUNT | SEVERITY | COUNT | TYPE | COUNT |
|-----------|-------|--------------|-------|----------------------|-------|----------|-------|------|-------|
| cve | 1855 | dhiyaneshdk | 835 | http | 5860 | info | 2857 | file | 123 |
| panel | 896 | dwisiswant0 | 794 | workflows | 190 | high | 1270 | dns | 18 |
| wordpress | 781 | daffainfo | 664 | file | 123 | medium | 1042 | | |
| exposure | 677 | pikpikcu | 353 | network | 93 | critical | 704 | | |
| wp-plugin | 672 | pdteam | 278 | dns | 18 | low | 216 | | |
| xss | 646 | pussycat0x | 240 | ssl | 12 | unknown | 26 | | |
| osint | 639 | geeknik | 220 | headless | 9 | | | | |
| tech | 602 | ricardomaia | 215 | TEMPLATES-STATS.json | 1 | | | | |
| edb | 596 | ritikchaddha | 210 | contributors.json | 1 | | | | |
| lfi | 548 | 0x_akoko | 179 | cves.json | 1 | | | | |
| TAG | COUNT | AUTHOR | COUNT | DIRECTORY | COUNT | SEVERITY | COUNT | TYPE | COUNT |
|-----------|-------|---------------|-------|------------|-------|----------|-------|------|-------|
| cve | 2604 | dhiyaneshdk | 1360 | http | 7723 | info | 3802 | file | 402 |
| panel | 1173 | daffainfo | 864 | file | 402 | high | 1843 | dns | 25 |
| wordpress | 1008 | dwisiswant0 | 803 | workflows | 192 | medium | 1588 | | |
| exposure | 971 | pussycat0x | 447 | network | 137 | critical | 1083 | | |
| xss | 919 | ritikchaddha | 393 | cloud | 134 | low | 272 | | |
| wp-plugin | 878 | pikpikcu | 353 | code | 81 | unknown | 41 | | |
| osint | 805 | princechaddha | 303 | javascript | 61 | | | | |
| tech | 703 | pdteam | 297 | ssl | 29 | | | | |
| lfi | 685 | ricardomaia | 241 | dast | 25 | | | | |
| misconfig | 678 | geeknik | 231 | dns | 22 | | | | |
**404 directories, 6542 files**.
**671 directories, 9126 files**.
</td>
</tr>

README_JA.md

@ -0,0 +1,94 @@
<h1 align="center">
Nuclei テンプレート
</h1>
<h4 align="center">アプリケーションのセキュリティ脆弱性を発見するためのNucleiエンジン用テンプレートのコミュニティキュレーションリスト</h4>
<p align="center">
<a href="https://github.com/projectdiscovery/nuclei-templates/issues"><img src="https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat"></a>
<a href="https://github.com/projectdiscovery/nuclei-templates/releases"><img src="https://img.shields.io/github/release/projectdiscovery/nuclei-templates"></a>
<a href="https://twitter.com/pdnuclei"><img src="https://img.shields.io/twitter/follow/pdnuclei.svg?logo=twitter"></a>
<a href="https://discord.gg/projectdiscovery"><img src="https://img.shields.io/discord/695645237418131507.svg?logo=discord"></a>
</p>
<p align="center">
<a href="https://nuclei.projectdiscovery.io/templating-guide/">ドキュメント</a>
<a href="#-contributions">貢献</a>
<a href="#-discussion">ディスカッション</a>
<a href="#-community">コミュニティ</a>
<a href="https://nuclei.projectdiscovery.io/faq/templates/">FAQs</a>
<a href="https://discord.gg/projectdiscovery">Discordに参加</a>
</p>
<p align="center">
<a href="https://github.com/projectdiscovery/nuclei-templates/blob/master/README.md">English</a>
<a href="https://github.com/projectdiscovery/nuclei-templates/blob/master/README_KR.md">한국어</a>
<a href="https://github.com/projectdiscovery/nuclei-templates/blob/master/README_JP.md">日本語</a>
</p>
----
テンプレートは、実際のスキャンエンジンを動作させる[nucleiスキャナー](https://github.com/projectdiscovery/nuclei)のコアです。
このリポジトリは、私たちのチームが提供するテンプレートや、コミュニティからの貢献によるさまざまなテンプレートを保存・管理します。
テンプレートのリストを増やすために、**プルリクエスト**や[Github issues](https://github.com/projectdiscovery/nuclei-templates/issues/new?assignees=&labels=&template=submit-template.md&title=%5Bnuclei-template%5D+)を通じて貢献していただけると幸いです。
## Nuclei テンプレートの概要
Nucleiテンプレートプロジェクトの概要であり、ユニークなタグ、著者、ディレクトリ、重大度、テンプレートの種類に関する統計を含みます。以下の表は、各マトリックスのトップ10の統計を示しています。拡張バージョンは[こちら](TEMPLATES-STATS.md)で確認でき、[JSON](TEMPLATES-STATS.json)形式でも利用可能です。
<table>
<tr>
<td>
## Nuclei テンプレート トップ10統計
| タグ | 数 | 著者 | 数 | ディレクトリ | 数 | 重大度 | 数 | 種類 | 数 |
|-----------|-------|---------------|-------|------------------|-------|----------|-------|---------|-------|
| cve | 1325 | daffainfo | 629 | cves | 1306 | info | 1398 | http | 3644 |
| panel | 604 | dhiyaneshdk | 509 | exposed-panels | 613 | high | 955 | file | 76 |
| lfi | 490 | pikpikcu | 322 | vulnerabilities | 506 | medium | 784 | network | 50 |
| xss | 451 | pdteam | 269 | technologies | 273 | critical | 445 | dns | 17 |
| wordpress | 409 | geeknik | 187 | exposures | 254 | low | 211 | | |
| exposure | 360 | dwisiswant0 | 169 | token-spray | 230 | unknown | 7 | | |
| cve2021 | 324 | 0x_akoko | 157 | misconfiguration | 210 | | | | |
| rce | 319 | princechaddha | 149 | workflows | 187 | | | | |
| wp-plugin | 304 | pussycat0x | 130 | default-logins | 102 | | | | |
| tech | 286 | gy741 | 126 | file | 76 | | | | |
**286個のディレクトリ、4012個のファイル**。
</td>
</tr>
</table>
📖 ドキュメント
-----
新しいテンプレートやカスタムテンプレートを作成するための詳細なドキュメントは、https://nuclei.projectdiscovery.io で確認できます。作業方法を理解するためのテンプレートも用意しています。
💪 貢献
-----
Nucleiテンプレートはコミュニティの貢献によって動作します。
[テンプレートの貢献](https://github.com/projectdiscovery/nuclei-templates/issues/new?assignees=&labels=&template=submit-template.md&title=%5Bnuclei-template%5D+)、[機能リクエスト](https://github.com/projectdiscovery/nuclei-templates/issues/new?assignees=&labels=&template=feature_request.md&title=%5BFeature%5D+)、[バグ報告](https://github.com/projectdiscovery/nuclei-templates/issues/new?assignees=&labels=&template=bug_report.md&title=%5BBug%5D+)はいつでも歓迎します。
![Alt](https://repobeats.axiom.co/api/embed/55ee65543bb9a0f9c797626c4e66d472a517d17c.svg "Repobeats analytics image")
💬 ディスカッション
-----
質問、疑問、アイデアを話し合いたいですか?
[Github discussions](https://github.com/projectdiscovery/nuclei-templates/discussions)で自由に始めることができます。
👨‍💻 コミュニティ
-----
プロジェクトの管理者と直接話し合い、セキュリティや自動化に関することを他の人と共有するために、[Discord Community](https://discord.gg/projectdiscovery)に参加することを歓迎します。さらに、Nucleiに関するすべての情報を更新するために、[Twitter](https://twitter.com/pdnuclei)をフォローすることもできます。
<p align="center">
<a href="https://github.com/projectdiscovery/nuclei-templates/graphs/contributors">
<img src="https://contrib.rocks/image?repo=projectdiscovery/nuclei-templates&max=300">
</a>
</p>
皆さんの貢献とコミュニティの活性化への努力に感謝します。
:heart:

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large


@ -1,12 +1,12 @@
| TAG | COUNT | AUTHOR | COUNT | DIRECTORY | COUNT | SEVERITY | COUNT | TYPE | COUNT |
|-----------|-------|--------------|-------|----------------------|-------|----------|-------|------|-------|
| cve | 1855 | dhiyaneshdk | 835 | http | 5860 | info | 2857 | file | 123 |
| panel | 896 | dwisiswant0 | 794 | workflows | 190 | high | 1270 | dns | 18 |
| wordpress | 781 | daffainfo | 664 | file | 123 | medium | 1042 | | |
| exposure | 677 | pikpikcu | 353 | network | 93 | critical | 704 | | |
| wp-plugin | 672 | pdteam | 278 | dns | 18 | low | 216 | | |
| xss | 646 | pussycat0x | 240 | ssl | 12 | unknown | 26 | | |
| osint | 639 | geeknik | 220 | headless | 9 | | | | |
| tech | 602 | ricardomaia | 215 | TEMPLATES-STATS.json | 1 | | | | |
| edb | 596 | ritikchaddha | 210 | contributors.json | 1 | | | | |
| lfi | 548 | 0x_akoko | 179 | cves.json | 1 | | | | |
| TAG | COUNT | AUTHOR | COUNT | DIRECTORY | COUNT | SEVERITY | COUNT | TYPE | COUNT |
|-----------|-------|---------------|-------|------------|-------|----------|-------|------|-------|
| cve | 2604 | dhiyaneshdk | 1360 | http | 7723 | info | 3802 | file | 402 |
| panel | 1173 | daffainfo | 864 | file | 402 | high | 1843 | dns | 25 |
| wordpress | 1008 | dwisiswant0 | 803 | workflows | 192 | medium | 1588 | | |
| exposure | 971 | pussycat0x | 447 | network | 137 | critical | 1083 | | |
| xss | 919 | ritikchaddha | 393 | cloud | 134 | low | 272 | | |
| wp-plugin | 878 | pikpikcu | 353 | code | 81 | unknown | 41 | | |
| osint | 805 | princechaddha | 303 | javascript | 61 | | | | |
| tech | 703 | pdteam | 297 | ssl | 29 | | | | |
| lfi | 685 | ricardomaia | 241 | dast | 25 | | | | |
| misconfig | 678 | geeknik | 231 | dns | 22 | | | | |


@ -0,0 +1,41 @@
id: acm-cert-expired
info:
name: Expired ACM Certificates
author: princechaddha
severity: high
description: |
Ensure removal of expired SSL/TLS certificates in AWS Certificate Manager to comply with Amazon Security Best Practices.
impact: |
Expired certificates can lead to service interruptions and expose applications to man-in-the-middle attacks.
remediation: |
Regularly review ACM for expired certificates and delete them or replace with updated versions.
reference:
- https://docs.aws.amazon.com/acm/latest/userguide/acm-certificate.html
tags: cloud,devops,aws,amazon,acm,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws acm list-certificates --region $region --certificate-statuses EXPIRED
matchers:
- type: word
words:
- 'CertificateArn'
extractors:
- type: json
name: certificatearn
json:
- '.CertificateSummaryList[] | .CertificateArn'
- type: dsl
dsl:
- 'region + " AWS region have expired SSL/TLS certificates"'
# digest: 490a00463044022020875df0814bb41d33d015a50a6a2d23309be5b695bad8ba9840f77e139f719b02205052abd88786969a3d7dcc2594b881841f82308df082a71df3b221085d1e9ceb:922c64590222798bb761d5b6d8e72950
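The single code step above shells out to the AWS CLI; the same data is reachable from Python with boto3, assuming boto3 is installed and AWS credentials are configured:

```python
# Hedged boto3 equivalent of the CLI call in `source` (assumes credentials
# are configured): list certificates that ACM already marks as EXPIRED.
import boto3

acm = boto3.client("acm", region_name="us-east-1")
resp = acm.list_certificates(CertificateStatuses=["EXPIRED"])
for cert in resp.get("CertificateSummaryList", []):
    print("expired certificate:", cert["CertificateArn"])
```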


@ -0,0 +1,58 @@
id: acm-cert-renewal-30days
info:
name: ACM Certificates Pre-expiration Renewal
author: princechaddha
severity: medium
description: |
Ensure AWS ACM SSL/TLS certificates are renewed at least 30 days before expiration to prevent service disruptions.
impact: |
Failure to renew certificates timely may lead to expired certificates causing service access issues or downtimes.
remediation: |
Set up Amazon CloudWatch to monitor ACM certificate expiration and automate renewal notifications or processes.
reference:
- https://docs.aws.amazon.com/acm/latest/userguide/acm-renewal.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,acm,aws-cloud-config
variables:
region: "us-east-1"
flow: |
code(1)
for(let arns of iterate(template.certificatearns)){
set("certificatearn", arns)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws acm list-certificates --region $region --certificate-statuses ISSUED --query 'CertificateSummaryList[*].CertificateArn' --output json
extractors:
- type: json
name: certificatearns
internal: true
json:
- '.CertificateSummaryList[] | .CertificateArn'
- engine:
- sh
- bash
source: |
aws acm describe-certificate --region $region --certificate-arn $certificatearn --query 'Certificate.[NotAfter, CertificateArn]' --output json | jq -r 'select((.[0] | fromdateiso8601 | mktime) - (now | mktime) < (30 * 86400)) | .[1]'
extractors:
- type: regex # type of the extractor
name: certificate
internal: true
regex:
- '^arn.*'
- type: dsl
dsl:
- '"The AWS ACM Certificate " + certificate +" is about to expire in 30 days"'
# digest: 4b0a00483046022100bc7d6e62968fc709c8201354d29b61784664ef5c5ebed70a6a8b305447b93725022100bad54d48aab6fdd1356608d1940730ea10536641398de6172861695612abd412:922c64590222798bb761d5b6d8e72950
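The jq filter in the second step keeps certificates whose `NotAfter` is less than 30 days away. The same windowing logic, sketched with boto3 as an assumption rather than part of the template:

```python
# Hedged boto3 sketch of the 30-day expiry window check (assumes boto3 and
# AWS credentials are available).
from datetime import datetime, timedelta, timezone
import boto3

acm = boto3.client("acm", region_name="us-east-1")
cutoff = datetime.now(timezone.utc) + timedelta(days=30)
issued = acm.list_certificates(CertificateStatuses=["ISSUED"])
for summary in issued["CertificateSummaryList"]:
    arn = summary["CertificateArn"]
    cert = acm.describe_certificate(CertificateArn=arn)["Certificate"]
    not_after = cert.get("NotAfter")  # timezone-aware datetime
    if not_after and not_after < cutoff:
        print(f"{arn} expires within 30 days ({not_after:%Y-%m-%d})")
```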


@ -0,0 +1,58 @@
id: acm-cert-renewal-45days
info:
name: ACM Certificates Pre-expiration Renewal
author: princechaddha
severity: medium
description: |
Ensure AWS ACM SSL/TLS certificates are renewed at least 45 days before expiration to prevent service disruptions.
impact: |
Failure to renew certificates timely may lead to expired certificates causing service access issues or downtimes.
remediation: |
Set up Amazon CloudWatch to monitor ACM certificate expiration and automate renewal notifications or processes.
reference:
- https://docs.aws.amazon.com/acm/latest/userguide/acm-renewal.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,acm,aws-cloud-config
variables:
region: "us-east-1"
flow: |
code(1)
for(let arns of iterate(template.certificatearns)){
set("certificatearn", arns)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws acm list-certificates --region $region --certificate-statuses ISSUED --query 'CertificateSummaryList[*].CertificateArn' --output json
extractors:
- type: json
name: certificatearns
internal: true
json:
- '.CertificateSummaryList[] | .CertificateArn'
- engine:
- sh
- bash
source: |
aws acm describe-certificate --region $region --certificate-arn $certificatearn --query 'Certificate.[NotAfter, CertificateArn]' --output json | jq -r 'select((.[0] | fromdateiso8601 | mktime) - (now | mktime) < (45 * 86400)) | .[1]'
extractors:
- type: regex # type of the extractor
name: certificate
internal: true
regex:
- '^arn.*'
- type: dsl
dsl:
- '"The AWS ACM Certificate " + certificate +" is about to expire in 30 days"'
# digest: 490a0046304402202b2fedb03a19db3f9d0f87fdc3982c926a2478e6e2903d2fbb55b63561d3a29c0220337c43e0512cc540287235d9f3489fb5af0dc783ae118c4341c27e2812a8d8c7:922c64590222798bb761d5b6d8e72950


@ -0,0 +1,57 @@
id: acm-cert-validation
info:
name: ACM Certificate Validation Check
author: princechaddha
severity: medium
description: |
Ensure ACM SSL/TLS certificates are properly validated during issue or renewal, indicating secure communication channels.
impact: |
Lack of validation may allow unauthorized certificates, leading to potential man-in-the-middle attacks or data breaches.
remediation: |
Use AWS ACM for certificate provisioning and ensure domain validation steps are correctly followed for each certificate issued or renewed.
reference:
- https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-validate.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,acm,aws-cloud-config
variables:
region: "us-east-1"
flow: |
code(1)
for(let arns of iterate(template.certificatearns)){
set("certificatearn", arns)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws acm list-certificates --region $region --output json
extractors:
- type: json
name: certificatearns
internal: true
json:
- '.CertificateSummaryList[] | .CertificateArn'
- engine:
- sh
- bash
source: |
aws acm describe-certificate --region $region --certificate-arn $certificatearn --query 'Certificate.Status'
matchers:
- type: word
words:
- "PENDING_VALIDATION"
extractors:
- type: dsl
dsl:
- '"The issue/renewal request for " + certificatearn + " SSL/TLS certificate was not validated"'
# digest: 4a0a0047304502210092b18eb3a24d6dea12fc385763c84745bf8201424ef620661e9c9fbb1b3b513a02201dc10c6f007cea631d51e81c2b6c883bf6c530a4de13398dea1c605b4a925714:922c64590222798bb761d5b6d8e72950


@ -0,0 +1,57 @@
id: acm-wildcard-cert
info:
name: Wildcard ACM Certificate Usage
author: princechaddha
severity: low
description: |
Ensure ACM certificates for specific domain names are used over wildcard certificates to adhere to best security practices, providing unique private keys for each domain/subdomain.
impact: |
Using wildcard certificates can expose your AWS environment to increased risk by potentially allowing unauthorized subdomains to be protected under the same certificate, reducing the granularity of access control and increasing the blast radius in the event of a key compromise.
remediation: |
Replace wildcard ACM certificates with single domain name certificates for each domain/subdomain within your AWS account. This enhances security by ensuring each domain/subdomain has its own unique private key and certificate.
reference:
- https://docs.aws.amazon.com/acm/latest/userguide/acm-certificate.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,acm,aws-cloud-config
variables:
region: "us-east-1"
flow: |
code(1)
for(let arns of iterate(template.certificatearns)){
set("certificatearn", arns)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws acm list-certificates --region $region --certificate-statuses ISSUED --query 'CertificateSummaryList[*].CertificateArn' --output json
extractors:
- type: json
name: certificatearns
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws acm describe-certificate --region $region --certificate-arn $certificatearn --query 'Certificate.DomainName'
matchers:
- type: word
words:
- "*."
extractors:
- type: dsl
dsl:
- 'certificatearn + " AWS ACM certificate is a wildcard certificate"'
# digest: 4a0a00473045022078c25c2aeb4e1ecb7851bfcf3e176bbd0eff547432a2a5ec04d150b1c3fbfdaf022100b3e428a513082fb7357f95e92309d0dfe47823bc6eb40cc403cc2836756ccd60:922c64590222798bb761d5b6d8e72950


@ -0,0 +1,55 @@
id: aws-code-env
info:
name: AWS Cloud Environment Validation
author: princechaddha
severity: info
description: |
Checks if AWS CLI is set up and all necessary tools are installed on the environment.
reference:
- https://aws.amazon.com/cli/
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,aws-cloud-config
variables:
region: "us-east-1"
flow: code(1) && code(2)
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws sts get-caller-identity --output json
matchers:
- type: word
internal: true
words:
- '"UserId"'
extractors:
- type: json
name: account
internal: true
json:
- '.Account'
- engine:
- sh
- bash
source: |
jq --version >/dev/null 2>&1 && echo "jq is installed." || echo "jq is not installed."
matchers:
- type: word
words:
- "jq is installed"
extractors:
- type: dsl
dsl:
- '"AWS CLI is properly configured for account \"" + account + "\" and all the necessary tools required are installed"'
# digest: 4b0a00483046022100c79a6583acb05a00dfa742962972031f8c42ae9ce85aabc1c9edb1ae7ebd9368022100b98762cb406a952a4115e28bb639f0d16d02e0b737012da638e3bf3f5d73f5f5:922c64590222798bb761d5b6d8e72950


@ -0,0 +1,57 @@
id: cloudtrail-data-events
info:
name: CloudTrail S3 Data Events Logging
author: princechaddha
severity: low
description: |
Ensure Amazon CloudTrail trails log S3 data events to monitor object-level operations like GetObject, DeleteObject, and PutObject.
impact: |
Without logging S3 data events, you lose visibility into object-level operations which could help detect unauthorized access or modifications.
remediation: |
Enable data event logging in CloudTrail for S3 buckets to ensure detailed activity monitoring and logging for better security and compliance.
reference:
- https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,s3,cloudtrail,aws-cloud-config
variables:
region: "ap-south-1"
flow: |
code(1)
for(let CloudTrail of iterate(template.cloudtrailname)){
set("trail", CloudTrail)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws cloudtrail list-trails --region $region --query 'Trails[*].Name' --output json
extractors:
- type: json
name: cloudtrailname
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws cloudtrail get-event-selectors --region $region --trail-name $trail --query 'EventSelectors[*].DataResources[]'
matchers:
- type: word
words:
- "[]"
extractors:
- type: dsl
dsl:
- '"CloudTrail trail" + trail + " is not configured to capture resource operations performed on or within an AWS cloud resource"'
# digest: 4b0a00483046022100da87f9b597db66bbcf87384782b53d2b838ad5c8b6c89924afc2607aa6c92bdf022100849208d4cb009645e9a5d9bf73dd7dfa351b390b23991bffa72a85d99ca0ac4c:922c64590222798bb761d5b6d8e72950


@ -0,0 +1,57 @@
id: cloudtrail-disabled
info:
name: CloudTrail Disabled
author: princechaddha
severity: high
description: |
Ensures AWS CloudTrail is enabled in all regions to monitor and record account activity across your AWS infrastructure, enhancing security and compliance.
impact: |
Lack of region-wide CloudTrail logging can lead to insufficient visibility into account activities, hindering anomaly detection and forensic analysis.
remediation: |
Enable CloudTrail in all AWS regions through the AWS Management Console or CLI to ensure comprehensive activity logging and monitoring.
reference:
- https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-getting-started.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,cloudtrail,aws-cloud-config
variables:
region: "ap-south-1"
flow: |
code(1)
for(let CloudTrail of iterate(template.cloudtrailname)){
set("trail", CloudTrail)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws cloudtrail list-trails --region $region --query 'Trails[*].Name' --output json
extractors:
- type: json
name: cloudtrailname
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws cloudtrail describe-trails --region $region --trail-name-list $trail --query 'trailList[*].IsMultiRegionTrail'
matchers:
- type: word
words:
- "false"
extractors:
- type: dsl
dsl:
- '"CloudTrail trail" + trail + " is not configured to receive log files from all the AWS cloud regions"'
# digest: 4a0a00473045022100a7330af1aa9ad989dc95304b0e71f8479849de9782179443c3b7caf9d9373add022034c783da46b9b3b530bbb04d08b70e1803c5d298104e3d65659addd1a8c839d9:922c64590222798bb761d5b6d8e72950
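
A one-line remediation sketch for the finding above, with <trail-name> as a placeholder; it converts an existing trail into a multi-region trail:
  aws cloudtrail update-trail --region ap-south-1 --name <trail-name> --is-multi-region-trail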

View File

@ -0,0 +1,57 @@
id: cloudtrail-dup-logs
info:
name: CloudTrail Duplicate Log Avoidance
author: princechaddha
severity: medium
description: |
Ensure CloudTrail logging is configured to prevent duplicate recording of global service events across multiple trails.
impact: |
Duplicate log entries can lead to increased storage costs and complicate log analysis and anomaly detection efforts.
remediation: |
Configure only one multi-region trail to log global service events and disable global service logging for all other trails.
reference:
- https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,cloudtrail,aws-cloud-config
variables:
region: "ap-south-1"
flow: |
code(1)
for(let CloudTrail of iterate(template.cloudtrailname)){
set("trail", CloudTrail)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws cloudtrail list-trails --region $region --query 'Trails[*].Name' --output json
extractors:
- type: json
name: cloudtrailname
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws cloudtrail describe-trails --region $region --trail-name-list $trail --query 'trailList[*].IncludeGlobalServiceEvents' --output json
matchers:
- type: word
words:
- "true"
extractors:
- type: dsl
dsl:
- '"Ensure only one trail in Amazon CloudTrail is configured for global service events to avoid duplicates: " + trail'
# digest: 4b0a00483046022100c35edad75ea1ac20bfb4e2cbe8b2b4e8fc3b29c40e7ff611808957ab6d83f303022100a77f7c148769b6ca2d6277298d4a5269e1bb2092f609f67cef8e8152a67f02eb:922c64590222798bb761d5b6d8e72950
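
To act on this finding, one possible approach is to keep global service events on a single multi-region trail and turn them off on the others; <secondary-trail> below is a placeholder:
  aws cloudtrail update-trail --region ap-south-1 --name <secondary-trail> --no-include-global-service-events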

View File

@ -0,0 +1,57 @@
id: cloudtrail-global-disabled
info:
name: CloudTrail Global Events Enablement
author: princechaddha
severity: high
description: |
Ensure Amazon CloudTrail trails are configured to capture both regional and global API activity for enhanced security and compliance in your AWS account.
impact: |
Lacking global event logging reduces visibility across AWS services that operate at the global level, potentially missing critical security and compliance data.
remediation: |
Enable global service logging in CloudTrail by creating or updating a trail to include global services. This ensures comprehensive activity monitoring.
reference:
- https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-create-and-update-a-trail.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,cloudtrail,aws-cloud-config
variables:
region: "ap-south-1"
flow: |
code(1)
for(let CloudTrail of iterate(template.cloudtrailname)){
set("trail", CloudTrail)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws cloudtrail list-trails --region $region --query 'Trails[*].Name' --output json
extractors:
- type: json
name: cloudtrailname
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws cloudtrail describe-trails --region $region --trail-name-list $trail --query 'trailList[*].IncludeGlobalServiceEvents'
matchers:
- type: word
words:
- "false"
extractors:
- type: dsl
dsl:
- '"CloudTrail trail" + trail + " is not configured to record API calls for AWS global services"'
# digest: 4b0a00483046022100f10c2c9b4cb87ac0e4d1bdcdbf1f22db6d84b775136499410fe1fd92ba1ad9c5022100eecaa6515470a95ff633ad2df025ded9d8c20f051189a648b1f862861ceb3599:922c64590222798bb761d5b6d8e72950
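
The inverse of the previous sketch covers this check: enabling global service event logging on the trail that should carry it (<trail-name> is a placeholder):
  aws cloudtrail update-trail --region ap-south-1 --name <trail-name> --include-global-service-events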

View File

@ -0,0 +1,57 @@
id: cloudtrail-integrated-cloudwatch
info:
name: CloudTrail CloudWatch Integration
author: princechaddha
severity: medium
description: |
Ensure Amazon CloudTrail logs are integrated with CloudWatch Logs for real-time monitoring and analysis.
impact: |
    Without this integration, detecting and responding to critical events or unauthorized actions within the AWS environment could be delayed.
remediation: |
Enable CloudTrail log file validation and configure CloudWatch Logs to monitor CloudTrail log files. Create CloudWatch Alarms for specific events of interest.
reference:
- https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,cloudtrail,cloudwatch,aws-cloud-config
variables:
region: "ap-south-1"
flow: |
code(1)
for(let CloudTrail of iterate(template.cloudtrailname)){
set("trail", CloudTrail)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws cloudtrail list-trails --region $region --query 'Trails[*].Name' --output json
extractors:
- type: json
name: cloudtrailname
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws cloudtrail describe-trails --region $region --trail-name-list $trail --query 'trailList[*].CloudWatchLogsLogGroupArn'
matchers:
- type: word
words:
- "[]"
extractors:
- type: dsl
dsl:
- '"CloudTrail trail" + trail + " is not configured to send events to CloudWatch Logs for monitoring purposes"'
# digest: 4a0a004730450221008bdf150f8abb8be1e258c067aae73857443f219a130cf41d0cc3d9c0c6d45ab302205479a358041954f9d0aa04b2145860008c3732d303a381268f0c31a0148495dd:922c64590222798bb761d5b6d8e72950
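
A hedged remediation sketch for wiring a trail into CloudWatch Logs; the log group and IAM role ARNs are placeholders and must already exist with the documented permissions:
  aws cloudtrail update-trail --region ap-south-1 --name <trail-name> \
    --cloud-watch-logs-log-group-arn <log-group-arn> \
    --cloud-watch-logs-role-arn <role-arn>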

View File

@ -0,0 +1,57 @@
id: cloudtrail-log-integrity
info:
name: CloudTrail Log Integrity Validation not Enabled
author: princechaddha
severity: high
description: |
Ensure CloudTrail log file integrity validation is enabled to detect unauthorized file modifications.
impact: |
Without log file integrity validation, it's harder to detect if CloudTrail logs have been tampered with, potentially hiding malicious activity.
remediation: |
Enable log file integrity validation on all CloudTrail trails to ensure the integrity and authenticity of your logs.
reference:
- https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-intro.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,cloudtrail,aws-cloud-config
variables:
region: "ap-south-1"
flow: |
code(1)
for(let CloudTrail of iterate(template.cloudtrailname)){
set("trail", CloudTrail)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws cloudtrail list-trails --region $region --query 'Trails[*].Name' --output json
extractors:
- type: json
name: cloudtrailname
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws cloudtrail describe-trails --region $region --trail-name-list $trail --query 'trailList[*].LogFileValidationEnabled'
matchers:
- type: word
words:
- "false"
extractors:
- type: dsl
dsl:
- '"The log file integrity validation is not enabled for CloudTrail trail" + trail'
# digest: 4a0a00473045022100e301d2ce8df52b0170dbbbee6ca44cc69ea46fd81c0ff3dd3264dc81a8548c2402206321af47afdb4655e6ed862dbdc015d73cf98840e24c43636f0a2a28e2feb81c:922c64590222798bb761d5b6d8e72950
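
Turning on log file validation for a flagged trail is a single CLI call; <trail-name> is a placeholder:
  aws cloudtrail update-trail --region ap-south-1 --name <trail-name> --enable-log-file-validation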

View File

@ -0,0 +1,64 @@
id: cloudtrail-logs-not-encrypted
info:
name: CloudTrail Logs Not Encrypted
author: princechaddha
severity: medium
description: |
Ensure Amazon CloudTrail logs are encrypted at rest using AWS Key Management Service (KMS) to secure log data.
impact: |
Non-encrypted CloudTrail logs pose a risk of unauthorized access, compromising the integrity and confidentiality of log data.
remediation: |
Enable Server-Side Encryption (SSE) for CloudTrail logs using an AWS KMS key through the CloudTrail console or AWS CLI.
reference:
- https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,cloudtrail,aws-cloud-config
variables:
region: "us-east-1"
flow: |
code(1)
for(let CloudTrail of iterate(template.cloudtrailname)){
set("trail", CloudTrail)
set("region", template.trailregion)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws cloudtrail list-trails --region $region --query 'Trails[*].[Name, HomeRegion]' --output json
extractors:
- type: json
name: cloudtrailname
internal: true
json:
- '.[] | .[0]'
- type: json
name: trailregion
internal: true
json:
- '.[] | .[1]'
- engine:
- sh
- bash
source: |
aws cloudtrail describe-trails --region $region --trail-name-list $trail --query 'trailList[*].KmsKeyId'
matchers:
- type: word
words:
- "[]"
extractors:
- type: dsl
dsl:
- '"CloudTrail trail " + trail + " is not configured to encrypt log files using SSE-KMS encryption"'
# digest: 4a0a00473045022100fb8aa2e414f88294926325f90076733d4a7d4af4ac18c47b9b82564412f5a2250220104bc5c6dcda1248db44229720dda05561319e3549bb6437ea1c97c6c099421c:922c64590222798bb761d5b6d8e72950
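
A minimal sketch of the SSE-KMS remediation, assuming an existing KMS key whose policy allows CloudTrail; both identifiers are placeholders:
  aws cloudtrail update-trail --region us-east-1 --name <trail-name> --kms-key-id <kms-key-arn>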

View File

@ -0,0 +1,74 @@
id: cloudtrail-mfa-delete
info:
name: CloudTrail MFA Delete
author: princechaddha
severity: high
description: |
Ensure Amazon CloudTrail buckets have MFA Delete enabled to protect log file deletion.
impact: |
    Without MFA Delete, CloudTrail log files can be removed from the S3 bucket without additional verification, weakening security and compliance.
remediation: |
Enable MFA Delete on CloudTrail buckets via the S3 console or AWS CLI.
reference:
- https://docs.aws.amazon.com/AmazonS3/latest/userguide/MultiFactorAuthenticationDelete.html
metadata:
max-request: 3
tags: cloud,devops,aws,amazon,s3,aws-cloud-config
variables:
region: "ap-south-1"
flow: |
code(1)
for(let CloudTrail of iterate(template.cloudtrailname)){
set("trail", CloudTrail)
code(2)
for(let BucketNames of iterate(template.buckets)){
set("bucket", BucketNames)
code(3)
}
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws cloudtrail list-trails --region $region --query 'Trails[*].Name' --output json
extractors:
- type: json
name: cloudtrailname
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws cloudtrail describe-trails --region $region --trail-name-list $trail --query 'trailList[*].S3BucketName'
extractors:
- type: json
name: buckets
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws s3api get-bucket-versioning --bucket $bucket --query 'MFADelete'
matchers:
- type: word
words:
- 'null'
extractors:
- type: dsl
dsl:
- '"The MFA Delete feature is not enabled for the S3 bucket " + bucket + " associated with the CloudTrail " + trail'
# digest: 4a0a00473045022003bb18e55eae6aa19233a988216a85ab85d1321a68dee66dc295ce19735d9900022100bbfbf82a13f2a4e5693299287c29e50507941e1576d01425abdb7a5b0e68f775:922c64590222798bb761d5b6d8e72950
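
A remediation sketch, with placeholders throughout; note that MFA Delete has to be enabled by the bucket owner (root credentials) using that account's MFA device:
  aws s3api put-bucket-versioning --bucket <trail-bucket> \
    --versioning-configuration Status=Enabled,MFADelete=Enabled \
    --mfa "<mfa-device-arn> <mfa-code>"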

View File

@ -0,0 +1,57 @@
id: cloudtrail-mgmt-events
info:
name: CloudTrail Management Events Logging Not Enabled
author: princechaddha
severity: medium
description: |
Ensures Amazon CloudTrail trails are configured to log management events, capturing crucial API calls and console actions for security and audit purposes.
impact: |
Failure to log management events can lead to insufficient audit trails, hindering the ability to investigate and respond to suspicious activities.
remediation: |
Enable management event logging in CloudTrail by creating a new trail or updating existing trails to include management events.
reference:
- https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,cloudtrail,aws-cloud-config
variables:
region: "ap-south-1"
flow: |
code(1)
for(let CloudTrail of iterate(template.cloudtrailname)){
set("trail", CloudTrail)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws cloudtrail list-trails --region $region --query 'Trails[*].Name' --output json
extractors:
- type: json
name: cloudtrailname
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws cloudtrail get-event-selectors --region $region --trail-name $trail --query 'EventSelectors[*].IncludeManagementEvents'
matchers:
- type: word
words:
- "false"
extractors:
- type: dsl
dsl:
- '"CloudTrail trail" + trail + " is not configured to capture management operations performed on your AWS cloud resources"'
# digest: 4a0a00473045022100f0879bcbe45c9ed0c8921338f6384c009e9a4e2b4e9b8199e3b462fcb93ca7bb02202ba77a0927be3707abc226f4b5d0c4116cd8f2b4d463e8f822e8defbe7934e4e:922c64590222798bb761d5b6d8e72950
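
A minimal sketch for re-enabling management event logging on a flagged trail (<trail-name> is a placeholder):
  aws cloudtrail put-event-selectors --region ap-south-1 --trail-name <trail-name> \
    --event-selectors '[{"ReadWriteType":"All","IncludeManagementEvents":true}]'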

View File

@ -0,0 +1,57 @@
id: cloudtrail-public-buckets
info:
name: Public CloudTrail Buckets
author: princechaddha
severity: critical
description: |
Identifies AWS CloudTrail S3 buckets that are publicly accessible, risking exposure of sensitive log data.
impact: |
Unauthorized access to CloudTrail logs can lead to data leakage, compromising the integrity and confidentiality of cloud operations.
remediation: |
Restrict S3 bucket access using bucket policies or IAM policies to ensure that CloudTrail logs are not publicly accessible.
reference:
- https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,s3,aws-cloud-config
variables:
region: "ap-south-1"
flow: |
code(1)
for(let CloudTrail of iterate(template.cloudtrailname)){
set("trail", CloudTrail)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws cloudtrail list-trails --region $region --query 'Trails[*].Name' --output json
extractors:
- type: json
name: cloudtrailname
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws cloudtrail describe-trails --region $region --trail-name-list $trail --query 'trailList[*].IncludeGlobalServiceEvents'
matchers:
- type: word
words:
- "false"
extractors:
- type: dsl
dsl:
- '"CloudTrail trail" + trail + " is not configured to record API calls for AWS global services"'
# digest: 4a0a004730450220153c8058c6e3274fd6caf2b309baa876492c64fa5978590b21938000e9416aa6022100faaf8886e0deb971d17b2f325fc402814e59ce66ff16ea343543e3b6b3f13773:922c64590222798bb761d5b6d8e72950
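
For the public-bucket exposure this template's remediation describes, one common fix is to block public access on the trail's S3 bucket; <trail-bucket> is a placeholder:
  aws s3api put-public-access-block --bucket <trail-bucket> \
    --public-access-block-configuration BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true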

View File

@ -0,0 +1,74 @@
id: cloudtrail-s3-bucket-logging
info:
name: CloudTrail S3 Logging
author: princechaddha
severity: high
description: |
Ensure AWS CloudTrail logs are captured in S3 buckets with Server Access Logging enabled for audit and forensic purposes.
impact: |
Without S3 Server Access Logging for CloudTrail, tracking unauthorized access or modifications to CloudTrail logs becomes difficult, impacting incident response and forensic analysis.
remediation: |
Enable Server Access Logging on the S3 bucket used by CloudTrail. Configure the logging feature to capture all requests made to the CloudTrail bucket.
reference:
- https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-intro.html
metadata:
max-request: 3
tags: cloud,devops,aws,amazon,s3,cloudtrail,aws-cloud-config
variables:
region: "ap-south-1"
flow: |
code(1)
for(let CloudTrail of iterate(template.cloudtrailname)){
set("trail", CloudTrail)
code(2)
for(let BucketNames of iterate(template.buckets)){
set("bucket", BucketNames)
code(3)
}
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws cloudtrail list-trails --region $region --query 'Trails[*].Name' --output json
extractors:
- type: json
name: cloudtrailname
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws cloudtrail describe-trails --region $region --trail-name-list $trail --query 'trailList[*].S3BucketName'
extractors:
- type: json
name: buckets
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws s3api get-bucket-logging --bucket $bucket --query 'LoggingEnabled'
matchers:
- type: word
words:
- 'null'
extractors:
- type: dsl
dsl:
- '"Access logging is not enabled for the S3 bucket associated with CloudTrail trail " + trail'
# digest: 4a0a00473045022100bfe94b20d18063458c694381cd23f96dd8023473e8b9e8151922295b88bff033022044b9f7a79baa2caa0d4ae5406a2701c73c77ddc43da72190b32f1e6ec1fa21ca:922c64590222798bb761d5b6d8e72950
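
A sketch of enabling server access logging on the trail bucket; bucket names are placeholders, and the target bucket must already grant the S3 log delivery permissions:
  aws s3api put-bucket-logging --bucket <trail-bucket> \
    --bucket-logging-status '{"LoggingEnabled":{"TargetBucket":"<log-target-bucket>","TargetPrefix":"cloudtrail-access-logs/"}}'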

View File

@ -0,0 +1,75 @@
id: s3-object-lock-not-enabled
info:
name: CloudTrail S3 Object Lock
author: princechaddha
severity: medium
description: |
Ensure Amazon CloudTrail S3 buckets have Object Lock enabled to prevent log deletion and ensure regulatory compliance.
impact: |
Without Object Lock, S3 objects such as CloudTrail logs can be deleted, compromising audit trails and violating compliance requirements.
remediation: |
Enable S3 Object Lock in Governance mode with a retention period that meets your compliance requirements for CloudTrail S3 buckets.
reference:
- https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html
metadata:
max-request: 3
tags: cloud,devops,aws,amazon,s3,aws-cloud-config
variables:
region: "ap-south-1"
flow: |
code(1)
for(let CloudTrail of iterate(template.cloudtrailname)){
set("trail", CloudTrail)
code(2)
for(let BucketNames of iterate(template.buckets)){
set("bucket", BucketNames)
code(3)
}
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws cloudtrail list-trails --region $region --query 'Trails[*].Name' --output json
extractors:
- type: json
name: cloudtrailname
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws cloudtrail describe-trails --region $region --trail-name-list $trail --query 'trailList[*].S3BucketName'
extractors:
- type: json
name: buckets
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws s3api get-object-lock-configuration --bucket $bucket --query 'ObjectLockConfiguration.ObjectLockEnabled' --output json
matchers:
- type: word
part: code_3_stderr
words:
- 'ObjectLockConfigurationNotFoundError'
extractors:
- type: dsl
dsl:
- '"The Object Lock feature is not enabled for the S3 bucket associated with the CloudTrail trail " + trail'
# digest: 4a0a00473045022100adf9327a943f74cada1c893502adad96b8db198c24c0211486944503bc818dc202205d41291ad41820b5afe0d7d1eb4061acde307124ff04b588b1cb3fbeec75f54c:922c64590222798bb761d5b6d8e72950
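
A hedged sketch for Object Lock: it can normally only be enabled when a bucket is created (existing buckets require AWS Support), so the example assumes a new trail bucket and placeholder names, with a Governance-mode default retention:
  aws s3api create-bucket --bucket <new-trail-bucket> --object-lock-enabled-for-bucket \
    --region ap-south-1 --create-bucket-configuration LocationConstraint=ap-south-1
  aws s3api put-object-lock-configuration --bucket <new-trail-bucket> \
    --object-lock-configuration '{"ObjectLockEnabled":"Enabled","Rule":{"DefaultRetention":{"Mode":"GOVERNANCE","Days":365}}}'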

View File

@ -0,0 +1,57 @@
id: cw-alarm-action-set
info:
name: CloudWatch Alarm Action Not Set
author: princechaddha
severity: medium
description: |
Ensure Amazon CloudWatch alarms have actions configured for the ALARM state to automate response to incidents.
impact: |
Without actions, CloudWatch alarms may not trigger automated incident response or notifications, potentially delaying mitigation.
remediation: |
Configure at least one action for each CloudWatch alarm to ensure timely response to monitored issues.
reference:
- https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,cloudwatch,aws-cloud-config
variables:
region: "us-east-1"
flow: |
code(1)
for(let AlarmName of iterate(template.alarms)){
set("alarm", AlarmName)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws cloudwatch describe-alarms --region $region --query 'MetricAlarms[].AlarmName' --output json
extractors:
- type: json
name: alarms
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws cloudwatch describe-alarms --region $region --alarm-names "$alarm" --query 'MetricAlarms[*].AlarmActions[]' --output json
matchers:
- type: word
words:
- "[]"
extractors:
- type: dsl
dsl:
- '"The Amazon CloudWatch " + alarm +" is not configured with any actions for the ALARM state."'
# digest: 4a0a00473045022100f3558add899cfc87cef41ebadd1b931c1250bf0f7255e53a67e1aa663b37925b02204010a3c40e8a0ad49ac62d537bcf1a2e4da4d59b40ebc78d5c56e03d1f89348d:922c64590222798bb761d5b6d8e72950
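
Adding an ALARM-state action means re-issuing the full alarm definition; the sketch below uses placeholder metric settings and a placeholder SNS topic, not values from the template:
  aws cloudwatch put-metric-alarm --region us-east-1 --alarm-name <alarm-name> \
    --metric-name CPUUtilization --namespace AWS/EC2 --statistic Average \
    --period 300 --evaluation-periods 1 --threshold 80 \
    --comparison-operator GreaterThanThreshold \
    --alarm-actions <sns-topic-arn>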

View File

@ -0,0 +1,57 @@
id: cw-alarms-actions
info:
name: CloudWatch Alarms Actions Enabled
author: princechaddha
severity: high
description: |
Ensure that all Amazon CloudWatch alarms have actions enabled (ActionEnabled: true) to respond to state changes.
impact: |
Without actions enabled, CloudWatch alarms cannot perform automated actions in response to state changes, potentially missing critical alerts.
remediation: |
Enable actions for each CloudWatch alarm by setting the ActionEnabled parameter to true, allowing for automated responses to alarms.
reference:
- https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,cloudwatch,aws-cloud-config
variables:
region: "us-east-1"
flow: |
code(1)
for(let AlarmName of iterate(template.alarms)){
set("alarm", AlarmName)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws cloudwatch describe-alarms --region $region --query 'MetricAlarms[].AlarmName' --output json
extractors:
- type: json
name: alarms
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws cloudwatch describe-alarms --region $region --alarm-names "DiskWritesOpsAlarm" --query 'MetricAlarms[*].ActionsEnabled'
matchers:
- type: word
words:
- "false"
extractors:
- type: dsl
dsl:
- '"The Amazon CloudWatch " + alarm + " does not have any active actions configured"'
# digest: 490a0046304402204f22697b5c7a4b568fa37b48600e0f99f469922bdd208491966d4eef4fd6355d02204f33504b85a9de2df430dde270e0f481760be59ca0340bb93c245143558b0444:922c64590222798bb761d5b6d8e72950
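
Re-enabling actions on an alarm that has them disabled is a single call; <alarm-name> is a placeholder:
  aws cloudwatch enable-alarm-actions --region us-east-1 --alarm-names <alarm-name>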

View File

@ -0,0 +1,56 @@
id: ec2-imdsv2
info:
name: Enforce IMDSv2 on EC2 Instances
author: princechaddha
severity: medium
description: |
Ensure all EC2 instances use Instance Metadata Service Version 2 (IMDSv2) for enhanced security when requesting instance metadata, protecting against certain types of attacks that target the older version, IMDSv1.
impact: |
Using IMDSv1 can expose EC2 instances to server-side request forgery (SSRF) attacks, potentially allowing attackers to access sensitive instance metadata.
remediation: |
Modify the EC2 instance metadata options to set `HttpTokens` to `required`, enforcing the use of IMDSv2. This can be done via the AWS Management Console, CLI, or EC2 API.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
flow: |
code(1)
for(let InstancesName of iterate(template.instances)){
set("ec2instance", InstancesName)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
      aws ec2 describe-instances --region $region --query 'Reservations[*].Instances[*].InstanceId' --output json
extractors:
- type: json
name: instances
internal: true
json:
          - '.[].[]'
- engine:
- sh
- bash
source: |
aws ec2 describe-instances --region $region --instance-ids $ec2instance --query 'Reservations[*].Instances[*].MetadataOptions.HttpTokens[]'
matchers:
- type: word
words:
- "optional"
extractors:
- type: dsl
dsl:
- 'ami + " is publically shared"'
# digest: 4a0a00473045022014b5f386ded068e3ca4990545da3f49124b5e48e86bea8ea94a380c367e3aeb9022100ed0ecb915d4c1b7be7a7906ffa2a55a2988669e3418301b6886a45df6a57b337:922c64590222798bb761d5b6d8e72950
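
A minimal remediation sketch for a flagged instance, with <instance-id> as a placeholder; it requires IMDSv2 tokens while keeping the metadata endpoint enabled:
  aws ec2 modify-instance-metadata-options --region us-east-1 --instance-id <instance-id> \
    --http-tokens required --http-endpoint enabled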

View File

@ -0,0 +1,57 @@
id: ec2-public-ip
info:
name: Public IP on EC2 Instances
author: princechaddha
severity: unknown
description: |
Ensures Amazon EC2 instances, especially backend ones, do not use public IP addresses to minimize Internet exposure.
impact: |
Instances with public IP addresses are more vulnerable to Internet-based threats, compromising network security.
remediation: |
Restrict public IP assignment for EC2 instances, particularly for backend instances. Use private IPs and manage access via AWS VPC and security groups.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
flow: |
code(1)
for(let InstancesName of iterate(template.instances)){
set("ec2instance", InstancesName)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-instances --region $region --output json --query 'Reservations[*].Instances[*].InstanceId'
extractors:
- type: json
name: instances
internal: true
json:
- '.[].[]'
- engine:
- sh
- bash
source: |
aws ec2 describe-instances --region $region --instance-ids $ec2instance --query "Reservations[*].Instances[*].NetworkInterfaces[*].Association.IpOwnerId[] | []"
matchers:
- type: word
words:
- "amazon"
extractors:
- type: dsl
dsl:
- '"The Amazon Instance " + ec2instance + " uses public IP addresses"'
# digest: 4a0a00473045022100d6d48dea82c4b3c88a81c6060dbedadb56502f1d2b692dd7d309e67b7d20504602203063ae7dcaa055dc54d9d6f0f534a96feb3966280b2a9004201fc21fe7752964:922c64590222798bb761d5b6d8e72950
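
One way to prevent new backend instances from receiving public IPs is to disable auto-assignment at the subnet level; <subnet-id> is a placeholder, and existing instances keep any address already attached:
  aws ec2 modify-subnet-attribute --region us-east-1 --subnet-id <subnet-id> --no-map-public-ip-on-launch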

View File

@ -0,0 +1,58 @@
id: ec2-sg-egress-open
info:
name: Open Egress in EC2 Security Group
author: princechaddha
severity: high
description: |
Checks for unrestricted outbound/egress rules in Amazon EC2 security groups, highlighting potential over-permissive configurations.
impact: |
Allows unrestricted outbound traffic from EC2 instances, increasing the risk of data exfiltration and malicious external communications.
remediation: |
Restrict egress traffic in EC2 security groups to only necessary IP addresses and ranges, adhering to the Principle of Least Privilege.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#sg-rules
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
flow: |
code(1)
for(let SecurityGroup of iterate(template.securitygroups)){
set("groupid", SecurityGroup)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --output json --query 'SecurityGroups[*].GroupId'
extractors:
- type: json
name: securitygroups
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --group-ids $groupid --query 'SecurityGroups[*].IpPermissionsEgress[]'
matchers:
- type: word
words:
- "0.0.0.0/0"
- "::/0"
extractors:
- type: dsl
dsl:
- '"Amazon EC2 security group(s) " + groupid + " allows unrestricted outbound traffic"'
# digest: 4a0a00473045022020d4b03ec7e884a6a9516b16ab27112d3d1e307bdd145875d8a47c5f85e8c5dd022100c3bcec6be21508dcf10fe542df392d777029d8f8658479f1690c7d38f234f7fc:922c64590222798bb761d5b6d8e72950
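
A sketch of removing the default allow-all egress rule from a flagged group before adding narrower rules; <group-id> is a placeholder:
  aws ec2 revoke-security-group-egress --region us-east-1 --group-id <group-id> \
    --ip-permissions '[{"IpProtocol":"-1","IpRanges":[{"CidrIp":"0.0.0.0/0"}]}]'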

View File

@ -0,0 +1,58 @@
id: ec2-sg-ingress
info:
name: Unrestricted Access on Uncommon EC2 Ports
author: princechaddha
severity: high
description: |
Ensure Amazon EC2 security groups do not allow unrestricted access (0.0.0.0/0, ::/0) on uncommon ports, protecting against brute force attacks on EC2 instances.
impact: |
Unrestricted ingress on uncommon ports increases the risk of unauthorized access and potential brute force attacks on EC2 instances.
remediation: |
Restrict access to uncommon ports in EC2 security groups, permitting only necessary traffic and implementing stringent access controls.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
flow: |
code(1)
for(let SecurityGroup of iterate(template.securitygroups)){
set("groupid", SecurityGroup)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.cidr,Values='0.0.0.0/0' --output json --query 'SecurityGroups[*].GroupId'
extractors:
- type: json
name: securitygroups
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --group-ids $groupid --query 'SecurityGroups[*].IpPermissions[]'
matchers:
- type: word
words:
- "0.0.0.0/0"
- "::/0"
extractors:
- type: dsl
dsl:
- '"Amazon EC2 security group(s) " + groupid + " allows unrestricted inbound traffic"'
# digest: 4b0a00483046022100881b4639e87b866a26e2397b65cebda755a3e870faa83f93122314e58a111837022100bf8b00a4e7ac9fc0f71faf6314470a221c9a95af8b3590c7076267d4badd9592:922c64590222798bb761d5b6d8e72950
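
A minimal sketch for revoking an open ingress rule, shown here for SSH on port 22; the same pattern applies to the port-specific checks that follow (FTP, RDP, MySQL, Redis, and so on), with <group-id> as a placeholder:
  aws ec2 revoke-security-group-ingress --region us-east-1 --group-id <group-id> \
    --protocol tcp --port 22 --cidr 0.0.0.0/0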

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-cifs
info:
name: EC2 Unrestricted CIFS Access
author: princechaddha
severity: critical
description: |
Checks for inbound rules in Amazon EC2 security groups allowing unrestricted access (0.0.0.0/0 or ::/0) on TCP port 445, used for CIFS/SMB file sharing, posing a high security risk.
impact: |
Unrestricted CIFS access can expose EC2 instances to unwanted external access, increasing the risk of data breaches and unauthorized control over resources.
remediation: |
Restrict inbound access on TCP port 445 to known IPs or ranges. Regularly review security group configurations to ensure compliance with security policies.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=445 Name=ip-permission.to-port,Values=445 Name=ip-permission.cidr,Values='0.0.0.0/0' Name=ip-permission.ipv6-cidr,Values='::/0' --output json --query 'SecurityGroups[*].GroupId'
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 445"'
# digest: 4a0a00473045022100d07b38ee532d1cb1f6cca8d1384049e416bf72bae10727fe3f0fdd70bddf65730220384a7997d216466edabd10fe2f011460f0ade329929e41bf322977aac2d21a43:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-dns
info:
name: Unrestricted DNS Access in EC2
author: princechaddha
severity: critical
description: |
Checks for inbound rules in Amazon EC2 security groups that allow unrestricted access (0.0.0.0/0 or ::/0) on TCP/UDP port 53, which can expose DNS servers to potential attacks.
impact: |
Allowing unrestricted access to DNS services can lead to DNS spoofing, DDoS attacks, and unauthorized access to internal networks.
remediation: |
Restrict the inbound rules for TCP/UDP port 53 in EC2 security groups to known, trusted IPs only. Ensure security group rules are tightly controlled and monitored.
reference:
- https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#SecurityGroupRules
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=53 Name=ip-permission.to-port,Values=53 Name=ip-permission.cidr,Values='0.0.0.0/0' --output json --query 'SecurityGroups[*].GroupId'
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 53"'
# digest: 490a0046304402207a2ebb618db4c24fc0d9e868b09e8689a7ccee1c419c1e446d549e2231bf20d202202c9b7cdcef58014affe10a86649a319995447be182a50a5910e13f4911bb9676:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-ftp
info:
name: Restrict EC2 FTP Access
author: princechaddha
severity: critical
description: |
Ensure Amazon EC2 security groups disallow unrestricted inbound FTP access on TCP ports 20 and 21 to prevent brute force attacks.
impact: |
Unrestricted FTP access can expose EC2 instances to unauthorized access and brute force attacks, compromising security.
remediation: |
Restrict inbound access on TCP ports 20 and 21 for EC2 security groups to known IPs or remove the rules if FTP is not required.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html#security-group-rules
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=20,21 Name=ip-permission.to-port,Values=20,21 Name=ip-permission.cidr,Values='0.0.0.0/0' --output json --query 'SecurityGroups[*].GroupId'
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 20 or 21"'
# digest: 4a0a0047304502205f388ef25cd4e10ea8b0ca947a8100c1b849e7503e01c6485d3d23c30e190d16022100a24ea5679098a9da74b661c8375a32c2e91cb9e9e82682ffdd981cc1b1c78e79:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-http
info:
name: Unrestricted HTTP on EC2
author: princechaddha
severity: critical
description: |
Checks for inbound rules in EC2 security groups allowing unrestricted access (0.0.0.0/0) to TCP port 80, increasing exposure to potential breaches.
impact: |
Unrestricted access to TCP port 80 can lead to unauthorized data exposure and increases the risk of security breaches.
remediation: |
Restrict inbound traffic on TCP port 80 to only necessary IP addresses, adhering to the principle of least privilege.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=80 Name=ip-permission.to-port,Values=80 Name=ip-permission.cidr,Values='0.0.0.0/0' --query 'SecurityGroups[*].GroupId' --output json
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 80"'
# digest: 490a00463044022039ebe4ac309956dc8ff7776b17a3982b8cfeadd66b69889950778ef07fca54e3022046047a1017a92794e037d6ad1472d3365ca94835c8071764cad1e8996d99eae0:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-https
info:
name: Unrestricted HTTPs on EC2
author: princechaddha
severity: critical
description: |
Checks for inbound rules in EC2 security groups allowing unrestricted access (0.0.0.0/0) to TCP port 443, increasing exposure to potential breaches.
impact: |
Unrestricted access to TCP port 443 can lead to unauthorized data exposure and increases the risk of security breaches.
remediation: |
Restrict inbound traffic on TCP port 443 to only necessary IP addresses, adhering to the principle of least privilege.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=443 Name=ip-permission.to-port,Values=443 Name=ip-permission.cidr,Values='0.0.0.0/0' --query 'SecurityGroups[*].GroupId' --output json
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 443"'
# digest: 4a0a00473045022011c3ec5cdc908912df52c3e254be0010bede95ce080cf0083b2080a5b08b3779022100d719db5872cfb0485e6384332bf6b256c00ce754226c59fd1f4a9ce5d7956750:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,38 @@
id: ec2-unrestricted-icmp
info:
name: Restrict EC2 ICMP Access
author: princechaddha
severity: critical
description: |
Checks for Amazon EC2 security groups with inbound rules allowing unrestricted ICMP access. Advises restricting ICMP to trusted IPs to uphold the Principle of Least Privilege and minimize the attack surface.
impact: |
Unrestricted ICMP can be used for network reconnaissance and Distributed Denial of Service (DDoS) attacks, posing a significant security risk.
remediation: |
Modify EC2 security group rules to limit ICMP access to necessary, trusted IP addresses/ranges only.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.protocol,Values=icmp Name=ip-permission.cidr,Values='0.0.0.0/0' --query 'SecurityGroups[*].GroupId' --output json
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted ICMP access (0.0.0.0/0 or ::/0)"'
# digest: 4a0a0047304502201c1e1628656627c21447c7abc8072f76f2a62c9d1e6cadb470ecb80db95258ce022100b4302e8fb947bc6c9bdcd1344ce69898da49781c66a9574bba9bd2eb7920ed35:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-memcached
info:
name: Unrestricted Access to Memcached
author: princechaddha
severity: critical
description: |
Detects unrestricted inbound access to Memcached on Amazon EC2 instances, which can lead to cache poisoning, unauthorized access, and DDoS attacks.
impact: |
Unrestricted access increases the risk of cache poisoning, unauthorized data access, and potential DDoS attacks on the Memcached server.
remediation: |
Restrict inbound access to Memcached by updating EC2 security group rules to allow only trusted IPs to connect on TCP/UDP port 11211.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=11211 Name=ip-permission.to-port,Values=11211 Name=ip-permission.cidr,Values='0.0.0.0/0' --output json --query 'SecurityGroups[*].GroupId'
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 11211"'
# digest: 490a0046304402202b6556d6f2df24efabf60ee89f51b5d4d241a0017dfc7b025c95824cdcc26e290220204a2254be4259786fc50401c47fbb35ad21e621c90cf829f74c56d8297ef644:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,39 @@
id: ec2-unrestricted-mongodb
info:
name: Unrestricted MongoDB Access in EC2
author: princechaddha
severity: critical
description: |
Identifies open access to MongoDB in AWS EC2 security groups, where inbound rules allow unrestricted access (0.0.0.0/0 or ::/0) to TCP port 27017. This poses a significant risk as it can lead to unauthorized access and potential data breaches.
impact: |
Allowing unrestricted access to MongoDB in EC2 can lead to unauthorized data access, data manipulation, or denial of service attacks, potentially resulting in critical data breaches and compliance violations.
remediation: |
Restrict MongoDB's TCP port 27017 access in EC2 security groups to only those IP addresses that require it, adhering to the principle of least privilege.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html
- https://www.mongodb.com/docs/manual/security/
tags: cloud,devops,aws,amazon,ec2,mongodb,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=27017 Name=ip-permission.to-port,Values=27017 Name=ip-permission.cidr,Values='0.0.0.0/0' --query 'SecurityGroups[*].GroupId' --output json
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=27017 Name=ip-permission.to-port,Values=27017 Name=ip-permission.ipv6-cidr,Values='::/0' --query 'SecurityGroups[*].GroupId' --output json
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted mongodb access (0.0.0.0/0 or ::/0) on port 27017"'
# digest: 4b0a0048304602210083e0104b459e8885610b9980b58d725caea579be4660fb40a27750097b47336d022100bc5f067c97ab723d4b4282cfabbf3795e702259686d1d368963d120707913ee5:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-mssql
info:
name: Unrestricted Access to SQL on EC2
author: princechaddha
severity: high
description: |
Identifies open inbound access to Microsoft SQL Server on Amazon EC2 instances. Checks for security groups allowing unrestricted access (0.0.0.0/0 or ::/0) on TCP port 1433, increasing risks to SQL databases.
impact: |
Unrestricted access on port 1433 exposes Microsoft SQL Server instances to potential unauthorized access, data breaches, and other security vulnerabilities.
remediation: |
Restrict inbound traffic on TCP port 1433 to known, secure IP addresses. Regularly review and update security group rules to maintain minimal access requirements.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=1433 Name=ip-permission.to-port,Values=1433 Name=ip-permission.cidr,Values='0.0.0.0/0' --output json --query 'SecurityGroups[*].GroupId'
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 1433"'
# digest: 4a0a0047304502207fea1bdfd1275fd4132e71cafa55258390fdaaa1ed649df3bbac41baa9abf1b2022100965299640f42e2ce5f12a3f624939a120518421a38e91ecbcdcdbae3066a6843:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-mysql
info:
name: Unrestricted MySQL Access on EC2
author: princechaddha
severity: critical
description: |
Identifies unrestricted inbound access to MySQL database servers on Amazon EC2 instances, specifically targeting TCP port 3306.
impact: |
Unrestricted access to MySQL can lead to unauthorized data access, data manipulation, or exploitation of the database server.
remediation: |
Restrict inbound access on TCP port 3306 to known, necessary IP addresses or ranges, and avoid using 0.0.0.0/0 or ::/0.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=3306 Name=ip-permission.to-port,Values=3306 Name=ip-permission.cidr,Values='0.0.0.0/0' --output json --query 'SecurityGroups[*].GroupId'
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted mongodb access (0.0.0.0/0 or ::/0) on port 3306"'
# digest: 4a0a00473045022100ff19bb5e8c3dfe1f8e153bd309d866713f3e33c0b54882652f6489cc4bac292c02200d43740086e393886f7dbaca0a05947741687ed853c8e128a7b53bc2d926d995:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-netbios
info:
name: Unrestricted NetBIOS Access in EC2
author: princechaddha
severity: critical
description: |
Checks for inbound rules in Amazon EC2 security groups that allow unrestricted access on TCP port 139 and UDP ports 137 and 138, increasing the risk of unauthorized access and potential security breaches.
impact: |
Unrestricted NetBIOS access can expose EC2 instances to network-based attacks, compromising data integrity and system availability.
remediation: |
Restrict access to TCP port 139 and UDP ports 137 and 138 in EC2 security groups. Implement strict access control based on the principle of least privilege.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=137,138,139 Name=ip-permission.to-port,Values=137,138,139 Name=ip-permission.cidr,Values='0.0.0.0/0' --output json --query 'SecurityGroups[*].GroupId'
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on ports 137, 138 or 139"'
# digest: 4b0a00483046022100b04e63ff33e72a571e6fd0e696ab8a39a420f24de0a1d398686da93124a96e50022100bc0a89161a20972f692bba232833227053093823f47628cbb97ca0564c8d6c54:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-opensearch
info:
name: Unrestricted OpenSearch Access
author: princechaddha
severity: critical
description: |
    Checks EC2 security groups for inbound rules allowing unrestricted access to OpenSearch on TCP port 9200. Access should be restricted to essential IP addresses only.
impact: |
Unrestricted access to OpenSearch can lead to unauthorized data access, modification, or denial of service attacks.
remediation: |
Modify EC2 security group rules to limit access to TCP port 9200 for OpenSearch, allowing only necessary IPs, implementing the principle of least privilege.
reference:
- https://en.wikipedia.org/wiki/OpenSearch
tags: cloud,devops,aws,amazon,opensearch,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
      aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=9200 Name=ip-permission.to-port,Values=9200 Name=ip-permission.cidr,Values='0.0.0.0/0' --query 'SecurityGroups[*].GroupId' --output json
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 9200"'
# digest: 490a004630440220592b35acadc3d541d7bab687bb36ff879999897d4c57bee946714c37eef4c37a0220303632eb1d63cfd0d31301ed29423993181942dae0da7a842b80921b989b6b4c:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-oracle
info:
name: Unrestricted Oracle DB Access
author: princechaddha
severity: critical
description: |
Identifies unrestricted inbound access to Oracle databases in Amazon EC2 instances, which increases the risk of unauthorized access and attacks.
impact: |
Allows potential unauthorized access to the Oracle database, leading to data leakage, data manipulation, or further exploitation.
remediation: |
Restrict inbound traffic on TCP port 1521 to known IPs or ranges and employ strict access controls.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=1521 Name=ip-permission.to-port,Values=1521 Name=ip-permission.cidr,Values='0.0.0.0/0' --query 'SecurityGroups[*].GroupId' --output json
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 1521"'
# digest: 490a00463044022016b07bbcc6591afe7642ce52428085c7c2e5f2d923acb812a880bc658d607d5a022073f1dc85bb8b3e17f760ded2efa94b2aea4c14a6eb0fa135a1adb12bf604084a:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-pgsql
info:
name: Unrestricted PostgreSQL Access
author: princechaddha
severity: critical
description: |
Identifies unrestricted inbound access to PostgreSQL databases in Amazon EC2 security groups, which can expose databases to security risks.
impact: |
Unrestricted access on TCP port 5432 increases vulnerability to unauthorized access and potential data breaches.
remediation: |
Restrict inbound traffic to PostgreSQL servers by setting stringent rules in EC2 security groups, limiting access to specific IPs or ranges.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html#security-group-rules
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=5432 Name=ip-permission.to-port,Values=5432 Name=ip-permission.cidr,Values='0.0.0.0/0' --query 'SecurityGroups[*].GroupId' --output json
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 5432"'
# digest: 4a0a004730450221009dc490795c723cfe321511e129d2e6ff3de628de4b81979843eae48bb1b3ba7502200ffde00d7cb8957a0b72aa8bd39b4adde0bbc0236d7b671dd8eade57d62b69bc:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-rdp
info:
name: Restrict EC2 RDP Access
author: princechaddha
severity: high
description: |
Check Amazon EC2 security groups for inbound rules that allow unrestricted RDP access and restrict access to trusted IPs.
impact: |
Unrestricted RDP access increases the risk of unauthorized access and potential breaches.
remediation: |
Modify the EC2 security group rules to limit RDP access (TCP 3389) to known, trusted IP addresses or ranges.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=3389 Name=ip-permission.to-port,Values=3389 Name=ip-permission.cidr,Values='0.0.0.0/0' --output json --query 'SecurityGroups[*].GroupId'
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 3389"'
# digest: 4a0a00473045022002ecd5ab647c14882b81b474962bb00f2efc2099d867125b8deb662e1c7a8e70022100877b207077fd1c5a89c0529f98c757af212d85b0d086a8ef00052ebc9005f0a6:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-redis
info:
name: Unrestricted Redis Access
author: princechaddha
severity: critical
description: |
Checks for inbound rules in Amazon EC2 security groups that allow unrestricted access to Redis cache server instances on TCP port 6379.
impact: |
Unrestricted access can expose Redis instances to unauthorized access and potential security breaches.
remediation: |
Restrict inbound access to Redis instances by updating EC2 security group rules to allow only specific, trusted IP addresses.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=6379 Name=ip-permission.to-port,Values=6379 Name=ip-permission.cidr,Values='0.0.0.0/0' --output json --query 'SecurityGroups[*].GroupId'
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 6379"'
# digest: 4b0a00483046022100a19a6281bbac4a97ec0b09a1eaa1f789d3eb364bb152c2110e8aacaba4da4895022100c385619aae77905775c394990ef99a35e78f11941d2cb7579db73b2f6a4ef013:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-smtp
info:
name: Unrestricted SMTP Access in EC2
author: princechaddha
severity: critical
description: |
Identifies unrestricted inbound access on TCP port 25 for EC2 security groups, which increases the risk of SMTP-related attacks.
impact: |
Allowing unrestricted SMTP access can lead to spamming, mail relay abuse, and potentially compromise mail servers.
remediation: |
Restrict TCP port 25 access to known, necessary IP addresses only. Avoid using 0.0.0.0/0 or ::/0 in security group rules.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=25 Name=ip-permission.to-port,Values=25 Name=ip-permission.cidr,Values='0.0.0.0/0' --output json --query 'SecurityGroups[*].GroupId'
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 25"'
# digest: 490a0046304402207f49f7b3e8b59a10d998936b7fa721458e3659599ca2f4f284aedc250af454e902206668d8d3207fa24654b24c96d1df3b590be443aa8f26d5ed0e2a6e7bef4919a2:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-ssh
info:
name: Unrestricted SSH Access in EC2
author: princechaddha
severity: high
description: |
Checks for inbound rules in Amazon EC2 security groups that allow unrestricted SSH access (0.0.0.0/0 or ::/0) on TCP port 22, indicating a security risk by exposing the SSH server to the internet.
impact: |
Unrestricted SSH access increases the risk of unauthorized access and potential brute force attacks against the SSH server, compromising the security of the EC2 instances.
remediation: |
Restrict SSH access in EC2 security groups to trusted IP addresses or ranges, adhering to the Principle of Least Privilege (POLP) and mitigating the risk of unauthorized access.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=22 Name=ip-permission.to-port,Values=22 Name=ip-permission.cidr,Values='0.0.0.0/0' --output json --query 'SecurityGroups[*].GroupId'
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 22"'
# digest: 4a0a0047304502205ba8e3a283bd695b4f0267dab41892b97e7ea38371e15259616ac64c78fe117c0221008ab0347e4be89942208e1bf266891d41678a76a3ec0ce920f060d80429539688:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,37 @@
id: ec2-unrestricted-telnet
info:
name: Restrict EC2 Telnet Access
author: princechaddha
severity: critical
description: |
Checks for unrestricted inbound Telnet access (TCP port 23) in Amazon EC2 security groups, highlighting potential security risks.
impact: |
Unrestricted Telnet access can expose EC2 instances to unauthorized access and potential security breaches.
remediation: |
Restrict inbound Telnet access by updating EC2 security group rules to allow only trusted IP ranges or disabling Telnet if not required.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-security-groups --region $region --filters Name=ip-permission.from-port,Values=23 Name=ip-permission.to-port,Values=23 Name=ip-permission.cidr,Values='0.0.0.0/0' --output json --query 'SecurityGroups[*].GroupId'
extractors:
- type: json
name: securitygroup
internal: true
json:
- '.[]'
- type: dsl
dsl:
- 'securitygroup + " security group(s) alows unrestricted access (0.0.0.0/0 or ::/0) on TCP port 23"'
# digest: 4a0a004730450221009249024faa045e4c4a777389a760b53b294ea9285a93048a108e694ffdb7401302201be48e1ed82fb8dc69023ae0a15c891a5592f4c00d1c979e07e084456aed7bc6:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,57 @@
id: publicly-shared-ami
info:
name: Publicly Shared AMI
author: princechaddha
severity: medium
description: |
Checks if Amazon Machine Images (AMIs) are publicly shared, potentially exposing sensitive data.
impact: |
Public sharing of AMIs can lead to unauthorized access and compromise of sensitive information contained within these images.
remediation: |
Restrict AMI sharing to specific, trusted AWS accounts and ensure they are not publicly accessible.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sharingamis-explicit.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,ami,aws-cloud-config
variables:
region: "us-east-1"
flow: |
code(1)
for(let AmiName of iterate(template.amis)){
set("ami", AmiName)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
      aws ec2 describe-images --region $region --owners self --query 'Images[*].ImageId' --output json
extractors:
- type: json
name: amis
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws ec2 describe-images --region $region --image-ids $ami --owners self --query 'Images[*].Public'
matchers:
- type: word
words:
- "true"
extractors:
- type: dsl
dsl:
          - 'ami + " AMI is publicly shared"'
# digest: 4a0a0047304502202170a728aa9a257c4f5c57f8cbe604df3b4288eb8d54deeaf7e1c8961e392c4d022100c0f6fffcdfbf887cdf6b0bf253f5d468b33670e054ff2669b3dc4c2245560595:922c64590222798bb761d5b6d8e72950
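
A hedged example of the remediation, using a placeholder image ID and account ID: remove the public launch permission, optionally re-sharing with specific accounts.

    # Remove the public ("all") launch permission from the flagged AMI (example image ID)
    aws ec2 modify-image-attribute \
      --region us-east-1 \
      --image-id ami-0abcdef1234567890 \
      --launch-permission "Remove=[{Group=all}]"

    # Optionally share it with a specific trusted account instead (example account ID)
    aws ec2 modify-image-attribute \
      --region us-east-1 \
      --image-id ami-0abcdef1234567890 \
      --launch-permission "Add=[{UserId=111122223333}]"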

View File

@ -0,0 +1,57 @@
id: unencrypted-aws-ami
info:
name: Unencrypted AWS AMI
author: princechaddha
severity: high
description: |
Ensure Amazon Machine Images (AMIs) are encrypted to meet data-at-rest encryption compliance and protect sensitive data.
impact: |
Unencrypted AMIs can expose sensitive data to unauthorized access, risking data breaches and non-compliance with data protection regulations.
remediation: |
Encrypt your AMIs using AWS managed keys or customer-managed keys in the AWS Key Management Service (KMS) to ensure data security.
reference:
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIEncryption.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,ec2,aws-cloud-config
variables:
region: "us-east-1"
flow: |
code(1)
for(let AmiName of iterate(template.amis)){
set("ami", AmiName)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws ec2 describe-images --region $region --owners self --output json --query 'Images[*].ImageId'
extractors:
- type: json
name: amis
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws ec2 describe-images --region $region --image-ids $ami --query 'Images[*].BlockDeviceMappings[*].Ebs.Encrypted[]'
matchers:
- type: word
words:
- "false"
extractors:
- type: dsl
dsl:
- 'ami + " AMI is not encrypted"'
# digest: 4a0a00473045022006b2a8f1493aca05a5bbb6dd85e177cfacec3cf7e380e0bdd32179719555a881022100f893098f309383eacc3b8fff8a3394101a3bd39897babe77b4ac0911555498ba:922c64590222798bb761d5b6d8e72950
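
One remediation path, sketched here with placeholder IDs, is to produce an encrypted copy of the AMI and retire the unencrypted original:

    # Create an encrypted copy of the flagged AMI (example ID; uses the default EBS KMS key)
    aws ec2 copy-image \
      --region us-east-1 \
      --source-region us-east-1 \
      --source-image-id ami-0abcdef1234567890 \
      --name "encrypted-copy-of-ami-0abcdef1234567890" \
      --encrypted
    # A customer-managed key can be supplied instead via --kms-key-id alias/my-key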

View File

@ -0,0 +1,29 @@
id: iam-access-analyzer
info:
name: IAM Access Analyzer is not Used
author: princechaddha
severity: medium
description: |
    Checks whether Amazon IAM Access Analyzer is enabled to identify unintended access to resources in the AWS account.
reference:
- https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/list-analyzers.html
tags: cloud,devops,aws,amazon,iam,aws-cloud-config
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws accessanalyzer list-analyzers --query 'analyzers[*].arn'
matchers:
- type: word
words:
- "[]"
extractors:
- type: dsl
dsl:
- '"IAM Access Analyzer is not Used in your AWS account"'
# digest: 4a0a00473045022030390836bad5e6468e11d2dbf56d7f809db536831d633867e2d605ec841e8b9d022100ea2e18d9be8f713b472d94507e0df31148e1a1403df2ba103fbf8dacee76173d:922c64590222798bb761d5b6d8e72950
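
If the check fires, an account-level analyzer can be created from the CLI; the analyzer name below is only an example:

    # Create an account-scoped IAM Access Analyzer (example name)
    aws accessanalyzer create-analyzer \
      --analyzer-name account-analyzer \
      --type ACCOUNT

    # Confirm it is now listed
    aws accessanalyzer list-analyzers --query 'analyzers[*].arn'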

View File

@ -0,0 +1,30 @@
id: iam-expired-ssl
info:
name: Remove Expired SSL/TLS Certificates in AWS IAM
author: princechaddha
severity: high
description: |
Checks for expired SSL/TLS certificates from AWS IAM
reference:
- https://docs.aws.amazon.com/cli/latest/reference/iam/list-server-certificates.html
tags: cloud,devops,aws,amazon,iam,ssl,aws-cloud-config
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws iam list-server-certificates | jq -r '.ServerCertificateMetadataList[] | select(.Expiration | fromdateiso8601 < now) | .ServerCertificateName'
extractors:
- type: regex
name: certificate
internal: true
regex:
- '\b[a-zA-Z0-9]+\b'
- type: dsl
dsl:
- 'certificate + " Certificate is expired in your AWS account"'
# digest: 490a0046304402203c1c60995a3652d60b90c6b18c6aa5e9239fa9cc964b9ccd50e5e1660af1ab29022055d501dd4c86142b75633db268ceb4a226c09b9e1e69b04c8cc7278b5f4fdf48:922c64590222798bb761d5b6d8e72950
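
A remediation sketch for a certificate reported by this check, using a placeholder certificate name:

    # Confirm the expiration date of the flagged certificate (example name)
    aws iam get-server-certificate \
      --server-certificate-name my-expired-cert \
      --query 'ServerCertificate.ServerCertificateMetadata.Expiration'

    # Delete it once no load balancer or distribution references it any longer
    aws iam delete-server-certificate --server-certificate-name my-expired-cert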

View File

@ -0,0 +1,54 @@
id: iam-full-admin-privileges
info:
name: Overly Permissive IAM Policies
author: princechaddha
severity: high
description: |
Verifies that no Amazon IAM policies grant full administrative privileges, ensuring adherence to the Principle of Least Privilege
reference:
- https://docs.aws.amazon.com/cli/latest/reference/iam/get-policy-version.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,iam,aws-cloud-config
flow: |
code(1)
for(let PolicyName of iterate(template.policies)){
set("policy", PolicyName)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws iam list-policies --scope Local --query 'Policies[*].Arn'
extractors:
- type: json # type of the extractor
internal: true
name: policies
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws iam get-policy-version --policy-arn $policy --version-id v1 --query 'PolicyVersion.Document'
matchers:
- type: word
words:
- '"Effect": "Allow"'
- '"Action": "*"'
- '"Resource": "*"'
condition: and
extractors:
- type: dsl
dsl:
- '"The IAM policy " + policy +" is Overly Permissive"'
# digest: 4a0a004730450221008bc9f722616e4216ee5bccead511cb6086d4f998014314d8a8478ec44f424f40022029c5288eda6b59b7217a8836cb5d506e7b7ad234f6272fe94570815dc7b0d0a6:922c64590222798bb761d5b6d8e72950
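
To act on a finding, the policy's attachments can be reviewed and detached before the policy itself is scoped down; the ARN and user name below are placeholders:

    # See where the overly permissive policy is attached (example ARN)
    aws iam list-entities-for-policy \
      --policy-arn arn:aws:iam::111122223333:policy/example-admin-policy

    # Detach it from an affected user, then narrow the policy before re-attaching
    aws iam detach-user-policy \
      --user-name example-user \
      --policy-arn arn:aws:iam::111122223333:policy/example-admin-policy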

View File

@ -0,0 +1,52 @@
id: iam-key-rotation-90days
info:
name: IAM Access Key Rotation - 90-Day Policy
author: princechaddha
severity: high
description: |
Checks if IAM user access keys are rotated every 90 days to minimize accidental exposures and unauthorized access risks
reference:
- https://docs.aws.amazon.com/cli/latest/reference/iam/list-access-keys.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,iam,aws-cloud-config
flow: |
code(1)
for(let UserName of iterate(template.users)){
set("user", UserName)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws iam list-users --query 'Users[*].UserName'
extractors:
- type: json # type of the extractor
internal: true
name: users
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws iam list-access-keys --user-name $user | jq -r '.AccessKeyMetadata[] | select((.CreateDate[:-6] | strptime("%Y-%m-%dT%H:%M:%S") | mktime) < (now - (90 * 86400))) | .AccessKeyId'
extractors:
- type: regex # type of the extractor
name: accesskey
internal: true
regex:
- '^AK.*'
- type: dsl
dsl:
- '"The IAM Key " + accesskey +" is older than 90 days"'
# digest: 4a0a0047304502202a9b12e596c433a426976cc985f93e87eb624f05932b7e78a72dd633496726fa022100db223fbc664946a1d52e6916fa64fb18bb07efcb40ddba5110bb24c8a29d932b:922c64590222798bb761d5b6d8e72950
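
A typical rotation flow for a key flagged by this check, with placeholder user and key IDs:

    # 1. Create a replacement key for the user (example user name)
    aws iam create-access-key --user-name example-user

    # 2. After applications have switched to the new key, deactivate the old one (example key ID)
    aws iam update-access-key --user-name example-user \
      --access-key-id AKIAIOSFODNN7EXAMPLE --status Inactive

    # 3. Once nothing breaks, delete the old key permanently
    aws iam delete-access-key --user-name example-user \
      --access-key-id AKIAIOSFODNN7EXAMPLE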

View File

@ -0,0 +1,51 @@
id: iam-mfa-enable
info:
name: MFA not enabled for AWS IAM Console User
author: princechaddha
severity: high
description: |
Verifies that Multi-Factor Authentication (MFA) is enabled for all IAM users with console access in AWS
reference:
- https://docs.aws.amazon.com/cli/latest/reference/iam/list-mfa-devices.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,iam,aws-cloud-config
flow: |
code(1)
for(let UserName of iterate(template.users)){
set("user", UserName)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws iam list-users --query 'Users[*].UserName'
extractors:
- type: json # type of the extractor
internal: true
name: users
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws iam list-mfa-devices --user-name $user --query 'MFADevices'
matchers:
- type: word
words:
- "[]"
extractors:
- type: dsl
dsl:
- '"MFA is no enabled for IAM User " + user'
# digest: 4a0a004730450221008072a04e0f68ee2345d1bfeee304675bc22468a061fd9fa3fbed31279e399640022057efc7bfe58fc41c86be4cfdc0870e4d998282ff71b6d70a3da557cb67cd2d09:922c64590222798bb761d5b6d8e72950
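
Enabling a virtual MFA device for a flagged user can be done from the CLI; the device name, user name, serial number, and codes below are placeholders:

    # Create a virtual MFA device and save the QR-code seed (example name)
    aws iam create-virtual-mfa-device \
      --virtual-mfa-device-name example-user-mfa \
      --outfile qrcode.png --bootstrap-method QRCodePNG

    # Associate it with the user, supplying two consecutive codes from the authenticator app
    aws iam enable-mfa-device \
      --user-name example-user \
      --serial-number arn:aws:iam::111122223333:mfa/example-user-mfa \
      --authentication-code1 123456 --authentication-code2 789012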

View File

@ -0,0 +1,29 @@
id: iam-password-policy
info:
name: IAM Password Policy Not Configured
author: princechaddha
severity: medium
description: |
    Verifies that Amazon IAM users adhere to a strong password policy, including minimum length, expiration, and complexity requirements.
reference:
- https://docs.aws.amazon.com/cli/latest/reference/iam/get-account-password-policy.html
tags: cloud,devops,aws,amazon,iam,aws-cloud-config
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws iam get-account-password-policy
matchers:
- type: word
words:
- "NoSuchEntity"
extractors:
- type: dsl
dsl:
- '"AWS cloud account is not configured with a custom IAM password policy"'
# digest: 490a00463044022055c5e7c44c862bac281cda22b1f74de43c5c590680abbfdef4c7814f844af67702205eb87929fe29247fa90db958e8c56b23e62472b680ae98f265da4a2e57d53f95:922c64590222798bb761d5b6d8e72950
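
A hedged example of configuring a custom password policy; the specific limits are illustrative rather than prescribed by the template:

    # Apply a custom account password policy (example values)
    aws iam update-account-password-policy \
      --minimum-password-length 14 \
      --require-uppercase-characters \
      --require-lowercase-characters \
      --require-numbers \
      --require-symbols \
      --max-password-age 90 \
      --password-reuse-prevention 24 \
      --allow-users-to-change-password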

View File

@ -0,0 +1,29 @@
id: iam-root-mfa
info:
name: MFA not enabled on AWS Root Account
author: princechaddha
severity: high
description: |
Checks if Multi-Factor Authentication (MFA) is enabled for the AWS root account
reference:
- https://docs.aws.amazon.com/cli/latest/reference/iam/get-account-summary.html
tags: cloud,devops,aws,amazon,iam,aws-cloud-config
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws iam get-account-summary | jq -r '.SummaryMap.AccountMFAEnabled'
matchers:
- type: word
words:
- "0"
extractors:
- type: dsl
dsl:
- '"MFA is not enabled on your AWS Root account"'
# digest: 4b0a00483046022100add350e50addd6d7c475c7ab805a9869384178065cc1aef7e96777448765fa2e022100cd5ae007e6406f2f721bc5d308de70f92456f2d0280b778690b85a80cd2fdb23:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,52 @@
id: iam-ssh-keys-rotation
info:
name: SSH Key Rotation - 90-Day Policy
author: princechaddha
severity: high
description: |
Verifies that IAM SSH public keys are rotated every 90 days, enhancing security and preventing unauthorized access to AWS CodeCommit repositories
reference:
- https://docs.aws.amazon.com/cli/latest/reference/iam/list-ssh-public-keys.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,iam,ssh,aws-cloud-config
flow: |
code(1)
for(let UserName of iterate(template.users)){
set("user", UserName)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws iam list-users --query 'Users[*].UserName'
extractors:
- type: json # type of the extractor
internal: true
name: users
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws iam list-ssh-public-keys --user-name $user | jq -r '.SSHPublicKeys[] | select(.UploadDate | fromdateiso8601 < (now - (90 * 86400))) | .SSHPublicKeyId'
extractors:
- type: regex # type of the extractor
        name: sshkey
internal: true
regex:
- '^AP.*'
- type: dsl
dsl:
- '"The SSH Public Key " + accesskey +" is older than 90 days"'
# digest: 4a0a0047304502200df47806e0ebcba6e0cbd3e933b7db44c7e85cb3e43bbb634ee48521d2c441e7022100b0694e5404356f0219d841a6ec17f3d756542a0c4137973b21d45dec07f12e47:922c64590222798bb761d5b6d8e72950
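
A rotation sketch for an SSH public key flagged by this check, with placeholder user name, key file, and key ID:

    # Upload a freshly generated public key for the user (example user and key file)
    aws iam upload-ssh-public-key \
      --user-name example-user \
      --ssh-public-key-body file://id_ed25519_new.pub

    # Deactivate the key that is older than 90 days (example key ID)
    aws iam update-ssh-public-key \
      --user-name example-user \
      --ssh-public-key-id APKAEIBAERJR2EXAMPLE \
      --status Inactive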

View File

@ -0,0 +1,38 @@
id: iam-unapproved-policy
info:
name: Unapproved IAM Policy Attachments
author: princechaddha
severity: high
description: |
Checks for the attachment of unapproved Amazon IAM managed policies to IAM roles, users, or groups, ensuring compliance with organizational access policies
reference:
- https://docs.aws.amazon.com/cli/latest/reference/iam/get-policy.html
  tags: cloud,devops,aws,amazon,iam,aws-cloud-config
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws iam get-policy --policy-arn arn:aws:iam::aws:policy/AmazonRDSFullAccess --query 'Policy.{"AttachmentCount": AttachmentCount}'
matchers-condition: and
matchers:
- type: word
part: body
words:
- "AttachmentCount"
- type: word
part: body
words:
- '"AttachmentCount": 0'
negative: true
extractors:
- type: dsl
dsl:
- '"Unapproved IAM policy is used within your AWS cloud account"'
# digest: 4a0a00473045022100cf22f4542262ded32bcf64050e268d3b514e907385f8c67a8a4f888302bb48b202206b2ee99707ba578560bc83ad3ceeae5e3981288199d898d27d0090f34f6af408:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,29 @@
id: iam-user-password-change
info:
name: Enable Self-Service Password Change for IAM Users
author: princechaddha
severity: high
description: |
    Verifies that all Amazon IAM users are allowed to change their own console passwords, which requires access to the 'iam:ChangePassword' action for their own account and the 'iam:GetAccountPasswordPolicy' action.
reference:
- https://docs.aws.amazon.com/cli/latest/reference/iam/get-account-password-policy.html
tags: cloud,devops,aws,amazon,iam,aws-cloud-config
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws iam get-account-password-policy --query 'PasswordPolicy.AllowUsersToChangePassword'
matchers:
- type: word
words:
- "true"
extractors:
- type: dsl
dsl:
- '"AllowUsersToChangePassword Policy is not enabled in your AWS account"'
# digest: 4b0a00483046022100b046545d3c72c54dee9c4051661d61c8241cbce1fb0f655fa4bb1e8461b3f295022100a7bb33ba3ddff07e68db9bd748802715215b8d62be69ab27fab22c5e539cbb28:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,30 @@
id: ssl-cert-renewal
info:
name: SSL/TLS Certificates in AWS IAM about to expire in 30 days
author: princechaddha
severity: medium
description: |
    Checks for SSL/TLS certificates stored in AWS IAM that are due to expire within the next 30 days.
reference:
    - https://docs.aws.amazon.com/cli/latest/reference/iam/list-server-certificates.html
tags: cloud,devops,aws,amazon,iam,ssl,tls,aws-cloud-config
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws iam list-server-certificates | jq -r '.ServerCertificateMetadataList[] | select(.Expiration | fromdateiso8601 - now < (30 * 86400)) | .ServerCertificateName'
extractors:
- type: regex
name: certificate
internal: true
regex:
- '\b[a-zA-Z0-9]+\b'
- type: dsl
dsl:
- 'certificate + " Certificate is about to expire in 30 days"'
# digest: 4a0a00473045022100a517288f527ffb0f08d1f6803d7d738d8c9ed2a34f35e32b824cabbe7f3fa41b022028ebdfe7453cc66f3f511e46c5ffbda6db8dc43551271a101edb11021fad7fd3:922c64590222798bb761d5b6d8e72950
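
Renewal generally means uploading the re-issued certificate under a new name and repointing consumers to it; the certificate name and file paths below are placeholders:

    # Upload the renewed certificate material to IAM (example name and files)
    aws iam upload-server-certificate \
      --server-certificate-name example-cert-2025 \
      --certificate-body file://example-cert.pem \
      --private-key file://example-key.pem \
      --certificate-chain file://example-chain.pem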

View File

@ -0,0 +1,57 @@
id: aurora-copy-tags-snap
info:
name: Aurora Snapshot Tag Copy
author: princechaddha
severity: high
description: |
Ensures Amazon Aurora clusters have Copy Tags to Snapshots feature enabled to automatically copy tags from clusters to snapshots.
impact: |
Without this, tags identifying ownership, purpose, or other critical information aren't propagated to snapshots, complicating management and compliance.
remediation: |
Enable Copy Tags to Snapshots for Aurora clusters via the AWS Management Console or modify the DB cluster to include this feature using AWS CLI.
reference:
- https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,aurora,rds,aws-cloud-config
variables:
region: "ap-northeast-1"
flow: |
code(1)
for(let clustername of iterate(template.clusters)){
set("cluster", clustername)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws rds describe-db-clusters --region $region --output json --query 'DBClusters[?Engine==`aurora-mysql` || Engine==`aurora-postgresql`].DBClusterIdentifier | []'
extractors:
- type: json
name: clusters
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws rds describe-db-clusters --region $region --db-cluster-identifier $cluster --query 'DBClusters[*].CopyTagsToSnapshot'
matchers:
- type: word
words:
- 'false'
extractors:
- type: dsl
dsl:
- '"Copy Tags To Snapshot is not enable for cluster " + cluster'
# digest: 4b0a00483046022100bc4ba9d64dbc0cb8bfebf677ff5b05c1eae8736bf8e64544dd8d0fc9b6daa762022100fd690deaf7ba10c756be945828cd76f7a03eb4442aeadf3c2cadf5bdb6f995c9:922c64590222798bb761d5b6d8e72950
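
The CLI remediation is a single modify call; the cluster identifier is a placeholder:

    # Enable Copy Tags to Snapshots on a flagged Aurora cluster (example identifier)
    aws rds modify-db-cluster \
      --region ap-northeast-1 \
      --db-cluster-identifier example-aurora-cluster \
      --copy-tags-to-snapshot \
      --apply-immediately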

View File

@ -0,0 +1,57 @@
id: aurora-delete-protect
info:
name: Aurora Cluster Deletion Protection
author: princechaddha
severity: medium
description: |
Ensure Amazon Aurora clusters have Deletion Protection enabled to prevent accidental data loss.
impact: |
Without Deletion Protection, Aurora clusters can be accidentally deleted, leading to irreversible data loss.
remediation: |
Enable Deletion Protection by modifying the Aurora cluster settings in the AWS Management Console or via the AWS CLI.
reference:
- https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/DBInstanceDeletionProtection.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,aurora,rds,aws-cloud-config
variables:
region: "ap-northeast-1"
flow: |
code(1)
for(let clustername of iterate(template.clusters)){
set("cluster", clustername)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws rds describe-db-clusters --region $region --output json --query 'DBClusters[?Engine==`aurora-mysql` || Engine==`aurora-postgresql`].DBClusterIdentifier | []'
extractors:
- type: json
name: clusters
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
      aws rds describe-db-clusters --region $region --db-cluster-identifier $cluster --query 'DBClusters[*].DeletionProtection'
matchers:
- type: word
words:
- 'false'
extractors:
- type: dsl
dsl:
- '"Deletion Protection safety feature is not enabled for " + cluster'
# digest: 490a0046304402203957dae25c011794e69952e0a2122ce835294c72217b3dab63dfb30cec9fb36a02200bcd6f0ed9487a240393aebd0937196c729d98ecf8a3c86cb65a854534da925c:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,57 @@
id: iam-db-auth
info:
name: IAM Database Authentication
author: princechaddha
severity: medium
description: |
    Ensure IAM Database Authentication is enabled for RDS instances, allowing IAM to manage database access and removing the need to store user credentials in database configurations.
impact: |
Without IAM Database Authentication, database credentials need to be managed internally, increasing the risk of credential leakage and unauthorized access.
remediation: |
Enable IAM Database Authentication for MySQL and PostgreSQL RDS database instances to leverage IAM for secure, token-based access control.
reference:
- https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,rds,aws-cloud-config
variables:
region: "ap-northeast-1"
flow: |
code(1)
for(let DBInstances of iterate(template.instances)){
set("db", DBInstances)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws rds describe-db-instances --region $region --output json --query 'DBInstances[?Engine==`mysql` || Engine==`postgres`].DBInstanceIdentifier | []'
extractors:
- type: json
name: instances
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws rds describe-db-instances --region $region --db-instance-identifier $db --query 'DBInstances[*].IAMDatabaseAuthenticationEnabled'
matchers:
- type: word
words:
- 'false'
extractors:
- type: dsl
dsl:
- '"Database Authentication feature is not enabled for RDS database instance " + db'
# digest: 4a0a00473045022100c13b8d1e92988ff64fb71594f77d83105a2c8381fb5de3a284e41ee9b5c707940220585d60f323e31b9bc5ad2c72b045b1645c4a1546555f29c1ffb99936519dea83:922c64590222798bb761d5b6d8e72950
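
A sketch of enabling the feature on a flagged instance (placeholder identifier); IAM authentication is only supported for certain engines and versions, so check compatibility first:

    # Turn on IAM Database Authentication for the instance (example identifier)
    aws rds modify-db-instance \
      --region ap-northeast-1 \
      --db-instance-identifier example-mysql-instance \
      --enable-iam-database-authentication \
      --apply-immediately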

View File

@ -0,0 +1,57 @@
id: rds-backup-enable
info:
name: RDS Automated Backup Check
author: princechaddha
severity: high
description: |
Ensure that your Amazon RDS database instances have automated backups enabled for point-in-time recovery.
impact: |
Lack of automated backups can lead to data loss in case of accidental deletion or database corruption.
remediation: |
Enable automated backups for RDS instances by setting the backup retention period to a value other than 0.
reference:
- https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,rds,aws-cloud-config
variables:
region: "ap-northeast-1"
flow: |
code(1)
for(let DBInstances of iterate(template.instances)){
set("db", DBInstances)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws rds describe-db-instances --region $region --output json --query 'DBInstances[*].DBInstanceIdentifier'
extractors:
- type: json
name: instances
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws rds describe-db-instances --region $region --db-instance-identifier $db --query 'DBInstances[*].BackupRetentionPeriod'
matchers:
- type: word
words:
- '0'
extractors:
- type: dsl
dsl:
- '"Automated backups are not enabled for " + db + " RDS database instance"'
# digest: 4a0a00473045022100886ff717bb53ef7b235b73d9d22a861dee9a08a2c196289d611085a7e0418faa02200ad55fc97ce71f4828dc428a743be339174c1fdd6b0e68b4501e0ef6acf6b9de:922c64590222798bb761d5b6d8e72950
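
Enabling automated backups amounts to setting a non-zero retention period; the 7-day value and identifier below are only examples:

    # Set a 7-day backup retention period on the flagged instance (example identifier)
    aws rds modify-db-instance \
      --region ap-northeast-1 \
      --db-instance-identifier example-db-instance \
      --backup-retention-period 7 \
      --apply-immediately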

View File

@ -0,0 +1,57 @@
id: rds-deletion-protection
info:
name: RDS Deletion Protection
author: princechaddha
severity: high
description: |
Ensure Amazon RDS instances have Deletion Protection enabled to prevent accidental deletions.
impact: |
Without Deletion Protection, RDS instances can be inadvertently deleted, leading to potential data loss and service disruption.
remediation: |
Enable Deletion Protection for all Amazon RDS instances via the AWS Management Console or using the AWS CLI.
reference:
- https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,rds,aws-cloud-config
variables:
region: "ap-northeast-1"
flow: |
code(1)
for(let DBInstances of iterate(template.instances)){
set("db", DBInstances)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws rds describe-db-instances --region $region --output json --query 'DBInstances[*].DBInstanceIdentifier'
extractors:
- type: json
name: instances
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws rds describe-db-instances --region $region --db-instance-identifier $db --query 'DBInstances[*].DeletionProtection' --output json
matchers:
- type: word
words:
- 'false'
extractors:
- type: dsl
dsl:
- '"RDS Deletion protection feature is not enabled for RDS database instance " + db'
# digest: 490a00463044022038daa8448190d837886c059bdc5c6ac4e48af03bf77572125c2465420d62224a02206ee2419a639762e33d52f890714e4e1dcb9aac3b10882d8accbdfc4e3324d67f:922c64590222798bb761d5b6d8e72950

View File

@ -0,0 +1,57 @@
id: rds-encryption-check
info:
name: RDS Instance Encryption
author: princechaddha
severity: high
description: |
Ensure that your Amazon RDS database instances are encrypted to fulfill compliance requirements for data-at-rest encryption.
impact: |
Non-encrypted RDS instances may lead to data breaches, failing to comply with data protection regulations, which could result in hefty fines and loss of reputation.
remediation: |
    Encryption cannot be enabled in place on an existing RDS instance; create a snapshot, copy it with encryption enabled using an AWS managed or customer-managed KMS key, and restore the instance from the encrypted copy. For new instances, enable encryption at creation time.
reference:
- https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html
metadata:
max-request: 2
tags: cloud,devops,aws,amazon,rds,aws-cloud-config
variables:
region: "ap-northeast-1"
flow: |
code(1)
for(let DBInstances of iterate(template.instances)){
set("db", DBInstances)
code(2)
}
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws rds describe-db-instances --region $region --output json --query 'DBInstances[*].DBInstanceIdentifier'
extractors:
- type: json
name: instances
internal: true
json:
- '.[]'
- engine:
- sh
- bash
source: |
aws rds describe-db-instances --region $region --db-instance-identifier $db --query 'DBInstances[*].{"StorageEncrypted":StorageEncrypted,"KmsKeyId":KmsKeyId}'
matchers:
- type: word
words:
- 'false'
extractors:
- type: dsl
dsl:
- '"The encryption of data at rest is not enabled for " + db + " RDS database instance"'
# digest: 4a0a00473045022013a493868c5989511d93d8702f49b30f995463ea94c0e0b9bfc859864b301cf3022100e40eecfced944d0776dcc8cc0f6b762902df7fcffc45e727b3a6a2b25630cf79:922c64590222798bb761d5b6d8e72950
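
A remediation sketch with placeholder identifiers; since encryption cannot be enabled in place, the flow goes through an encrypted snapshot copy:

    # 1. Snapshot the unencrypted instance (example identifiers)
    aws rds create-db-snapshot \
      --region ap-northeast-1 \
      --db-instance-identifier example-db-instance \
      --db-snapshot-identifier example-db-snap

    # 2. Copy the snapshot with encryption enabled (example KMS alias)
    aws rds copy-db-snapshot \
      --region ap-northeast-1 \
      --source-db-snapshot-identifier example-db-snap \
      --target-db-snapshot-identifier example-db-snap-encrypted \
      --kms-key-id alias/aws/rds

    # 3. Restore a new, encrypted instance from the copy, then cut over and retire the old one
    aws rds restore-db-instance-from-db-snapshot \
      --region ap-northeast-1 \
      --db-instance-identifier example-db-instance-encrypted \
      --db-snapshot-identifier example-db-snap-encrypted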

View File

@ -0,0 +1,36 @@
id: rds-event-notify
info:
name: RDS Event Notification Absence
author: princechaddha
severity: medium
description: |
Checks for the activation of event notifications for Amazon RDS instances to monitor significant database events.
impact: |
Without event notifications, there's a risk of missing critical database events, impacting operational awareness and incident response.
remediation: |
Enable event notifications in Amazon RDS by creating an event subscription with Amazon SNS to receive notifications.
reference:
- https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html
tags: cloud,devops,aws,amazon,rds,aws-cloud-config
variables:
region: "ap-northeast-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws rds describe-event-subscriptions --region $region --query 'EventSubscriptionsList'
matchers:
- type: word
words:
- '[]'
extractors:
- type: dsl
dsl:
- '"No event notifications for RDS resources in " + region + " AWS region"'
# digest: 4a0a0047304502203da20f61e273f1598025e8b5fc491882b2b9b93d743bf7be37209af3351653b0022100b109b8c9e591621fe1c087381073e5d49cad3d424fa9a3491609c28d4bb8cbdf:922c64590222798bb761d5b6d8e72950
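
A minimal sketch of creating an event subscription, assuming an existing SNS topic (names and ARN are placeholders):

    # Subscribe RDS instance-level events to an SNS topic (example name and ARN)
    aws rds create-event-subscription \
      --region ap-northeast-1 \
      --subscription-name rds-instance-events \
      --sns-topic-arn arn:aws:sns:ap-northeast-1:111122223333:rds-events \
      --source-type db-instance \
      --event-categories "availability" "failure" "maintenance"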

View File

@ -0,0 +1,36 @@
id: rds-event-sub-enable
info:
name: RDS Event Subscription Not Enabled
author: princechaddha
severity: high
description: |
Ensures Amazon RDS event notifications are enabled for database instance level events, allowing for real-time alerts on operational changes.
impact: |
Lack of event notifications may delay the response to critical RDS operational events, affecting database availability and performance.
remediation: |
Enable RDS event notification subscriptions for relevant database instance level events through the AWS Management Console or AWS CLI.
reference:
- https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html
tags: cloud,devops,aws,amazon,rds,aws-cloud-config
variables:
region: "ap-northeast-1"
self-contained: true
code:
- engine:
- sh
- bash
source: |
aws rds describe-event-subscriptions --region $region --query "EventSubscriptionsList[?SourceType == 'db-instance'].CustSubscriptionId"
matchers:
- type: word
words:
- '[]'
extractors:
- type: dsl
dsl:
- '"There are no Amazon RDS event subscriptions created for instance level events in " + region + " AWS region"'
# digest: 4a0a00473045022046dbc7d74b95e340ebc6d0bc27c308f378cea938470e758605822ac111ed6843022100ba1ee6fdbb6940216c57cbd8666cb56a4645ad5f8138bd63b649fb85abf80b5f:922c64590222798bb761d5b6d8e72950

Some files were not shown because too many files have changed in this diff