Marcus Bakker 2020-06-22 19:36:31 +02:00
commit 08b30f0f35
26 changed files with 438 additions and 240 deletions

View File

@ -1,6 +1,6 @@
FROM python:3.8-slim-buster
LABEL version="1.3"
LABEL version="1.3.1"
# copy DeTT&CT and install the requirements
COPY . /opt/DeTTECT

View File

@ -1,7 +1,7 @@
<img src="https://github.com/rabobank-cdc/DeTTECT/wiki/images/logo.png" alt="DeTT&CT" width=30% height=30%>
#### Detect Tactics, Techniques & Combat Threats
Latest version: [1.3](https://github.com/rabobank-cdc/DeTTECT/wiki/Changelog#version-13)
Latest version: [1.3.1](https://github.com/rabobank-cdc/DeTTECT/wiki/Changelog#version-131)
To get started with DeTT&CT, check out this [page](https://github.com/rabobank-cdc/DeTTECT/wiki/Getting-started), our [talk](https://www.youtube.com/watch?v=_kWpekkhomU) at hack.lu 2019 and our blog on:
- [mbsecure.nl/blog/2019/5/dettact-mapping-your-blue-team-to-mitre-attack](https://www.mbsecure.nl/blog/2019/5/dettact-mapping-your-blue-team-to-mitre-attack) or

View File

@ -2,7 +2,7 @@ import re
APP_NAME = 'DeTT&CT'
APP_DESC = 'Detect Tactics, Techniques & Combat Threats'
VERSION = '1.3'
VERSION = '1.3.1'
EXPIRE_TIME = 60 * 60 * 24
@ -44,7 +44,7 @@ COLOR_DS_99p_HAPPY = '#689F38'
COLOR_DS_100p_HAPPY = '#33691E'
# Detection colors (green range)
COLOR_D_0 = '#64B5F6' # Blue: Forensics/Context
COLOR_D_0 = '#9C27B0' # Purple: Forensics/Context
COLOR_D_1 = '#DCEDC8'
COLOR_D_2 = '#AED581'
COLOR_D_3 = '#8BC34A'
@ -57,6 +57,14 @@ COLOR_V_2 = '#64B5F6'
COLOR_V_3 = '#1976D2'
COLOR_V_4 = '#0D47A1'
# Orange overlay colors
COLOR_O_0 = '#FFECB7'
COLOR_O_1 = '#FFE07A'
COLOR_O_2 = '#FFCA28'
COLOR_O_3 = '#FFAE00'
COLOR_O_4 = '#FF8F00'
COLOR_O_5 = '#FF6F00'
COLOR_WHITE = '#FFFFFF'
# Detection and visibility overlay color:
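
These six COLOR_O_* constants give the group overlay its own orange range: one shade per detection score (0-5), of which four are reused for visibility scores (1-4). A minimal sketch of how a score can be mapped onto these constants (the dict and helper name are illustrative and not part of the commit, which uses chained conditional expressions in group_mapping.py instead):

# Illustrative only; assumes the COLOR_O_* constants defined above are in scope.
_OVERLAY_COLORS = {0: COLOR_O_0, 1: COLOR_O_1, 2: COLOR_O_2,
                   3: COLOR_O_3, 4: COLOR_O_4, 5: COLOR_O_5}

def overlay_color(score):
    # an out-of-range score yields an empty string, matching the fall-through
    # behaviour of the chained conditionals used later in this commit
    return _OVERLAY_COLORS.get(score, '')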

View File

@ -8,10 +8,12 @@ from generic import *
# Imports for pandas and plotly are done inside the functions that use these libraries, for performance reasons.
def generate_data_sources_layer(filename):
def generate_data_sources_layer(filename, output_filename, layer_name):
"""
Generates a generic layer for data sources.
:param filename: the filename of the YAML file containing the data sources administration
:param output_filename: the output filename defined by the user
:param layer_name: the name of the Navigator layer
:return:
"""
my_data_sources, name, platform, exceptions = _load_data_sources(filename)
@ -19,17 +21,23 @@ def generate_data_sources_layer(filename):
# Do the mapping between my data sources and MITRE data sources:
my_techniques = _map_and_colorize_techniques(my_data_sources, platform, exceptions)
layer = get_layer_template_data_sources("Data sources " + name, 'description', 'attack', platform)
if not layer_name:
layer_name = 'Data sources ' + name
layer = get_layer_template_data_sources(layer_name, 'description', 'attack', platform)
layer['techniques'] = my_techniques
json_string = simplejson.dumps(layer).replace('}, ', '},\n')
write_file('data_sources', name, json_string)
if not output_filename:
output_filename = create_output_filename('data_sources', name)
write_file(output_filename, json_string)
def plot_data_sources_graph(filename):
def plot_data_sources_graph(filename, output_filename):
"""
Generates a line graph which shows the improvement in the number of data sources over time.
:param filename: the filename of the YAML file containing the data sources administration
:param output_filename: the output filename defined by the user
:return:
"""
# pylint: disable=unused-variable
@ -45,7 +53,11 @@ def plot_data_sources_graph(filename):
df = pd.DataFrame(graph_values).groupby('date', as_index=False)[['count']].sum()
df['cumcount'] = df['count'].cumsum()
output_filename = get_non_existing_filename('output/graph_data_sources', 'html')
if not output_filename:
output_filename = 'graph_data_sources'
elif output_filename.endswith('.html'):
output_filename = output_filename.replace('.html', '')
output_filename = get_non_existing_filename('output/' + output_filename, 'html')
import plotly
import plotly.graph_objs as go
@ -57,18 +69,22 @@ def plot_data_sources_graph(filename):
print("File written: " + output_filename)
def export_data_source_list_to_excel(filename, eql_search=False):
def export_data_source_list_to_excel(filename, output_filename, eql_search=False):
"""
Makes an overview of all MITRE ATT&CK data sources (via techniques) and lists which data sources are present
in the YAML administration, including all properties and the data quality score.
:param filename: the filename of the YAML file containing the data sources administration
:param output_filename: the output filename defined by the user
:param eql_search: specify if an EQL search was performed which may have resulted in missing ATT&CK data sources
:return:
"""
# pylint: disable=unused-variable
my_data_sources, name, platforms, exceptions = _load_data_sources(filename, filter_empty_scores=False)
excel_filename = get_non_existing_filename('output/data_sources', 'xlsx')
if not output_filename:
output_filename = 'data_sources'
elif output_filename.endswith('.xlsx'):
output_filename = output_filename.replace('.xlsx', '')
excel_filename = get_non_existing_filename('output/' + output_filename, 'xlsx')
workbook = xlsxwriter.Workbook(excel_filename)
worksheet = workbook.add_worksheet('Data sources')
@ -307,7 +323,7 @@ def update_technique_administration_file(file_data_sources, file_tech_admin):
:return:
"""
# first we generate the new visibility scores contained within a temporary tech. admin YAML 'file'
new_visibility_scores = generate_technique_administration_file(file_data_sources, write_file=False)
new_visibility_scores = generate_technique_administration_file(file_data_sources, None, write_file=False)
# we get the date, which is used at the end of the code to remove the single quotes around it
today = new_visibility_scores['techniques'][0]['visibility']['score_logbook'][0]['date']
@ -509,10 +525,11 @@ def update_technique_administration_file(file_data_sources, file_tech_admin):
# pylint: disable=redefined-outer-name
def generate_technique_administration_file(filename, write_file=True, all_techniques=False):
def generate_technique_administration_file(filename, output_filename, write_file=True, all_techniques=False):
"""
Generate a technique administration file based on the data source administration YAML file
:param filename: the filename of the YAML file containing the data sources administration
:param output_filename: the output filename defined by the user
:param write_file: by default the file is written to disk
:param all_techniques: include all ATT&CK techniques in the generated YAML file that are applicable to the
platform(s) specified in the data source YAML file
@ -529,7 +546,7 @@ def generate_technique_administration_file(filename, write_file=True, all_techni
yaml_file['name'] = name
yaml_file['platform'] = platform
yaml_file['techniques'] = []
today = dt.now().strftime('%Y-%m-%d')
today = dt.now()
# Score visibility based on the number of available data sources and the exceptions
for t in techniques:
@ -575,8 +592,11 @@ def generate_technique_administration_file(filename, write_file=True, all_techni
# remove the single quotes from the date
yaml_file_lines = fix_date_and_remove_null(file_lines, today, input_type='list')
output_filename = get_non_existing_filename('output/techniques-administration-' +
normalize_name_to_filename(name + '-' + platform_to_name(platform)), 'yaml')
if not output_filename:
output_filename = 'techniques-administration-' + normalize_name_to_filename(name + '-' + platform_to_name(platform))
elif output_filename.endswith('.yaml'):
output_filename = output_filename.replace('.yaml', '')
output_filename = get_non_existing_filename('output/' + output_filename, 'yaml')
with open(output_filename, 'w') as f:
f.writelines(yaml_file_lines)
print("File written: " + output_filename)

View File

@ -59,6 +59,8 @@ def _init_menu():
'not updated without your approval. The updated visibility '
'scores are calculated in the same way as with the option: '
'-y, --yaml', action='store_true')
parser_data_sources.add_argument('-of', '--output-filename', help='set the output filename')
parser_data_sources.add_argument('-ln', '--layer-name', help='set the name of the Navigator layer')
parser_data_sources.add_argument('--health', help='check the YAML file(s) for errors', action='store_true')
# create the visibility parser
@ -87,6 +89,8 @@ def _init_menu():
'the ATT&CK navigator', action='store_true')
parser_visibility.add_argument('-g', '--graph', help='generate a graph with visibility added through time',
action='store_true')
parser_visibility.add_argument('-of', '--output-filename', help='set the output filename')
parser_visibility.add_argument('-ln', '--layer-name', help='set the name of the Navigator layer')
parser_visibility.add_argument('--health', help='check the YAML file for errors', action='store_true')
# create the detection parser
@ -117,6 +121,8 @@ def _init_menu():
'the ATT&CK navigator', action='store_true')
parser_detection.add_argument('-g', '--graph', help='generate a graph with detections added through time',
action='store_true')
parser_detection.add_argument('-of', '--output-filename', help='set the output filename')
parser_detection.add_argument('-ln', '--layer-name', help='set the name of the Navigator layer')
parser_detection.add_argument('--health', help='check the YAML file(s) for errors', action='store_true')
# create the group parser
@ -154,6 +160,8 @@ def _init_menu():
'the EQL search. The default behaviour is to only include the '
'most recent \'score\' objects',
action='store_true', default=False)
parser_group.add_argument('-of', '--output-filename', help='set the output filename')
parser_group.add_argument('-ln', '--layer-name', help='set the name of the Navigator layer')
parser_group.add_argument('--health', help='check the YAML file(s) for errors', action='store_true')
# create the generic parser
@ -202,13 +210,13 @@ def _menu(menu_parser):
if args.update and check_file(args.file_tech, FILE_TYPE_TECHNIQUE_ADMINISTRATION, args.health):
update_technique_administration_file(file_ds, args.file_tech)
if args.layer:
generate_data_sources_layer(file_ds)
generate_data_sources_layer(file_ds, args.output_filename, args.layer_name)
if args.excel:
export_data_source_list_to_excel(file_ds, eql_search=args.search)
export_data_source_list_to_excel(file_ds, args.output_filename, eql_search=args.search)
if args.graph:
plot_data_sources_graph(file_ds)
plot_data_sources_graph(file_ds, args.output_filename)
if args.yaml:
generate_technique_administration_file(file_ds, all_techniques=args.yaml_all_techniques)
generate_technique_administration_file(file_ds, args.output_filename, all_techniques=args.yaml_all_techniques)
elif args.subparser in ['visibility', 'v']:
if args.layer or args.overlay:
@ -228,20 +236,19 @@ def _menu(menu_parser):
if not file_tech:
quit()  # something went wrong in executing the search or 0 results were returned
if args.layer:
generate_visibility_layer(file_tech, args.file_ds, False)
generate_visibility_layer(file_tech, args.file_ds, False, args.output_filename, args.layer_name)
if args.overlay:
generate_visibility_layer(file_tech, args.file_ds, True)
generate_visibility_layer(file_tech, args.file_ds, True, args.output_filename, args.layer_name)
if args.graph:
plot_graph(file_tech, 'visibility')
plot_graph(file_tech, 'visibility', args.output_filename)
if args.excel:
export_techniques_list_to_excel(file_tech)
export_techniques_list_to_excel(file_tech, args.output_filename)
# todo add search capabilities
elif args.subparser in ['group', 'g']:
if not generate_group_heat_map(args.groups, args.overlay, args.overlay_type, args.stage, args.platform,
args.software_group, args.search_visibility, args.search_detection, args.health,
include_all_score_objs=args.all_scores):
quit()  # something went wrong in executing the search or 0 results were returned
generate_group_heat_map(args.groups, args.overlay, args.overlay_type, args.stage, args.platform,
args.software_group, args.search_visibility, args.search_detection, args.health,
args.output_filename, args.layer_name, include_all_score_objs=args.all_scores)
elif args.subparser in ['detection', 'd']:
if args.overlay:
@ -260,13 +267,13 @@ def _menu(menu_parser):
if not file_tech:
quit()  # something went wrong in executing the search or 0 results were returned
if args.layer:
generate_detection_layer(file_tech, args.file_ds, False)
generate_detection_layer(file_tech, args.file_ds, False, args.output_filename, args.layer_name)
if args.overlay and check_file(args.file_ds, FILE_TYPE_DATA_SOURCE_ADMINISTRATION, args.health):
generate_detection_layer(file_tech, args.file_ds, True)
generate_detection_layer(file_tech, args.file_ds, True, args.output_filename, args.layer_name)
if args.graph:
plot_graph(file_tech, 'detection')
plot_graph(file_tech, 'detection', args.output_filename)
if args.excel:
export_techniques_list_to_excel(file_tech)
export_techniques_list_to_excel(file_tech, args.output_filename)
elif args.subparser in ['generic', 'ge']:
if args.datasources:
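
The new -of/--output-filename and -ln/--layer-name options are added to the data source, visibility, detection and group parsers and default to None when omitted (argparse's default for optional flags), so every call site simply forwards args.output_filename and args.layer_name and lets the generator functions fall back to their original file and layer names. A minimal, self-contained sketch of that contract (the parser below is illustrative, not the project's full menu):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-of', '--output-filename', help='set the output filename')
parser.add_argument('-ln', '--layer-name', help='set the name of the Navigator layer')

args = parser.parse_args(['-of', 'my_layer'])
# args.output_filename == 'my_layer' and args.layer_name is None:
# the layer keeps its default name, but is written to output/my_layer.json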

View File

@ -315,6 +315,17 @@ textarea.form-control {
max-height: 115px !important;
}
.textareaFileDetails textarea.textarea-customstyle {
height: 75px !important;
}
.textareaFileDetails {
margin-top: 6px;
margin-bottom: 6px;
position: relative;
padding-right: 15px;
}
.modal-content .modal-body textarea {
height: 52px !important;
}
@ -427,14 +438,14 @@ h4 {
.icon-example {
position: absolute;
z-index: 9999;
z-index: 1;
right: 19px;
bottom: 3px;
}
.icon-example:after {
content: url(img/maximize-2.svg);
position: absolute;
z-index: 9999;
z-index: 1;
right: 6px;
bottom: 3px;
cursor: pointer;

View File

@ -1 +1 @@
<!DOCTYPE html><html lang=en><head><meta charset=utf-8><link rel=apple-touch-icon sizes=76x76 href=/dettect-editor/apple-icon.png><!--[if IE]><link rel="icon" type="image/png" sizes="96x96" href="/dettect-editor/favicon.png" /><![endif]--><meta http-equiv=X-UA-Compatible content="IE=edge,chrome=1"><title>DeTT&CT Editor</title><meta name=viewport content="width=device-width,initial-scale=1,maximum-scale=1.5"><link href="https://fonts.googleapis.com/css?family=Poppins:200,300,400,600,700,800" rel=stylesheet><link href=https://use.fontawesome.com/releases/v5.0.6/css/all.css rel=stylesheet><link href=https://cdnjs.cloudflare.com/ajax/libs/bootstrap-rtl/3.4.0/css/bootstrap-rtl.css rel=stylesheet><link href=/dettect-editor/custom.css rel=stylesheet><link href=/dettect-editor/css/app.1edb8aa1.css rel=preload as=style><link href=/dettect-editor/css/chunk-vendors.628827c0.css rel=preload as=style><link href=/dettect-editor/js/app.3656c6ca.js rel=preload as=script><link href=/dettect-editor/js/chunk-vendors.0fe637eb.js rel=preload as=script><link href=/dettect-editor/css/chunk-vendors.628827c0.css rel=stylesheet><link href=/dettect-editor/css/app.1edb8aa1.css rel=stylesheet><link rel=icon type=image/png sizes=32x32 href=/dettect-editor/img/icons/favicon-32x32.png><link rel=icon type=image/png sizes=16x16 href=/dettect-editor/img/icons/favicon-16x16.png><link rel=manifest href=/dettect-editor/manifest.json><meta name=theme-color content=#344675><meta name=apple-mobile-web-app-capable content=yes><meta name=apple-mobile-web-app-status-bar-style content=#344675><meta name=apple-mobile-web-app-title content="DeTT&CT Editor"><link rel=apple-touch-icon href=/dettect-editor/img/icons/apple-touch-icon-152x152.png><link rel=mask-icon href=/dettect-editor/img/icons/safari-pinned-tab.svg color=#344675><meta name=msapplication-TileImage content=/dettect-editor/img/icons/msapplication-icon-144x144.png><meta name=msapplication-TileColor content=#344675></head><body><div class=wrapper id=app></div><script src=/dettect-editor/js/chunk-vendors.0fe637eb.js></script><script src=/dettect-editor/js/app.3656c6ca.js></script></body></html>
<!DOCTYPE html><html lang=en><head><meta charset=utf-8><link rel=apple-touch-icon sizes=76x76 href=/dettect-editor/apple-icon.png><!--[if IE]><link rel="icon" type="image/png" sizes="96x96" href="/dettect-editor/favicon.png" /><![endif]--><meta http-equiv=X-UA-Compatible content="IE=edge,chrome=1"><title>DeTT&CT Editor</title><meta name=viewport content="width=device-width,initial-scale=1,maximum-scale=1.5"><link href="https://fonts.googleapis.com/css?family=Poppins:200,300,400,600,700,800" rel=stylesheet><link href=https://use.fontawesome.com/releases/v5.0.6/css/all.css rel=stylesheet><link href=https://cdnjs.cloudflare.com/ajax/libs/bootstrap-rtl/3.4.0/css/bootstrap-rtl.css rel=stylesheet><link href=/dettect-editor/custom.css rel=stylesheet><link href=/dettect-editor/css/app.1edb8aa1.css rel=preload as=style><link href=/dettect-editor/css/chunk-vendors.628827c0.css rel=preload as=style><link href=/dettect-editor/js/app.a473a36b.js rel=preload as=script><link href=/dettect-editor/js/chunk-vendors.0fe637eb.js rel=preload as=script><link href=/dettect-editor/css/chunk-vendors.628827c0.css rel=stylesheet><link href=/dettect-editor/css/app.1edb8aa1.css rel=stylesheet><link rel=icon type=image/png sizes=32x32 href=/dettect-editor/img/icons/favicon-32x32.png><link rel=icon type=image/png sizes=16x16 href=/dettect-editor/img/icons/favicon-16x16.png><link rel=manifest href=/dettect-editor/manifest.json><meta name=theme-color content=#344675><meta name=apple-mobile-web-app-capable content=yes><meta name=apple-mobile-web-app-status-bar-style content=#344675><meta name=apple-mobile-web-app-title content="DeTT&CT Editor"><link rel=apple-touch-icon href=/dettect-editor/img/icons/apple-touch-icon-152x152.png><link rel=mask-icon href=/dettect-editor/img/icons/safari-pinned-tab.svg color=#344675><meta name=msapplication-TileImage content=/dettect-editor/img/icons/msapplication-icon-144x144.png><meta name=msapplication-TileColor content=#344675></head><body><div class=wrapper id=app></div><script src=/dettect-editor/js/chunk-vendors.0fe637eb.js></script><script src=/dettect-editor/js/app.a473a36b.js></script></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -1,6 +1,6 @@
self.__precacheManifest = (self.__precacheManifest || []).concat([
{
"revision": "7ae11f9aaf9be462fb92",
"revision": "5a439e7ee5d9fd17199a",
"url": "/dettect-editor/css/app.1edb8aa1.css"
},
{
@ -8,7 +8,7 @@ self.__precacheManifest = (self.__precacheManifest || []).concat([
"url": "/dettect-editor/css/chunk-vendors.628827c0.css"
},
{
"revision": "b13c75bab7c629cadb021976fa83902f",
"revision": "dcc1c1e6240a9be1c1d954e40912e920",
"url": "/dettect-editor/custom.css"
},
{
@ -48,12 +48,12 @@ self.__precacheManifest = (self.__precacheManifest || []).concat([
"url": "/dettect-editor/img/maximize-2.svg"
},
{
"revision": "d944491edc3a600a4f90a4083dca6d55",
"revision": "02165f55e735bb48324b20851285049c",
"url": "/dettect-editor/index.html"
},
{
"revision": "7ae11f9aaf9be462fb92",
"url": "/dettect-editor/js/app.3656c6ca.js"
"revision": "5a439e7ee5d9fd17199a",
"url": "/dettect-editor/js/app.a473a36b.js"
},
{
"revision": "c0732251fd5ce7183fd8",

View File

@ -14,7 +14,7 @@
importScripts("https://storage.googleapis.com/workbox-cdn/releases/4.3.1/workbox-sw.js");
importScripts(
"/dettect-editor/precache-manifest.b51498fd253b79cb164d6ff808e85bd1.js"
"/dettect-editor/precache-manifest.141e839a0de27b124e559ce780fcf62d.js"
);
workbox.core.setCacheNameDetails({prefix: "vue-black-dashboard"});

View File

@ -315,6 +315,17 @@ textarea.form-control {
max-height: 115px !important;
}
.textareaFileDetails textarea.textarea-customstyle {
height: 75px !important;
}
.textareaFileDetails {
margin-top: 6px;
margin-bottom: 6px;
position: relative;
padding-right: 15px;
}
.modal-content .modal-body textarea {
height: 52px !important;
}
@ -427,14 +438,14 @@ h4 {
.icon-example {
position: absolute;
z-index: 9999;
z-index: 1;
right: 19px;
bottom: 3px;
}
.icon-example:after {
content: url(img/maximize-2.svg);
position: absolute;
z-index: 9999;
z-index: 1;
right: 6px;
bottom: 3px;
cursor: pointer;

View File

@ -21,6 +21,14 @@
<td>Name:</td>
<td><base-input v-model="doc['name']" class="file-detail-edit"></base-input></td>
</tr>
<tr>
<td>Notes:</td>
<td>
<div class="textareaFileDetails">
<extended-textarea :data_object="doc" data_field="notes" rows="2" id="notes"></extended-textarea>
</div>
</td>
</tr>
<tr>
<td>Platform:</td>
<td>
@ -36,27 +44,28 @@
<script>
import { notificationMixin } from '@/mixins/NotificationMixins.js';
import ExtendedTextarea from '@/components/Inputs/ExtendedTextarea';
export default {
mixins: [notificationMixin],
props: {
filename: {
type: String,
required: true
required: true,
},
doc: {
type: Object,
required: true
required: true,
},
platforms: {
type: Array,
required: true
required: true,
},
showName: {
type: Boolean,
required: false,
default: true
}
default: true,
},
},
methods: {
platformEventHandler(event) {
@ -80,8 +89,11 @@ export default {
if (this.doc.platform.length == 0) {
this.notifyDanger('Missing value', 'No value for platform selected. Please select one or more platforms.');
}
}
}
},
},
components: {
ExtendedTextarea,
},
};
</script>

View File

@ -69,7 +69,13 @@
</div>
<div class="row mt-md-0">
<div class="col-md-11">
<extended-textarea :row="row" :id="title + i" :cb_function="editCommentCallback"></extended-textarea>
<extended-textarea
:data_object="row"
data_field="comment"
:id="title + i"
rows="4"
:cb_function="editCommentCallback"
></extended-textarea>
</div>
</div>
<div class="row mt-md-3">
@ -171,57 +177,57 @@ export default {
defaultKVKeys: Object.keys(this.emptyObject),
showHelptextScore: false,
currentModal: '',
commentModal: ''
commentModal: '',
};
},
mixins: [notificationMixin, pageDetailMixin],
props: {
title: {
type: String,
required: true
required: true,
},
applicable_to: {
type: Array,
required: true
required: true,
},
showLocation: {
type: Boolean,
required: false,
default: true
default: true,
},
helpText: {
type: String,
required: true
required: true,
},
scores: {
type: Array,
required: true
required: true,
},
scoresTooltip: {
type: Object,
required: true
required: true,
},
defaultScore: {
type: Number,
required: true
required: true,
},
showAutoGenerated: {
type: Boolean,
required: false,
default: false
default: false,
},
emptyScoreEntry: {
type: Object,
required: true
required: true,
},
helptextScore: {
type: String,
required: true
required: true,
},
emptyObject: {
type: Object,
required: true
}
required: true,
},
},
components: {
ListEditor,
@ -230,7 +236,7 @@ export default {
ScoreLogbook,
ScoreSlider,
Icons,
ExtendedTextarea
ExtendedTextarea,
},
methods: {
escapeKeyListener: function(evt) {
@ -329,8 +335,8 @@ export default {
},
editCommentCallback(b) {
this.commentModal = b;
}
}
},
},
};
</script>

View File

@ -1,6 +1,12 @@
<template>
<div>
<textarea rows="4" placeholder=". . ." class="form-control textarea-border pl-md-3" v-model="row.comment"> </textarea>
<textarea
:rows="rows"
placeholder=". . ."
class="form-control textarea-border pl-md-3 textarea-customstyle"
v-model="data_object[data_field]"
>
</textarea>
<div v-b-modal="'comment-modal-' + id" class="icon-example" @click="callCbFunction('comment-modal-' + id)"></div>
<b-modal :id="'comment-modal-' + id" dialog-class="modal-edit-small" content-class="modal-dark-mode" hide-footer hide-header no-close-on-esc>
<div class="row">
@ -27,7 +33,7 @@
rows="50"
placeholder=". . ."
class="form-control textarea-border textarea-modal pl-md-3"
v-model="row.comment"
v-model="data_object[data_field]"
></textarea>
</div>
</div>
@ -42,25 +48,33 @@ export default {
},
components: {},
props: {
row: {
data_object: {
type: Object,
required: true
required: true,
},
data_field: {
type: String,
required: true,
},
id: {
type: String,
required: true
required: true,
},
cb_function: {
type: Function,
required: false
}
required: false,
},
rows: {
type: String,
required: true,
},
},
methods: {
callCbFunction(b) {
if (this.cb_function != undefined) {
this.cb_function(b);
}
}
}
},
},
};
</script>

View File

@ -60,7 +60,13 @@
></score-slider>
</div>
<div class="col-md-5">
<extended-textarea :row="v" :id="'modal' + index" :cb_function="cb_function"></extended-textarea>
<extended-textarea
:data_object="v"
data_field="comment"
:id="'modal' + index"
:cb_function="cb_function"
rows="4"
></extended-textarea>
</div>
<div v-if="showAutoGenerated" class="col-md-auto">
<toggle-button
@ -99,51 +105,51 @@ export default {
data() {
return {
newScore: this.defaultScore,
componentKey: 0
componentKey: 0,
};
},
mixins: [notificationMixin],
props: {
item: {
type: Array,
required: true
required: true,
},
scores: {
type: Array,
required: true
required: true,
},
scoresTooltip: {
type: Object,
required: true
required: true,
},
defaultScore: {
type: Number,
required: true
required: true,
},
showAutoGenerated: {
type: Boolean,
required: false,
default: false
default: false,
},
modalId: {
type: String,
required: true
required: true,
},
emptyScoreEntry: {
type: Object,
required: true
required: true,
},
cb_function: {
type: Function,
required: false
}
required: false,
},
},
components: {
DatePicker,
ScoreSlider,
ToggleButton,
Icons,
ExtendedTextarea
ExtendedTextarea,
},
mounted() {
this.sortOnDates();
@ -203,8 +209,8 @@ export default {
},
showHelptextScore(event) {
this.$emit('showHelptextScoreNow', event);
}
}
},
},
};
</script>

View File

@ -54,7 +54,13 @@
<div class="row mt-md-0">
<div class="col-md-11 form-group pr-md-2">
<label class="card">Comment</label>
<extended-textarea :row="dataSource" id="datasource" :cb_function="editCommentCallback"></extended-textarea>
<extended-textarea
:data_object="dataSource"
data_field="comment"
id="datasource"
rows="4"
:cb_function="editCommentCallback"
></extended-textarea>
</div>
</div>
<div class="row mt-md-3 col-md-5">
@ -153,37 +159,37 @@ export default {
'2': 'Fair',
'3': 'Good',
'4': 'Very good',
'5': 'Excellent'
'5': 'Excellent',
},
dataSourceDefaultKeys: Object.keys(constants.YAML_OBJ_DATA_SOURCES),
dataSourceSuggestionList: dataSources,
helptextDataQuality: false,
helptextDataSourceKVPairs: false,
commentModal: false
commentModal: false,
};
},
mixins: [pageDetailMixin],
props: {
dataSource: {
type: Object,
required: true
required: true,
},
allDataSources: {
type: Array,
required: true
required: true,
},
dqHelpText: {
type: String,
required: true
required: true,
},
dsHelpText: {
type: String,
required: true
required: true,
},
prevDataSourceQuality: {
type: Array,
required: true
}
required: true,
},
},
methods: {
escapeKeyListener: function(evt) {
@ -217,7 +223,7 @@ export default {
},
editCommentCallback(b) {
this.commentModal = b;
}
},
},
computed: {
dsEnabled() {
@ -227,7 +233,7 @@ export default {
}
}
return false;
}
},
},
components: {
DatePicker,
@ -238,7 +244,7 @@ export default {
AutoSuggestTitle,
Modal,
Icons,
ExtendedTextarea
}
ExtendedTextarea,
},
};
</script>

View File

@ -243,11 +243,27 @@ def get_layer_template_groups(name, max_count, description, stage, platform, ove
layer['legendItems'].append({'label': 'Src. of tech. is only software', 'color': COLOR_SOFTWARE})
layer['legendItems'].append({'label': 'Src. of tech. is group(s)/overlay + software', 'color': COLOR_GROUP_AND_SOFTWARE})
elif overlay_type == OVERLAY_TYPE_DETECTION:
layer['legendItems'].append({'label': 'Tech. in group + detection', 'color': COLOR_GROUP_OVERLAY_MATCH})
layer['legendItems'].append({'label': 'Tech. in detection', 'color': COLOR_GROUP_OVERLAY_ONLY_DETECTION})
layer['legendItems'].append({'label': 'Tech. in group + detection score 0: Forensics/Context', 'color': COLOR_O_0})
layer['legendItems'].append({'label': 'Tech. in group + detection score 1: Basic', 'color': COLOR_O_1})
layer['legendItems'].append({'label': 'Tech. in group + detection score 2: Fair', 'color': COLOR_O_2})
layer['legendItems'].append({'label': 'Tech. in group + detection score 3: Good', 'color': COLOR_O_3})
layer['legendItems'].append({'label': 'Tech. in group + detection score 4: Very good', 'color': COLOR_O_4})
layer['legendItems'].append({'label': 'Tech. in group + detection score 5: Excellent', 'color': COLOR_O_5})
layer['legendItems'].append({'label': 'Tech. in detection, score 0: Forensics/Context', 'color': COLOR_D_0})
layer['legendItems'].append({'label': 'Tech. in detection, score 1: Basic', 'color': COLOR_D_1})
layer['legendItems'].append({'label': 'Tech. in detection, score 2: Fair', 'color': COLOR_D_2})
layer['legendItems'].append({'label': 'Tech. in detection, score 3: Good', 'color': COLOR_D_3})
layer['legendItems'].append({'label': 'Tech. in detection, score 4: Very good', 'color': COLOR_D_4})
layer['legendItems'].append({'label': 'Tech. in detection, score 5: Excellent', 'color': COLOR_D_5})
elif overlay_type == OVERLAY_TYPE_VISIBILITY:
layer['legendItems'].append({'label': 'Tech. in group + visibility', 'color': COLOR_GROUP_OVERLAY_MATCH})
layer['legendItems'].append({'label': 'Tech. in visibility', 'color': COLOR_GROUP_OVERLAY_ONLY_VISIBILITY})
layer['legendItems'].append({'label': 'Tech. in group + visibility score 1: Minimal', 'color': COLOR_O_1})
layer['legendItems'].append({'label': 'Tech. in group + visibility score 2: Medium', 'color': COLOR_O_2})
layer['legendItems'].append({'label': 'Tech. in group + visibility score 3: Good', 'color': COLOR_O_3})
layer['legendItems'].append({'label': 'Tech. in group + visibility score 4: Excellent', 'color': COLOR_O_4})
layer['legendItems'].append({'label': 'Tech. in visibility, score 1: Minimal', 'color': COLOR_V_1})
layer['legendItems'].append({'label': 'Tech. in visibility, score 2: Medium', 'color': COLOR_V_2})
layer['legendItems'].append({'label': 'Tech. in visibility, score 3: Good', 'color': COLOR_V_3})
layer['legendItems'].append({'label': 'Tech. in visibility, score 4: Excellent', 'color': COLOR_V_4})
return layer
@ -335,23 +351,39 @@ def get_layer_template_layered(name, description, stage, platform):
layer = _get_base_template(name, description, stage, platform, 0)
layer['legendItems'] = \
[
{'label': 'Visibility', 'color': COLOR_OVERLAY_VISIBILITY},
{'label': 'Detection', 'color': COLOR_OVERLAY_DETECTION},
{'label': 'Visibility and detection', 'color': COLOR_OVERLAY_BOTH}
{'label': 'Visibility and detection', 'color': COLOR_OVERLAY_BOTH},
{'label': 'Visibility score 1: Minimal', 'color': COLOR_V_1},
{'label': 'Visibility score 2: Medium', 'color': COLOR_V_2},
{'label': 'Visibility score 3: Good', 'color': COLOR_V_3},
{'label': 'Visibility score 4: Excellent', 'color': COLOR_V_4},
{'label': 'Detection score 1: Basic', 'color': COLOR_D_1},
{'label': 'Detection score 2: Fair', 'color': COLOR_D_2},
{'label': 'Detection score 3: Good', 'color': COLOR_D_3},
{'label': 'Detection score 4: Very good', 'color': COLOR_D_4},
{'label': 'Detection score 5: Excellent', 'color': COLOR_D_5}
]
return layer
def write_file(filename_prefix, filename, content):
def create_output_filename(filename_prefix, filename):
"""
Creates a filename using a predetermined convention.
:param filename_prefix: prefix part of the filename
:param filename: filename
:return:
"""
return '%s_%s' % (filename_prefix, normalize_name_to_filename(filename))
def write_file(filename, content):
"""
Writes content to a file; if the file already exists, a number is appended as a suffix so that it won't be
overwritten.
:param filename_prefix: prefix part of the filename
:param filename: filename
:param content: the content of the file that needs to be written to the file
:return:
"""
output_filename = 'output/%s_%s' % (filename_prefix, normalize_name_to_filename(filename))
output_filename = 'output/%s' % clean_filename(filename)
output_filename = get_non_existing_filename(output_filename, 'json')
with open(output_filename, 'w') as f:
@ -367,6 +399,8 @@ def get_non_existing_filename(filename, extension):
:param extension:
:return:
"""
if filename.endswith('.' + extension):
filename = filename.replace('.' + extension, '')
if os.path.exists('%s.%s' % (filename, extension)):
suffix = 1
while os.path.exists('%s_%s.%s' % (filename, suffix, extension)):
@ -493,7 +527,7 @@ def fix_date_and_remove_null(yaml_file, date, input_type='ruamel'):
elif input_type == 'file':
new_lines = yaml_file.readlines()
fixed_lines = [l.replace('\'' + date + '\'', date).replace('null', '')
fixed_lines = [l.replace('\'' + str(date) + '\'', str(date)).replace('null', '')
if REGEX_YAML_DATE.match(l) else
l.replace('null', '') for l in new_lines]
@ -514,8 +548,15 @@ def get_latest_score_obj(yaml_object):
newest_score_obj = None
newest_date = None
for score_obj in yaml_object['score_logbook']:
if not newest_score_obj or score_obj['date'] > newest_date:
newest_date = score_obj['date']
# Scores in the score_logbook can be dates (yyyy-mm-dd) but also datetimes (yyyy-mm-dd hh:mm:ss.ffffff).
# So convert the datetimes to dates to make it possible to compare.
if type(score_obj['date']) == dt:  # dt is the name of the datetime class (see the import statement)
score_obj_date = score_obj['date'].date()
else:
score_obj_date = score_obj['date']
if not newest_score_obj or score_obj_date > newest_date:
newest_date = score_obj_date
newest_score_obj = score_obj
return newest_score_obj
@ -692,7 +733,7 @@ def calculate_score(list_detections, zero_value=0):
number = 0
for v in list_detections:
score = get_latest_score(v)
if score and score >= 0:
if score is not None and score >= 0:
avg_score += score
number += 1
@ -856,6 +897,28 @@ def make_layer_metadata_compliant(metadata):
return metadata
def add_metadata_technique_object(technique, obj_type, metadata):
"""
Add the metadata for a detection or visibility object as used within any type of overlay.
:param technique: technique object containing both the visibility and detection object
:param obj_type: valid values are 'detection' and 'visibility'
:param metadata: a list to which the metadata will be added
:return: the created metadata as a list
"""
if obj_type not in ['detection', 'visibility']:
raise Exception("Invalid value for 'obj_type' provided.")
metadata.append({'name': '---', 'value': '---'})
metadata.append({'name': '-Applicable to', 'value': ', '.join(set([a for v in technique[obj_type] for a in v['applicable_to']]))}) # noqa
metadata.append({'name': '-' + obj_type.capitalize() + ' score', 'value': ', '.join([str(calculate_score(technique[obj_type]))])}) # noqa
if obj_type == 'detection':
metadata.append({'name': '-' + obj_type.capitalize() + ' location', 'value': ', '.join(set([a for v in technique[obj_type] for a in v['location']]))}) # noqa
metadata.append({'name': '-' + obj_type.capitalize() + ' comment', 'value': ' | '.join(set(filter(lambda x: x != '', map(lambda k: k['comment'], technique[obj_type]))))}) # noqa
metadata.append({'name': '-' + obj_type.capitalize() + ' score comment', 'value': ' | '.join(set(filter(lambda x: x != '', map(lambda i: get_latest_comment(i), technique[obj_type]))))}) # noqa
return metadata
def get_updates(update_type, sort='modified'):
"""
Print a list of updates for techniques, groups or software. Sort by modified or creation date.
@ -1003,3 +1066,12 @@ def get_platform_from_yaml(yaml_content):
valid_platform_list.append(PLATFORMS[p])
platform = valid_platform_list
return platform
def clean_filename(filename):
"""
Remove invalid characters from the filename and limit it to a maximum of 200 characters
:param filename: Input filename
:return: sanitized filename
"""
return filename.replace('/', '').replace('\\', '').replace(':', '')[:200]
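
The conversion in get_latest_score_obj() is needed because a score_logbook can now mix plain dates with the datetime values written by generate_technique_administration_file() (today = dt.now()), and Python 3 refuses to compare datetime.datetime against datetime.date directly. A small standalone illustration of why the .date() normalisation matters (not part of the commit):

from datetime import datetime, date

logged = date(2020, 6, 22)                 # a yyyy-mm-dd logbook entry
generated = datetime(2020, 6, 22, 19, 36)  # a generated datetime entry

# 'generated > logged' would raise TypeError ("can't compare datetime.datetime
# to datetime.date"); converting first keeps mixed logbooks comparable:
print(generated.date() > logged)           # False, both sides are plain dates now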

View File

@ -109,7 +109,7 @@ def _get_software_techniques(groups, stage, platform):
# software matches the ATT&CK Matrix and platform
# and the group is a group we are interested in
if s['x_mitre_platforms']:  # there is software that does not have a platform; skip those
if s['matrix'] == 'mitre-'+stage and (platform == 'all' or len(set(s['x_mitre_platforms']).intersection(set(platform))) > 0) and \
if s['matrix'] == 'mitre-' + stage and (platform == 'all' or len(set(s['x_mitre_platforms']).intersection(set(platform))) > 0) and \
(groups[0] == 'all' or s['group_id'].lower() in groups or _is_in_group(s['aliases'], groups)):
if s['group_id'] not in groups_dict:
groups_dict[s['group_id']] = {'group_name': s['name']}
@ -197,7 +197,7 @@ def _get_group_techniques(groups, stage, platform, file_type):
platforms = 'Windows'
# group matches the: matrix/stage, platform and the group(s) we are interested in
if gr['matrix'] == 'mitre-'+stage and (platform == 'all' or len(set(platforms).intersection(set(platform))) > 0) and \
if gr['matrix'] == 'mitre-' + stage and (platform == 'all' or len(set(platforms).intersection(set(platform))) > 0) and \
(groups[0] == 'all' or gr['group_id'].lower() in groups or _is_in_group(gr['aliases'], groups)):
if gr['group_id'] not in groups_dict:
groups_found.add(gr['group_id'])
@ -235,8 +235,8 @@ def _get_detection_techniques(filename):
groups_dict[group_id]['techniques'] = set()
groups_dict[group_id]['weight'] = dict()
for t, v in detection_techniques.items():
s = calculate_score(v['detection'])
if s > 0:
s = calculate_score(v['detection'], zero_value=-1)
if s >= 0:
groups_dict[group_id]['techniques'].add(t)
groups_dict[group_id]['weight'][t] = 1
@ -302,7 +302,10 @@ def _get_technique_count(groups, groups_overlay, groups_software, overlay_type,
dict_tech_score = {}
list_tech = groups_overlay[overlay_type.upper()]['techniques']
for tech in list_tech:
dict_tech_score[tech] = calculate_score(all_techniques[tech][overlay_type]) + max_count
if overlay_type == OVERLAY_TYPE_VISIBILITY:
dict_tech_score[tech] = calculate_score(all_techniques[tech]['visibility']) + max_count
elif overlay_type == OVERLAY_TYPE_DETECTION:
dict_tech_score[tech] = calculate_score(all_techniques[tech]['detection'], zero_value=-1) + max_count
for group, v in groups_overlay.items():
for tech in v['techniques']:
@ -373,8 +376,8 @@ def _get_technique_layer(techniques_count, groups, overlay, groups_software, ove
for group, values in groups.items():
if tech in values['techniques']: # we do not color this one because that's done using the scoring
if 'Groups' not in metadata_dict:
metadata_dict['Groups'] = set()
metadata_dict['Groups'].add(values['group_name'])
metadata_dict['Group'] = set()
metadata_dict['Group'].add(values['group_name'])
# this will only be effective when loading a YAML file that has a value for the key 'campaign'
if 'campaign' in values:
@ -388,32 +391,41 @@ def _get_technique_layer(techniques_count, groups, overlay, groups_software, ove
# Determine color:
if len(v['groups'].intersection(set(groups.keys()))) > 0:
# if the technique is both present in the group (-g/--groups) and the groups overlay (-o/--overlay)
t['color'] = COLOR_GROUP_OVERLAY_MATCH
metadata_dict['Group'].add(values['group_name'])
# determine the color of the overlay:
# - using groups, it's normal orange
# - using detections, it's 6 variations of orange (score 0 to 5)
# - using visibility, it's 4 variations of orange (score 1 to 4)
if overlay_file_type == FILE_TYPE_TECHNIQUE_ADMINISTRATION:
if overlay_type == OVERLAY_TYPE_VISIBILITY:
s = calculate_score(all_techniques[tech]['visibility'])
t['color'] = COLOR_O_1 if s == 1 else COLOR_O_2 if s == 2 else COLOR_O_3 if s == 3 else COLOR_O_4 if s == 4 else ''
elif overlay_type == OVERLAY_TYPE_DETECTION:
s = calculate_score(all_techniques[tech]['detection'], zero_value=-1)
t['color'] = COLOR_O_0 if s == 0 else COLOR_O_1 if s == 1 else COLOR_O_2 if s == 2 else COLOR_O_3 if s == 3 else COLOR_O_4 if s == 4 else COLOR_O_5 if s == 5 else ''
else:
t['color'] = COLOR_GROUP_OVERLAY_MATCH
else:
# the technique is only present in the overlay and not in the provided groups (-g/--groups)
if overlay_file_type == FILE_TYPE_TECHNIQUE_ADMINISTRATION:
if overlay_type == OVERLAY_TYPE_VISIBILITY:
t['color'] = COLOR_GROUP_OVERLAY_ONLY_VISIBILITY
s = calculate_score(all_techniques[tech]['visibility'])
t['color'] = COLOR_V_1 if s == 1 else COLOR_V_2 if s == 2 else COLOR_V_3 if s == 3 else COLOR_V_4 if s == 4 else ''
elif overlay_type == OVERLAY_TYPE_DETECTION:
t['color'] = COLOR_GROUP_OVERLAY_ONLY_DETECTION
s = calculate_score(all_techniques[tech]['detection'], zero_value=-1)
t['color'] = COLOR_D_0 if s == 0 else COLOR_D_1 if s == 1 else COLOR_D_2 if s == 2 else COLOR_D_3 if s == 3 else COLOR_D_4 if s == 4 else COLOR_D_5 if s == 5 else ''
else:
t['color'] = COLOR_GROUP_OVERLAY_NO_MATCH
if 'Groups' not in metadata_dict:
metadata_dict['Group'] = set()
metadata_dict['Group'].add(values['group_name'])
# Add applicable_to to metadata in case of overlay for detection/visibility:
if overlay_file_type == FILE_TYPE_TECHNIQUE_ADMINISTRATION:
metadata_dict['Applicable to'] = set([a for v in all_techniques[tech][overlay_type] for a in v['applicable_to']])
metadata_dict['Detection score'] = [str(calculate_score(all_techniques[tech]['detection']))]
metadata_dict['Visibility score'] = [str(calculate_score(all_techniques[tech]['visibility']))]
if 'Overlay' not in metadata_dict:
metadata_dict['Overlay'] = set()
metadata_dict['Overlay'].add(values['group_name'])
# this will only be effective when loading a YAML file that has a value for the key 'campaign'
if 'campaign' in values:
if 'Campaign' not in metadata_dict:
metadata_dict['Campaign'] = set()
metadata_dict['Campaign'].add(values['campaign'])
t['metadata'].append({'name': '-Overlay', 'value': overlay_type})
for obj_type in ['detection', 'visibility']:
t['metadata'] = add_metadata_technique_object(all_techniques[tech], obj_type, t['metadata'])
# change the color and add metadata to make the groups software overlay visible
for group, values in groups_software.items():
@ -432,10 +444,13 @@ def _get_technique_layer(techniques_count, groups, overlay, groups_software, ove
metadata_dict['Software campaign'].add(values['campaign'])
# create the metadata based on the dict 'metadata_dict'
i = 0
for metadata, values in metadata_dict.items():
tmp_dict = {'name': '-' + metadata, 'value': ', '.join(values)}
t['metadata'].append(tmp_dict)
t['metadata'].insert(i, tmp_dict)
i += 1
t['metadata'] = make_layer_metadata_compliant(t['metadata'])
techniques_layer.append(t)
return techniques_layer
@ -461,8 +476,8 @@ def _get_group_list(groups, file_type):
return groups
def generate_group_heat_map(groups, overlay, overlay_type, stage, platform, software_groups,
search_visibility, search_detection, health_is_called, include_all_score_objs=False):
def generate_group_heat_map(groups, overlay, overlay_type, stage, platform, software_groups, search_visibility,
search_detection, health_is_called, output_filename, layer_name, include_all_score_objs=False):
"""
Calls all functions that are necessary for the generation of the heat map and writes a JSON layer to disk.
:param groups: threat actor groups
@ -475,8 +490,10 @@ def generate_group_heat_map(groups, overlay, overlay_type, stage, platform, soft
:param search_visibility: visibility EQL search query
:param search_detection: detection EQL search query
:param health_is_called: boolean that specifies if detailed errors in the file will be printed
:param output_filename: output filename defined by the user
:param layer_name: the name of the Navigator layer
:param include_all_score_objs: include all score objects within the score_logbook for the EQL query
:return: returns nothing when something's wrong
:return: returns None when something went wrong
"""
overlay_dict = {}
groups_software_dict = {}
@ -486,7 +503,7 @@ def generate_group_heat_map(groups, overlay, overlay_type, stage, platform, soft
groups_file_type = check_file(groups, file_type=FILE_TYPE_GROUP_ADMINISTRATION,
health_is_called=health_is_called)
if not groups_file_type:
return
return None # the groups_file_type is not of the type FILE_TYPE_GROUP_ADMINISTRATION
else:
# remove whitespaces (leading and trailing), convert to lower case and put in a list
groups = groups.split(',')
@ -512,7 +529,7 @@ def generate_group_heat_map(groups, overlay, overlay_type, stage, platform, soft
if overlay_type in [OVERLAY_TYPE_VISIBILITY, OVERLAY_TYPE_DETECTION] else None
overlay_file_type = check_file(overlay, expected_file_type, health_is_called=health_is_called)
if not overlay_file_type:
return
return None # the overlay_file_type is not of the expected type
else:
overlay = overlay.split(',')
overlay = list(map(lambda x: x.strip().lower(), overlay))
@ -533,18 +550,19 @@ def generate_group_heat_map(groups, overlay, overlay_type, stage, platform, soft
overlay_dict, all_techniques = _get_visibility_techniques(overlay)
elif overlay_type == OVERLAY_TYPE_DETECTION:
overlay_dict, all_techniques = _get_detection_techniques(overlay)
# we are not overlaying visibility or detection; the overlay group will therefore contain information on another group
elif len(overlay) > 0:
overlay_dict = _get_group_techniques(overlay, stage, platform, overlay_file_type)
if overlay_dict == -1:
return
return None  # returns None when the provided Group(s) to be overlaid contain Groups that are not part of ATT&CK
groups_dict = _get_group_techniques(groups, stage, platform, groups_file_type)
if groups_dict == -1:
return
return None  # returns None when the provided Group(s) contain Groups that are not part of ATT&CK
if len(groups_dict) == 0:
print('[!] Empty layer.')  # the provided groups did not result in any techniques
return
return None
# check if we are doing a software group overlay
if software_groups and overlay:
@ -566,19 +584,25 @@ def generate_group_heat_map(groups, overlay, overlay_type, stage, platform, soft
overlay_list = _get_group_list(overlay_dict, overlay_file_type)
desc = 'stage: ' + stage + ' | platform(s): ' + platform_to_name(platform, separator=', ') + ' | group(s): ' \
+ ', '.join(groups_list) + ' | overlay group(s): ' + ', '.join(overlay_list)
+ ', '.join(groups_list) + ' | overlay group(s): ' + ', '.join(overlay_list)
layer = get_layer_template_groups(stage[0].upper() + stage[1:] + ' - ' + platform_to_name(platform, separator=', '),
max_count, desc, stage, platform, overlay_type)
if not layer_name:
layer_name = stage[0].upper() + stage[1:] + ' - ' + platform_to_name(platform, separator=', ')
layer = get_layer_template_groups(layer_name, max_count, desc, stage, platform, overlay_type)
layer['techniques'] = technique_layer
json_string = simplejson.dumps(layer).replace('}, ', '},\n')
if stage == 'pre-attack':
filename = '_'.join(groups_list)
elif overlay:
filename = platform_to_name(platform) + '_' + '_'.join(groups_list) + '-overlay_' + '_'.join(overlay_list)
else:
filename = platform_to_name(platform) + '_' + '_'.join(groups_list)
if not output_filename:
if stage == 'pre-attack':
filename = '_'.join(groups_list)
elif overlay:
filename = platform_to_name(platform) + '_' + '_'.join(groups_list) + '-overlay_' + '_'.join(overlay_list)
else:
filename = platform_to_name(platform) + '_' + '_'.join(groups_list)
write_file(stage, filename[:255], json_string)
filename = create_output_filename(stage, filename)
write_file(filename, json_string)
else:
write_file(output_filename, json_string)
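
The switch to calculate_score(..., zero_value=-1) for detections in this file is what allows a detection score of 0 (Forensics/Context) to count as covered while techniques without any detection still drop out: with zero_value=-1 an empty result averages to -1, so the s >= 0 check separates "no detection" from "score 0". A hedged sketch of that behaviour (a simplified re-implementation, not the project's calculate_score):

def calculate_score_sketch(scores, zero_value=0):
    # simplified: the real function works on detection/visibility objects and
    # takes the latest score from each object's score_logbook
    valid = [s for s in scores if s is not None and s >= 0]
    return sum(valid) / len(valid) if valid else zero_value

calculate_score_sketch([], zero_value=-1)    # -1 -> technique is not covered
calculate_score_sketch([0], zero_value=-1)   #  0 -> Forensics/Context counts as covered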

View File

@ -322,7 +322,7 @@ def _check_health_techniques(filename, technique_content, health_is_called):
'\' in \'' + obj_type + '\' is NOT a list', health_is_called)
for okey in obj_keys_not_none:
if okey in obj:
if okey in obj and isinstance(obj[okey], list):
none_count = 0
for item in obj[okey]:
if item is None:

View File

@ -31,9 +31,9 @@ def _clear():
name = '-= %s =-' % APP_NAME
desc = '-- %s --' % APP_DESC
version = 'version %s' % VERSION
print(' ' * int((len(desc)-len(name))/2) + name)
print(' ' * int((len(desc) - len(name)) / 2) + name)
print(desc)
print(' ' * int((len(desc)-len(version))/2) + version)
print(' ' * int((len(desc) - len(version)) / 2) + version)
print('')
@ -273,19 +273,19 @@ def _menu_data_source(filename_ds):
_menu_data_source(filename_ds)
if choice == '3':
print('Writing data sources layer...')
generate_data_sources_layer(file_ds)
generate_data_sources_layer(file_ds, None, None)
_wait()
elif choice == '4':
print('Drawing the graph...')
plot_data_sources_graph(file_ds)
plot_data_sources_graph(file_ds, None)
_wait()
elif choice == '5':
print('Generating Excel file...')
export_data_source_list_to_excel(file_ds, eql_search=eql_query_data_sources)
export_data_source_list_to_excel(file_ds, None, eql_search=eql_query_data_sources)
_wait()
elif choice == '6':
print('Generating YAML file...')
generate_technique_administration_file(file_ds, all_techniques=yaml_all_techniques)
generate_technique_administration_file(file_ds, None, all_techniques=yaml_all_techniques)
_wait()
elif choice == '7':
filename_t = _select_file(MENU_NAME_DETECTION_COVERAGE_MAPPING, 'techniques (used to score the level of visibility)',
@ -352,22 +352,22 @@ def _menu_detection(filename_t):
_menu_detection(filename_t)
if choice == '4':
print('Writing detection coverage layer...')
generate_detection_layer(file_tech, None, False)
generate_detection_layer(file_tech, None, False, None, None)
_wait()
elif choice == '5':
filename_ds = _select_file(MENU_NAME_DETECTION_COVERAGE_MAPPING, 'data sources (used to add metadata on the '
'involved data sources to the heat map)',
FILE_TYPE_DATA_SOURCE_ADMINISTRATION, False)
print('Writing detection coverage layer with visibility as overlay...')
generate_detection_layer(file_tech, filename_ds, True)
generate_detection_layer(file_tech, filename_ds, True, None, None)
_wait()
elif choice == '6':
print('Drawing the graph...')
plot_graph(file_tech, 'detection')
plot_graph(file_tech, 'detection', None)
_wait()
elif choice == '7':
print('Generating Excel file...')
export_techniques_list_to_excel(file_tech)
export_techniques_list_to_excel(file_tech, None)
_wait()
elif choice == '8':
print('Checking the technique YAML file for errors...')
@ -430,19 +430,19 @@ def _menu_visibility(filename_t, filename_ds):
_menu_visibility(filename_t, filename_ds)
if choice == '4':
print('Writing visibility coverage layer...')
generate_visibility_layer(file_tech, filename_ds, False)
generate_visibility_layer(file_tech, filename_ds, False, None, None)
_wait()
elif choice == '5':
print('Writing visibility coverage layer overlaid with detections...')
generate_visibility_layer(file_tech, filename_ds, True)
generate_visibility_layer(file_tech, filename_ds, True, None, None)
_wait()
elif choice == '6':
print('Drawing the graph...')
plot_graph(file_tech, 'visibility')
plot_graph(file_tech, 'visibility', None)
_wait()
elif choice == '7':
print('Generating Excel file...')
export_techniques_list_to_excel(file_tech)
export_techniques_list_to_excel(file_tech, None)
_wait()
elif choice == '8':
print('Checking the technique YAML file for errors...')
@ -538,11 +538,9 @@ def _menu_groups():
eql_all_scores = not eql_all_scores
elif choice == '7':
if not generate_group_heat_map(groups, groups_overlay, overlay_type, default_stage, default_platform,
software_group, eql_query_visibility, eql_query_detection, False,
include_all_score_objs=eql_all_scores):
_wait()
_menu_groups()
generate_group_heat_map(groups, groups_overlay, overlay_type, default_stage, default_platform,
software_group, eql_query_visibility, eql_query_detection, False,
None, None, include_all_score_objs=eql_all_scores)
_wait()
elif choice == '9':
interactive_menu()

View File

@ -1,8 +1,8 @@
attackcti==0.3.2
simplejson==3.17.0
plotly==4.5.0
pandas==1.0.1
xlsxwriter==1.2.7
ruamel.yaml==0.16.7
eql==0.8.2
plotly==4.8.1
pandas==1.0.3
xlsxwriter==1.2.8
ruamel.yaml==0.16.10
eql==0.9.1
taxii2-client==2.0.0

Binary file not shown.

View File

@ -5,33 +5,41 @@ from datetime import datetime
# Imports for pandas and plotly are done inside the functions that use these libraries, for performance reasons.
def generate_detection_layer(filename_techniques, filename_data_sources, overlay):
def generate_detection_layer(filename_techniques, filename_data_sources, overlay, output_filename, layer_name):
"""
Generates a layer for detection coverage and optionally an overlaid version with visibility coverage.
:param filename_techniques: the filename of the YAML file containing the techniques administration
:param filename_data_sources: the filename of the YAML file containing the data sources administration
:param overlay: boolean value to specify if an overlay between detection and visibility should be generated
:param layer_name: the name of the Navigator layer
:param output_filename: the output filename defined by the user
:return:
"""
if not overlay:
my_techniques, name, platform = load_techniques(filename_techniques)
mapped_techniques_detection = _map_and_colorize_techniques_for_detections(my_techniques)
layer_detection = get_layer_template_detections('Detections ' + name, 'description', 'attack', platform)
_write_layer(layer_detection, mapped_techniques_detection, 'detection', name)
if not layer_name:
layer_name = 'Detections ' + name
layer_detection = get_layer_template_detections(layer_name, 'description', 'attack', platform)
_write_layer(layer_detection, mapped_techniques_detection, 'detection', name, output_filename)
else:
my_techniques, name, platform = load_techniques(filename_techniques)
my_data_sources = _load_data_sources(filename_data_sources)
mapped_techniques_both = _map_and_colorize_techniques_for_overlaid(my_techniques, my_data_sources, platform)
layer_both = get_layer_template_layered('Visibility and Detection ' + name, 'description', 'attack', platform)
_write_layer(layer_both, mapped_techniques_both, 'visibility_and_detection', name)
if not layer_name:
layer_name = 'Visibility and Detection ' + name
layer_both = get_layer_template_layered(layer_name, 'description', 'attack', platform)
_write_layer(layer_both, mapped_techniques_both, 'visibility_and_detection', name, output_filename)
def generate_visibility_layer(filename_techniques, filename_data_sources, overlay):
def generate_visibility_layer(filename_techniques, filename_data_sources, overlay, output_filename, layer_name):
"""
Generates a layer for visibility coverage and optionally an overlaid version with detection coverage.
:param filename_techniques: the filename of the YAML file containing the techniques administration
:param filename_data_sources: the filename of the YAML file containing the data sources administration
:param overlay: boolean value to specify if an overlay between detection and visibility should be generated
:param output_filename: the output filename defined by the user
:param layer_name: the name of the Navigator layer
:return:
"""
my_data_sources = _load_data_sources(filename_data_sources)
@ -39,20 +47,25 @@ def generate_visibility_layer(filename_techniques, filename_data_sources, overla
if not overlay:
my_techniques, name, platform = load_techniques(filename_techniques)
mapped_techniques_visibility = _map_and_colorize_techniques_for_visibility(my_techniques, my_data_sources, platform)
layer_visibility = get_layer_template_visibility('Visibility ' + name, 'description', 'attack', platform)
_write_layer(layer_visibility, mapped_techniques_visibility, 'visibility', name)
if not layer_name:
layer_name = 'Visibility ' + name
layer_visibility = get_layer_template_visibility(layer_name, 'description', 'attack', platform)
_write_layer(layer_visibility, mapped_techniques_visibility, 'visibility', name, output_filename)
else:
my_techniques, name, platform = load_techniques(filename_techniques)
mapped_techniques_both = _map_and_colorize_techniques_for_overlaid(my_techniques, my_data_sources, platform)
layer_both = get_layer_template_layered('Visibility and Detection ' + name, 'description', 'attack', platform)
_write_layer(layer_both, mapped_techniques_both, 'visibility_and_detection', name)
if not layer_name:
layer_name = 'Visibility and Detection ' + name
layer_both = get_layer_template_layered(layer_name, 'description', 'attack', platform)
_write_layer(layer_both, mapped_techniques_both, 'visibility_and_detection', name, output_filename)
def plot_graph(filename, type_graph):
def plot_graph(filename, type_graph, output_filename):
"""
Generates a line graph which shows the improvement in detections or visibility over time.
:param filename: the filename of the YAML file containing the techniques administration
:param type_graph: indicates the type of the graph: detection or visibility
:param output_filename: the output filename defined by the user
:return:
"""
# pylint: disable=unused-variable
@ -70,7 +83,11 @@ def plot_graph(filename, type_graph):
df = pd.DataFrame(graph_values).groupby('date', as_index=False)[['count']].sum()
df['cumcount'] = df['count'].cumsum()
output_filename = get_non_existing_filename('output/graph_%s' % type_graph, 'html')
if not output_filename:
output_filename = 'graph_' + type_graph
elif output_filename.endswith('.html'):
output_filename = output_filename.replace('.html', '')
output_filename = get_non_existing_filename('output/' + output_filename, 'html')
import plotly
import plotly.graph_objs as go
@ -108,19 +125,26 @@ def _load_data_sources(file):
return my_data_sources
def _write_layer(layer, mapped_techniques, filename_prefix, name):
def _write_layer(layer, mapped_techniques, filename_prefix, name, output_filename):
"""
Writes the json layer file to disk.
:param layer: the prepped layer dictionary
:param mapped_techniques: the techniques section that will be included in the layer
:param filename_prefix: the prefix for the output filename
:param name: the name that will be used in the filename together with the prefix
:param output_filename: the output filename defined by the user
:return:
"""
layer['techniques'] = mapped_techniques
json_string = simplejson.dumps(layer).replace('}, ', '},\n')
write_file(filename_prefix, name, json_string)
if not output_filename:
output_filename = create_output_filename(filename_prefix, name)
else:
if output_filename.endswith('.json'):
output_filename = output_filename.replace('.json', '')
if filename_prefix == 'visibility_and_detection':
output_filename += '_overlay'
write_file(output_filename, json_string)
def _map_and_colorize_techniques_for_detections(my_techniques):
@ -276,9 +300,11 @@ def _map_and_colorize_techniques_for_overlaid(my_techniques, my_data_sources, pl
if detection and visibility:
color = COLOR_OVERLAY_BOTH
elif detection and not visibility:
color = COLOR_OVERLAY_DETECTION
s = detection_score
color = COLOR_D_0 if s == 0 else COLOR_D_1 if s == 1 else COLOR_D_2 if s == 2 else COLOR_D_3 if s == 3 else COLOR_D_4 if s == 4 else COLOR_D_5 if s == 5 else ''
elif not detection and visibility:
color = COLOR_OVERLAY_VISIBILITY
s = visibility_score
color = COLOR_V_1 if s == 1 else COLOR_V_2 if s == 2 else COLOR_V_3 if s == 3 else COLOR_V_4 if s == 4 else ''
else:
color = COLOR_WHITE
@ -296,39 +322,11 @@ def _map_and_colorize_techniques_for_overlaid(my_techniques, my_data_sources, pl
x['metadata'].append({'name': '-Available data sources', 'value': my_ds})
x['metadata'].append({'name': '-ATT&CK data sources', 'value': ', '.join(get_applicable_data_sources_technique(technique['x_mitre_data_sources'],
applicable_data_sources))})
x['metadata'].append({'name': '---', 'value': '---'})
# Metadata for detection:
cnt = 1
tcnt = len([d for d in technique_data['detection'] if get_latest_score(d) >= 0])
for detection in technique_data['detection']:
d_score = get_latest_score(detection)
if d_score >= 0:
location = ', '.join(detection['location'])
applicable_to = ', '.join(detection['applicable_to'])
x['metadata'].append({'name': '-Applicable to', 'value': applicable_to})
x['metadata'].append({'name': '-Detection score', 'value': str(d_score)})
x['metadata'].append({'name': '-Detection location', 'value': location})
x['metadata'].append({'name': '-Technique comment', 'value': detection['comment']})
x['metadata'].append({'name': '-Detection comment', 'value': get_latest_comment(detection)})
if cnt != tcnt:
x['metadata'].append({'name': '---', 'value': '---'})
cnt += 1
# Metadata for visibility:
if tcnt > 0:
x['metadata'].append({'name': '---', 'value': '---'})
cnt = 1
tcnt = len([v for v in technique_data['visibility']])
for visibility in technique_data['visibility']:
applicable_to = ', '.join(visibility['applicable_to'])
x['metadata'].append({'name': '-Applicable to', 'value': applicable_to})
x['metadata'].append({'name': '-Visibility score', 'value': str(get_latest_score(visibility))})
x['metadata'].append({'name': '-Technique comment', 'value': visibility['comment']})
x['metadata'].append({'name': '-Visibility comment', 'value': get_latest_comment(visibility)})
if cnt != tcnt:
x['metadata'].append({'name': '---', 'value': '---'})
cnt += 1
# Metadata for detection and visibility:
for obj_type in ['detection', 'visibility']:
tcnt = len([obj for obj in technique_data[obj_type] if get_latest_score(obj) >= 0])
if tcnt > 0:
x['metadata'] = add_metadata_technique_object(technique_data, obj_type, x['metadata'])
x['metadata'] = make_layer_metadata_compliant(x['metadata'])
mapped_techniques.append(x)
@ -336,10 +334,11 @@ def _map_and_colorize_techniques_for_overlaid(my_techniques, my_data_sources, pl
return mapped_techniques
def export_techniques_list_to_excel(filename):
def export_techniques_list_to_excel(filename, output_filename):
"""
Makes an overview of the MITRE ATT&CK techniques from the YAML administration file.
:param filename: the filename of the YAML file containing the techniques administration
:param output_filename: the output filename defined by the user
:return:
"""
# pylint: disable=unused-variable
@ -347,7 +346,11 @@ def export_techniques_list_to_excel(filename):
my_techniques = dict(sorted(my_techniques.items(), key=lambda kv: kv[0], reverse=False))
mitre_techniques = load_attack_data(DATA_TYPE_STIX_ALL_TECH)
excel_filename = get_non_existing_filename('output/techniques', 'xlsx')
if not output_filename:
output_filename = 'techniques'
elif output_filename.endswith('.xlsx'):
output_filename = output_filename.replace('.xlsx', '')
excel_filename = get_non_existing_filename('output/' + output_filename, 'xlsx')
workbook = xlsxwriter.Workbook(excel_filename)
worksheet_detections = workbook.add_worksheet('Detections')
worksheet_visibility = workbook.add_worksheet('Visibility')
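
The per-detection and per-visibility metadata loops removed from _map_and_colorize_techniques_for_overlaid() are replaced by add_metadata_technique_object() from generic.py, which collapses all objects of one type into single, joined metadata rows. Roughly, for a technique with two detection objects the appended rows look like this (the values are illustrative; the row names follow the new helper):

# Illustrative output of add_metadata_technique_object(technique, 'detection', [])
[{'name': '---', 'value': '---'},
 {'name': '-Applicable to', 'value': 'all, crown jewels'},
 {'name': '-Detection score', 'value': '3'},
 {'name': '-Detection location', 'value': 'EDR, SIEM'},
 {'name': '-Detection comment', 'value': 'comment A | comment B'},
 {'name': '-Detection score comment', 'value': 'latest score comment'}]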