Always close CloudStorage sessions

1. propagate proper PDF filesize handling to the GDrive and MSDrive classes
2. use the session as a context manager to ensure it closes even if an exception is unhandled.
fix_pdf
eric 2019-06-19 15:03:16 -04:00
parent ee826a5148
commit f197d5227f
3 changed files with 39 additions and 37 deletions

View File

@ -57,9 +57,11 @@ class Dropbox (CloudStorage.CloudStorage):
'Content-Type' : 'application/octet-stream',
'Dropbox-API-Arg' : json.dumps (parameters)
}
with closing (session.post (self.upload_endpoint,
data = response.content,
headers = headers)) as r:
if 'error_summary' in r.text:
CloudStorage.error_log (r.text)
r.raise_for_status ()
data = response.content
with session as s:
with closing (s.post (self.upload_endpoint,
data = data,
headers = headers)) as r:
if 'error_summary' in r.text:
CloudStorage.error_log (r.text)
r.raise_for_status ()

View File

@ -54,19 +54,20 @@ class GDrive (CloudStorage.CloudStorage):
}
headers = {
'X-Upload-Content-Type': request.headers['Content-Type'],
'X-Upload-Content-Length': request.headers['Content-Length'],
'X-Upload-Content-Length': str(len(request.content)),
'Content-Type': 'application/json; charset=UTF-8',
}
with closing (session.post (self.upload_endpoint,
data = json.dumps (file_metadata),
headers = headers)) as r2:
r2.raise_for_status ()
session_uri = r2.headers['Location']
with session as s:
with closing (s.post (self.upload_endpoint,
data = json.dumps (file_metadata),
headers = headers)) as r2:
r2.raise_for_status ()
session_uri = r2.headers['Location']
headers = {
'Content-Type': request.headers['Content-Type'],
}
with closing (session.put (session_uri,
data = request.iter_content (1024 * 1024),
headers = headers)) as r3:
r3.raise_for_status ()
headers = {
'Content-Type': request.headers['Content-Type'],
}
with closing (s.put (session_uri,
data = request.iter_content (1024 * 1024),
headers = headers)) as r3:
r3.raise_for_status ()

View File

@ -50,7 +50,7 @@ class MSDrive(CloudStorage.CloudStorage):
'description': 'A Project Gutenberg Ebook',
"@microsoft.graph.conflictBehavior": "rename",
}
filesize = int(response.headers['Content-Length'])
filesize = len(response.content)
url = self.upload_endpoint.format(filename=filename)
chunk_size = 327680 # weird onedrive thing related to FAT tables
upload_data = session.post(url, json={'item': item_data}).json()
@ -60,21 +60,20 @@ class MSDrive(CloudStorage.CloudStorage):
'Content-Length': str(end - start + 1),
'Content-Range': 'bytes {}-{}/{}'.format(start, end, filesize)
}
with session as s:
if 'uploadUrl' in upload_data:
session_uri = upload_data['uploadUrl']
start = 0
end = min(chunk_size - 1, filesize - 1)
if 'uploadUrl' in upload_data:
session_uri = upload_data['uploadUrl']
start = 0
end = min(chunk_size - 1, filesize - 1)
for chunk in response.iter_content(chunk_size):
r = session.put(
session_uri,
data=chunk,
headers=headers(start, end, filesize),
)
start = start + chunk_size
end = min(end + chunk_size, filesize - 1)
r.raise_for_status()
else:
CloudStorage.log('no uploadUrl in %s' % upload_data)
session.close()
for chunk in response.iter_content(chunk_size):
r = s.put(
session_uri,
data=chunk,
headers=headers(start, end, filesize),
)
start = start + chunk_size
end = min(end + chunk_size, filesize - 1)
r.raise_for_status()
else:
CloudStorage.log('no uploadUrl in %s' % upload_data)