always close cloudstorage sessions

1. propagate proper pdf filesize handling to GDrive and MSDrive classes
2. use the session as a context manager to ensure it closes even if an exception is unhandled.
fix_pdf
eric 2019-06-19 15:03:16 -04:00
parent ee826a5148
commit f197d5227f
3 changed files with 39 additions and 37 deletions

View File

@@ -57,8 +57,10 @@ class Dropbox (CloudStorage.CloudStorage):
'Content-Type' : 'application/octet-stream', 'Content-Type' : 'application/octet-stream',
'Dropbox-API-Arg' : json.dumps (parameters) 'Dropbox-API-Arg' : json.dumps (parameters)
} }
with closing (session.post (self.upload_endpoint, data = response.content
data = response.content, with session as s:
with closing (s.post (self.upload_endpoint,
data = data,
headers = headers)) as r: headers = headers)) as r:
if 'error_summary' in r.text: if 'error_summary' in r.text:
CloudStorage.error_log (r.text) CloudStorage.error_log (r.text)

View File

@@ -54,10 +54,11 @@ class GDrive (CloudStorage.CloudStorage):
} }
headers = { headers = {
'X-Upload-Content-Type': request.headers['Content-Type'], 'X-Upload-Content-Type': request.headers['Content-Type'],
'X-Upload-Content-Length': request.headers['Content-Length'], 'X-Upload-Content-Length': str(len(request.content)),
'Content-Type': 'application/json; charset=UTF-8', 'Content-Type': 'application/json; charset=UTF-8',
} }
with closing (session.post (self.upload_endpoint, with session as s:
with closing (s.post (self.upload_endpoint,
data = json.dumps (file_metadata), data = json.dumps (file_metadata),
headers = headers)) as r2: headers = headers)) as r2:
r2.raise_for_status () r2.raise_for_status ()
@@ -66,7 +67,7 @@ class GDrive (CloudStorage.CloudStorage):
headers = { headers = {
'Content-Type': request.headers['Content-Type'], 'Content-Type': request.headers['Content-Type'],
} }
with closing (session.put (session_uri, with closing (s.put (session_uri,
data = request.iter_content (1024 * 1024), data = request.iter_content (1024 * 1024),
headers = headers)) as r3: headers = headers)) as r3:
r3.raise_for_status () r3.raise_for_status ()

View File

@@ -50,7 +50,7 @@ class MSDrive(CloudStorage.CloudStorage):
'description': 'A Project Gutenberg Ebook', 'description': 'A Project Gutenberg Ebook',
"@microsoft.graph.conflictBehavior": "rename", "@microsoft.graph.conflictBehavior": "rename",
} }
filesize = int(response.headers['Content-Length']) filesize = len(response.content)
url = self.upload_endpoint.format(filename=filename) url = self.upload_endpoint.format(filename=filename)
chunk_size = 327680 # weird onedrive thing related to FAT tables chunk_size = 327680 # weird onedrive thing related to FAT tables
upload_data = session.post(url, json={'item': item_data}).json() upload_data = session.post(url, json={'item': item_data}).json()
@@ -60,14 +60,14 @@ class MSDrive(CloudStorage.CloudStorage):
'Content-Length': str(end - start + 1), 'Content-Length': str(end - start + 1),
'Content-Range': 'bytes {}-{}/{}'.format(start, end, filesize) 'Content-Range': 'bytes {}-{}/{}'.format(start, end, filesize)
} }
with session as s:
if 'uploadUrl' in upload_data: if 'uploadUrl' in upload_data:
session_uri = upload_data['uploadUrl'] session_uri = upload_data['uploadUrl']
start = 0 start = 0
end = min(chunk_size - 1, filesize - 1) end = min(chunk_size - 1, filesize - 1)
for chunk in response.iter_content(chunk_size): for chunk in response.iter_content(chunk_size):
r = session.put( r = s.put(
session_uri, session_uri,
data=chunk, data=chunk,
headers=headers(start, end, filesize), headers=headers(start, end, filesize),
@@ -77,4 +77,3 @@ class MSDrive(CloudStorage.CloudStorage):
r.raise_for_status() r.raise_for_status()
else: else:
CloudStorage.log('no uploadUrl in %s' % upload_data) CloudStorage.log('no uploadUrl in %s' % upload_data)
session.close()